import unittest
import random
import numpy as np
from mep.genetics.population import Population
from mep.genetics.chromosome import Chromosome
class TestPopulation(unittest.TestCase):
"""
Test the Population class.
"""
def test_random_tournament_selection(self):
"""
Test the random_tournament_selection(...)
"""
# seed the RNG so this test is repeatable
random.seed(0)
# construct the population
num_examples = 5
num_features = 7
population = Population(np.zeros((num_examples, num_features)), [], 1, 1, 1, 1, 1, 1, 1)
# confirm the number of feature variables (not critical for this test)
self.assertEqual(num_features, population.num_feature_variables)
# test the tournament selection; note that with a tournament size of 1 it can randomly pick the worse chromosome
min_chromosome, max_chromosome = Chromosome([], []), Chromosome([], [])
min_chromosome.error = 1
max_chromosome.error = 2
population.chromosomes = [min_chromosome, max_chromosome]
self.assertEqual(max_chromosome, population.random_tournament_selection(1))
def test_larger_random_tournament_selection(self):
"""
Test random_tournament_selection(...) with a tournament size larger than the population.
"""
# seed the RNG so this test is repeatable
random.seed(0)
# construct the population
num_examples = 5
num_features = 7
population = Population(np.zeros((num_examples, num_features)), [], 1, 1, 1, 1, 1, 1, 1)
# test the tournament selection; with a tournament size of 10 on a population of 2, the better chromosome is almost surely sampled and wins
min_chromosome, max_chromosome = Chromosome([], []), Chromosome([], [])
min_chromosome.error = 1
max_chromosome.error = 2
population.chromosomes = [min_chromosome, max_chromosome]
self.assertEqual(min_chromosome, population.random_tournament_selection(10))
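# Illustrative sketch (not the library's actual implementation, which lives in
# mep.genetics.population): tournament selection samples `size` chromosomes
# uniformly at random and keeps the fittest (lowest-error) one. With size=1 a
# single random pick may return the worse chromosome; with size=10 on a
# population of 2 the better chromosome is sampled almost surely and wins.
def sketch_tournament_selection(chromosomes, size):
    contestants = [random.choice(chromosomes) for _ in range(size)]
    return min(contestants, key=lambda c: c.error)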
|
[
"numpy.zeros",
"random.seed",
"mep.genetics.chromosome.Chromosome"
] |
[((401, 415), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (412, 415), False, 'import random\n'), ((1321, 1335), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (1332, 1335), False, 'import random\n'), ((534, 572), 'numpy.zeros', 'np.zeros', (['(num_examples, num_features)'], {}), '((num_examples, num_features))\n', (542, 572), True, 'import numpy as np\n'), ((891, 909), 'mep.genetics.chromosome.Chromosome', 'Chromosome', (['[]', '[]'], {}), '([], [])\n', (901, 909), False, 'from mep.genetics.chromosome import Chromosome\n'), ((911, 929), 'mep.genetics.chromosome.Chromosome', 'Chromosome', (['[]', '[]'], {}), '([], [])\n', (921, 929), False, 'from mep.genetics.chromosome import Chromosome\n'), ((1454, 1492), 'numpy.zeros', 'np.zeros', (['(num_examples, num_features)'], {}), '((num_examples, num_features))\n', (1462, 1492), True, 'import numpy as np\n'), ((1658, 1676), 'mep.genetics.chromosome.Chromosome', 'Chromosome', (['[]', '[]'], {}), '([], [])\n', (1668, 1676), False, 'from mep.genetics.chromosome import Chromosome\n'), ((1678, 1696), 'mep.genetics.chromosome.Chromosome', 'Chromosome', (['[]', '[]'], {}), '([], [])\n', (1688, 1696), False, 'from mep.genetics.chromosome import Chromosome\n')]
|
from typing import Callable
import numpy as np
import torch
import torch.nn as nn
from util.data import transform_observation
class PommerQEmbeddingRNN(nn.Module):
def __init__(self, embedding_model):
super(PommerQEmbeddingRNN, self).__init__()
self.embedding_model = embedding_model
self.memory = []
self.steps = 10
# Stacked LSTM; nn.ModuleList registers each layer's parameters with the module
# (a plain Python list would hide them from .parameters() and .to(device))
self.rnn = nn.ModuleList([nn.LSTM(64, 64) for _ in range(self.steps)])
self.linear = nn.Sequential(
nn.Flatten(),
nn.ReLU(),
nn.Linear(in_features=64, out_features=6),
nn.Softmax(dim=-1)
)
def forward(self, obs):
while len(self.memory) >= self.steps:
self.memory.pop(0)
while len(self.memory) != self.steps:
self.memory.append(obs)
# x=obs[0] # Board Embedding
x = None
h = None
for obs_n, rnn_n in zip(self.memory, self.rnn):
x_n = obs_n[0]
x, h = rnn_n(x_n, h)
x = self.linear(x).squeeze()
return x
def get_transformer(self) -> Callable:
"""
Return a callable for input transformation.
The callable should take a ``dict`` containing data of a single
observation from the Pommerman environment and return a ``list``
of individual numpy arrays that can be used later as an input
value in the ``forward()`` function.
"""
def transformer(obs: dict) -> list:
planes = transform_observation(obs, p_obs=True, centralized=True)
planes = np.array(planes, dtype=np.float32)
# Generate embedding
# flattened = planes.flatten()
# flattened = torch.tensor(flattened, device=torch.device('cpu')) # TODO: Make 'cpu' variable
X = torch.tensor(planes, device=torch.device('cpu')).unsqueeze(0)
board_embedding = self.embedding_model.forward(X)
board_embedding = board_embedding.detach().numpy()
return [
board_embedding
]
return transformer
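# Why nn.ModuleList matters above (illustrative, independent of this model):
# submodules kept in a plain Python list are invisible to PyTorch, so their
# parameters are never returned by .parameters(), trained, or moved by .to().
class _PlainList(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = [nn.Linear(4, 4)]  # NOT registered
class _Registered(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList([nn.Linear(4, 4)])  # registered
# len(list(_PlainList().parameters())) == 0
# len(list(_Registered().parameters())) == 2  (weight and bias)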
|
[
"torch.nn.ReLU",
"util.data.transform_observation",
"torch.nn.Softmax",
"numpy.array",
"torch.nn.Linear",
"torch.device",
"torch.nn.LSTM",
"torch.nn.Flatten"
] |
[((400, 415), 'torch.nn.LSTM', 'nn.LSTM', (['(64)', '(64)'], {}), '(64, 64)\n', (407, 415), True, 'import torch.nn as nn\n'), ((497, 509), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (507, 509), True, 'import torch.nn as nn\n'), ((523, 532), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (530, 532), True, 'import torch.nn as nn\n'), ((546, 587), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(64)', 'out_features': '(6)'}), '(in_features=64, out_features=6)\n', (555, 587), True, 'import torch.nn as nn\n'), ((601, 619), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (611, 619), True, 'import torch.nn as nn\n'), ((1510, 1566), 'util.data.transform_observation', 'transform_observation', (['obs'], {'p_obs': '(True)', 'centralized': '(True)'}), '(obs, p_obs=True, centralized=True)\n', (1531, 1566), False, 'from util.data import transform_observation\n'), ((1588, 1622), 'numpy.array', 'np.array', (['planes'], {'dtype': 'np.float32'}), '(planes, dtype=np.float32)\n', (1596, 1622), True, 'import numpy as np\n'), ((1850, 1869), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1862, 1869), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
#
from math import pi
import numpy
from .. import helpers
def show(scheme, backend="mpl"):
"""Displays scheme for 3D ball quadrature.
"""
helpers.backend_to_function[backend](
scheme.points,
scheme.weights,
volume=4.0 / 3.0 * pi,
edges=[],
balls=[((0.0, 0.0, 0.0), 1.0)],
)
return
def integrate(f, center, radius, rule, dot=numpy.dot):
center = numpy.array(center)
rr = numpy.multiply.outer(radius, rule.points)
rr = numpy.swapaxes(rr, 0, -2)
ff = numpy.array(f((rr + center).T))
return numpy.array(radius) ** 3 * dot(ff, rule.weights)
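# Usage sketch with a hypothetical one-node rule (a real scheme would supply
# many weighted nodes): a single node at the origin, weighted by the volume
# 4/3*pi of the unit ball, makes `integrate` exact for constant integrands.
class _MidpointRule:
    points = numpy.zeros((1, 3))            # one node at the origin
    weights = numpy.array([4.0 / 3.0 * pi])  # weight = volume of the unit ball
_val = integrate(lambda x: numpy.ones(x.shape[1:]), [0.0, 0.0, 0.0], 1.0, _MidpointRule())
# _val == 4.18879... == 4/3 * pi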
|
[
"numpy.multiply.outer",
"numpy.array",
"numpy.swapaxes"
] |
[((438, 457), 'numpy.array', 'numpy.array', (['center'], {}), '(center)\n', (449, 457), False, 'import numpy\n'), ((467, 508), 'numpy.multiply.outer', 'numpy.multiply.outer', (['radius', 'rule.points'], {}), '(radius, rule.points)\n', (487, 508), False, 'import numpy\n'), ((518, 543), 'numpy.swapaxes', 'numpy.swapaxes', (['rr', '(0)', '(-2)'], {}), '(rr, 0, -2)\n', (532, 543), False, 'import numpy\n'), ((596, 615), 'numpy.array', 'numpy.array', (['radius'], {}), '(radius)\n', (607, 615), False, 'import numpy\n')]
|
from __future__ import print_function
import keras.backend as K
import keras.losses as losses
import keras.optimizers as optimizers
import numpy as np
from keras.callbacks import ModelCheckpoint
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input, RepeatVector, Reshape
from keras.layers.embeddings import Embedding
from keras.layers.merge import Concatenate, Multiply
from keras.losses import binary_crossentropy
from keras.models import Model, Sequential
from .multi_sampler import *
class PretrainImageAutoencoder(RobotMultiPredictionSampler):
def __init__(self, taskdef, *args, **kwargs):
'''
As in the other models, we call super() to parse arguments from the
command line and set things like our optimizer and learning rate.
'''
super(PretrainImageAutoencoder, self).__init__(taskdef, *args, **kwargs)
self.PredictorCb = ImageCb
self.save_encoder_decoder = True
def _makePredictor(self, features):
'''
Create model to predict possible manipulation goals.
'''
(images, arm, gripper) = features
img_shape, image_size, arm_size, gripper_size = self._sizes(
images,
arm,
gripper)
img_in = Input(img_shape,name="predictor_img_in")
img0_in = Input(img_shape,name="predictor_img0_in")
option_in = Input((1,), name="predictor_option_in")
encoder = self._makeImageEncoder(img_shape)
ins = [img0_in, img_in]
# Create the encoder
enc = encoder(img_in)
#enc = Dropout(self.dropout_rate)(enc)
decoder = self._makeImageDecoder(
self.hidden_shape,
self.skip_shape,)
out = decoder(enc)
if not self.no_disc:
# Create the discriminator to make sure this is a good image
image_discriminator = MakeImageClassifier(self, img_shape)
image_discriminator.load_weights(
self.makeName("discriminator", "classifier"))
image_discriminator.trainable = False
o2 = image_discriminator([img0_in, out])
if self.no_disc:
ae = Model(ins, [out])
ae.compile(
loss=["mae"],
loss_weights=[1.],
optimizer=self.getOptimizer())
else:
ae = Model(ins, [out, o2])
ae.compile(
loss=["mae"] + ["categorical_crossentropy"],
loss_weights=[1.,1e-3],
optimizer=self.getOptimizer())
encoder.summary()
decoder.summary()
ae.summary()
return ae, ae, None, [img_in], enc
def _getData(self, *args, **kwargs):
features, targets = GetAllMultiData(self.num_options, *args, **kwargs)
[I, q, g, oin, label, q_target, g_target,] = features
o1 = targets[1]
I0 = I[0,:,:,:]
length = I.shape[0]
I0 = np.tile(np.expand_dims(I0,axis=0),[length,1,1,1])
if self.no_disc:
return [I0, I], [I]
else:
o1_1h = np.squeeze(ToOneHot2D(o1, self.num_options))
return [I0, I], [I, o1_1h]
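# `ToOneHot2D` comes from the wildcard multi_sampler import above; a plausible
# sketch of the behavior relied on here (assumed, not the verified library
# code), under a different name so it cannot shadow the real import:
def _to_one_hot_2d(labels, num_options):
    labels = np.asarray(labels, dtype=int).reshape(-1)
    onehot = np.zeros((labels.shape[0], num_options))
    onehot[np.arange(labels.shape[0]), labels] = 1.0
    return onehot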
|
[
"keras.layers.Input",
"numpy.expand_dims",
"keras.models.Model"
] |
[((1293, 1334), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""predictor_img_in"""'}), "(img_shape, name='predictor_img_in')\n", (1298, 1334), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((1352, 1394), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""predictor_img0_in"""'}), "(img_shape, name='predictor_img0_in')\n", (1357, 1394), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((1414, 1453), 'keras.layers.Input', 'Input', (['(1,)'], {'name': '"""predictor_option_in"""'}), "((1,), name='predictor_option_in')\n", (1419, 1453), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((2223, 2240), 'keras.models.Model', 'Model', (['ins', '[out]'], {}), '(ins, [out])\n', (2228, 2240), False, 'from keras.models import Model, Sequential\n'), ((2420, 2441), 'keras.models.Model', 'Model', (['ins', '[out, o2]'], {}), '(ins, [out, o2])\n', (2425, 2441), False, 'from keras.models import Model, Sequential\n'), ((3023, 3049), 'numpy.expand_dims', 'np.expand_dims', (['I0'], {'axis': '(0)'}), '(I0, axis=0)\n', (3037, 3049), True, 'import numpy as np\n')]
|
import torch
import numpy as np
import pickle
def filterit(s,W2ID):
s=s.lower()
S=''
for c in s:
if c in ' abcdefghijklmnopqrstuvwxyz0123456789':
S+=c
S = " ".join([x if x and x in W2ID else "<unk>" for x in S.split()])
return S
def Sentence2Embeddings(sentence,W2ID,EMB):
if type(sentence)==str:
sentence = filterit(sentence, W2ID)
#print(sentence)
IDS = torch.tensor([W2ID[i] for i in sentence.split(" ")])
return EMB(IDS)
if type(sentence)==list:
sembs = []
for sent in sentence:
sent = filterit(sent,W2ID)
IDS = torch.tensor([W2ID[i] for i in sent.split(" ")])
sembs.append(EMB(IDS))
sembs = torch.nn.utils.rnn.pad_sequence(sembs,batch_first=True)
return sembs
def GetEmbeddings(path='./student_code/supportfiles/GloVe300.d'):
GloVe = pickle.load(open(path,'rb'))
W2ID = {w:i for i,w in enumerate(sorted(list(GloVe.keys())))}
EMB = torch.nn.Embedding(len(W2ID),300)
EMB.weight.requires_grad=False
GloVeW = np.vstack([GloVe[w] for w in W2ID])
EMB.weight.data.copy_(torch.from_numpy(GloVeW))
return W2ID, EMB
def getAnsWords(path='./student_code/supportfiles/CoAttAns.d'):
with open(path,'rb') as file:
data = pickle.load(file)
return data
def Answer2OneHot1(answers,AW):
A=[]
for answer in answers:
Aembs = torch.zeros(len(AW))
for w in answer.split(" "):
if w in AW:
Aembs[AW[w]]=1
break
else:
Aembs[0]=1
break
A.append(Aembs)
A = torch.stack(A)
return A
def Answer2OneHot(answers,AW):
A=[]
for answer in answers:
Aembs = torch.zeros(len(AW))
w = answer.split(" ")[0]
if w in AW: Aembs[AW[w]] = 1
else: Aembs[-1] = 1
A.append(Aembs)
A = torch.stack(A)
return A
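# Usage sketch with a tiny in-memory vocabulary (the pickled GloVe file above
# is unavailable here, so this toy embedding table is random and 4-dimensional):
if __name__ == "__main__":
    W2ID = {"<unk>": 0, "hello": 1, "world": 2}
    EMB = torch.nn.Embedding(len(W2ID), 4)
    print(Sentence2Embeddings("Hello, world!", W2ID, EMB).shape)         # (2, 4)
    print(Sentence2Embeddings(["hello", "hello world"], W2ID, EMB).shape)  # padded: (2, 2, 4)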
|
[
"torch.stack",
"pickle.load",
"torch.nn.utils.rnn.pad_sequence",
"numpy.vstack",
"torch.from_numpy"
] |
[((1080, 1115), 'numpy.vstack', 'np.vstack', (['[GloVe[w] for w in W2ID]'], {}), '([GloVe[w] for w in W2ID])\n', (1089, 1115), True, 'import numpy as np\n'), ((1651, 1665), 'torch.stack', 'torch.stack', (['A'], {}), '(A)\n', (1662, 1665), False, 'import torch\n'), ((1908, 1922), 'torch.stack', 'torch.stack', (['A'], {}), '(A)\n', (1919, 1922), False, 'import torch\n'), ((737, 793), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['sembs'], {'batch_first': '(True)'}), '(sembs, batch_first=True)\n', (768, 793), False, 'import torch\n'), ((1142, 1166), 'torch.from_numpy', 'torch.from_numpy', (['GloVeW'], {}), '(GloVeW)\n', (1158, 1166), False, 'import torch\n'), ((1303, 1320), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1314, 1320), False, 'import pickle\n')]
|
import numpy as np
import torch
import torch.nn as nn
from two_thinning.average_based.RL.basic_neuralnet_RL.neural_network import AverageTwoThinningNet
n = 10
m = n
epsilon = 0.1
train_episodes = 3000
eval_runs = 300
patience = 20
print_progress = True
print_behaviour = False
def reward(x):
return -np.max(x)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def greedy(model, ball_number):
action_values = model(torch.DoubleTensor([ball_number]))
a = torch.argmax(action_values)
return a
def epsilon_greedy(model, ball_number, epsilon=epsilon):
action_values = model(torch.DoubleTensor([ball_number]))
r = torch.rand(1)
if r < epsilon:
a = torch.randint(len(action_values), (1,))[0]
else:
a = torch.argmax(action_values)
return a, action_values[a]
def evaluate_q_values(model, n=n, m=m, reward=reward, eval_runs=eval_runs, print_behaviour=print_behaviour):
with torch.no_grad():
sum_loads = 0
for _ in range(eval_runs):
loads = np.zeros(n)
for i in range(m):
a = greedy(model, i)
if print_behaviour:
print(f"With loads {loads} the trained model chose {a}")
randomly_selected = np.random.randint(n)
if loads[randomly_selected] <= a:
loads[randomly_selected] += 1
else:
loads[np.random.randint(n)] += 1
sum_loads += reward(loads)
avg_score = sum_loads / eval_runs
return avg_score
def train(n=n, m=m, epsilon=epsilon, reward=reward, episodes=train_episodes, eval_runs=eval_runs, patience=patience,
print_progress=print_progress, print_behaviour=print_behaviour, device=device):
curr_model = AverageTwoThinningNet(m, device)
best_model = AverageTwoThinningNet(m, device)
optimizer = torch.optim.Adam(curr_model.parameters())
mse_loss = nn.MSELoss()
best_eval_score = None
not_improved = 0
for ep in range(episodes):
loads = np.zeros(n)
for i in range(m):
a, old_val = epsilon_greedy(curr_model, i, epsilon)
randomly_selected = np.random.randint(n)
if loads[randomly_selected] <= a:
loads[randomly_selected] += 1
else:
loads[np.random.randint(n)] += 1
if i == m - 1:
new_val = torch.as_tensor(reward(loads)).to(device)
else:
_, new_val = epsilon_greedy(curr_model, i + 1, epsilon)
new_val = new_val.detach()
loss = mse_loss(old_val, new_val)
optimizer.zero_grad()
loss.backward()
optimizer.step()
curr_eval_score = evaluate_q_values(curr_model, n=n, m=m, reward=reward, eval_runs=eval_runs,
print_behaviour=print_behaviour)
if best_eval_score is None or curr_eval_score > best_eval_score:
best_eval_score = curr_eval_score
best_model.load_state_dict(curr_model.state_dict())
not_improved = 0
if print_progress:
print(f"At episode {ep} the best eval score has improved to {curr_eval_score}.")
elif not_improved < patience:
not_improved += 1
if print_progress:
print(f"At episode {ep} no improvement happened.")
else:
if print_progress:
print(f"Training has stopped after episode {ep} as the eval score didn't improve anymore.")
break
return best_model
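# Standalone sketch of the two-thinning dynamics that train() and
# evaluate_q_values() simulate, with a fixed threshold in place of the learned
# policy: each ball draws a uniform bin and is placed there only if that bin's
# load is at most the threshold; otherwise it goes to a second uniform draw.
def two_thinning_run(n_bins=n, n_balls=m, threshold=1):
    loads = np.zeros(n_bins)
    for _ in range(n_balls):
        first = np.random.randint(n_bins)
        if loads[first] <= threshold:
            loads[first] += 1
        else:
            loads[np.random.randint(n_bins)] += 1
    return np.max(loads)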
if __name__ == "__main__":
train()
|
[
"torch.nn.MSELoss",
"torch.argmax",
"torch.DoubleTensor",
"numpy.zeros",
"two_thinning.average_based.RL.basic_neuralnet_RL.neural_network.AverageTwoThinningNet",
"numpy.max",
"numpy.random.randint",
"torch.cuda.is_available",
"torch.rand",
"torch.no_grad"
] |
[((494, 521), 'torch.argmax', 'torch.argmax', (['action_values'], {}), '(action_values)\n', (506, 521), False, 'import torch\n'), ((663, 676), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (673, 676), False, 'import torch\n'), ((1804, 1836), 'two_thinning.average_based.RL.basic_neuralnet_RL.neural_network.AverageTwoThinningNet', 'AverageTwoThinningNet', (['m', 'device'], {}), '(m, device)\n', (1825, 1836), False, 'from two_thinning.average_based.RL.basic_neuralnet_RL.neural_network import AverageTwoThinningNet\n'), ((1854, 1886), 'two_thinning.average_based.RL.basic_neuralnet_RL.neural_network.AverageTwoThinningNet', 'AverageTwoThinningNet', (['m', 'device'], {}), '(m, device)\n', (1875, 1886), False, 'from two_thinning.average_based.RL.basic_neuralnet_RL.neural_network import AverageTwoThinningNet\n'), ((1960, 1972), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1970, 1972), True, 'import torch.nn as nn\n'), ((309, 318), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (315, 318), True, 'import numpy as np\n'), ((353, 378), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (376, 378), False, 'import torch\n'), ((451, 484), 'torch.DoubleTensor', 'torch.DoubleTensor', (['[ball_number]'], {}), '([ball_number])\n', (469, 484), False, 'import torch\n'), ((620, 653), 'torch.DoubleTensor', 'torch.DoubleTensor', (['[ball_number]'], {}), '([ball_number])\n', (638, 653), False, 'import torch\n'), ((774, 801), 'torch.argmax', 'torch.argmax', (['action_values'], {}), '(action_values)\n', (786, 801), False, 'import torch\n'), ((953, 968), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (966, 968), False, 'import torch\n'), ((2070, 2081), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2078, 2081), True, 'import numpy as np\n'), ((1047, 1058), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1055, 1058), True, 'import numpy as np\n'), ((2205, 2225), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (2222, 2225), True, 'import numpy as np\n'), ((1276, 1296), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (1293, 1296), True, 'import numpy as np\n'), ((2358, 2378), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (2375, 2378), True, 'import numpy as np\n'), ((1445, 1465), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (1462, 1465), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Apr 13, 2015
BLAS class to use with ocl backend.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
from cuda4py.blas import CUBLAS_OP_N, CUBLAS_OP_T
import numpy
import opencl4py.blas as clblas
import os
import threading
import weakref
from zope.interface import implementer
from veles.accelerated_units import AcceleratedUnit, IOpenCLUnit
from veles.config import root
from veles.dummy import DummyWorkflow
from veles.logger import Logger
from veles.numpy_ext import roundup
@implementer(IOpenCLUnit)
class Builder(AcceleratedUnit):
"""Dummy unit for building OpenCL kernels.
"""
def __init__(self, workflow, **kwargs):
super(Builder, self).__init__(workflow, **kwargs)
self.source = kwargs["source"]
self.defines = kwargs["defines"]
self.kernel_name = kwargs["kernel_name"]
self.cache_file_name = kwargs["cache_file_name"]
self.dtype = kwargs["dtype"]
@property
def kernel(self):
return self._kernel_
def ocl_init(self):
self.sources_[self.source] = {}
self.build_program(self.defines, self.cache_file_name, self.dtype)
self.assign_kernel(self.kernel_name)
def ocl_run(self):
pass
class OCLBLAS(Logger):
"""Class with BLAS functionality similar to CUBLAS.
It uses CLBLAS when available or custom kernels otherwise.
"""
@staticmethod
def attach_to_device(device):
if device.blas is None:
device.blas = OCLBLAS(device)
def __init__(self, device):
super(OCLBLAS, self).__init__()
self._lock_ = threading.Lock()
self._device = weakref.ref(device)
self.kernels = {}
self._const_i = numpy.zeros(3, dtype=numpy.uint64)
try:
if (root.common.engine.ocl.clBLAS is not True or
root.common.engine.precision_level > 0):
raise ValueError()
if "CLBLAS_STORAGE_PATH" not in os.environ:
found = False
for dirnme in root.common.engine.device_dirs:
for path, _, files in os.walk(dirnme):
for f in files:
if f.endswith(".kdb"):
found = True
os.environ["CLBLAS_STORAGE_PATH"] = path
break
if found:
break
if found:
break
self.blas = clblas.CLBLAS()
self._sgemm = self.clblas_sgemm
self._dgemm = self.clblas_dgemm
self.debug("Using clBLAS for matrix multiplication")
except (OSError, RuntimeError, ValueError):
self._sgemm = self.veles_gemm
self._dgemm = self.veles_gemm
self.debug("Using Veles OpenCL kernels for matrix multiplication")
@property
def device(self):
return self._device()
@staticmethod
def gemm(dtype):
if dtype == numpy.float32:
return OCLBLAS.sgemm
if dtype == numpy.float64:
return OCLBLAS.dgemm
raise ValueError("Invalid dtype %s" % dtype)
def sgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
return self._sgemm(
transA, transB, rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def dgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
return self._dgemm(
transA, transB, rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def clblas_sgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using clBLAS.
Matrices are assumed to be tightly packed and stored like in CUBLAS.
Single precision (float) version.
"""
self.blas.sgemm((self.device.queue_,), clblas.clblasColumnMajor,
transA, transB, rowsCountA, columnCountB,
commonSideLength, alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def clblas_dgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using clBLAS.
Matrices are assumed to be tightly packed and stored like in CUBLAS.
Double precision (double) version.
"""
self.blas.dgemm((self.device.queue_,), clblas.clblasColumnMajor,
transA, transB, rowsCountA, columnCountB,
commonSideLength, alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def veles_gemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using custom kernel.
Matrices are assumed to be tightly packed and stored like in CUBLAS.
"""
with self._lock_:
self._veles_gemm(transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA, offsetB, offsetC)
def _veles_gemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA, offsetB, offsetC):
dtype = alpha.dtype
key = (transA, transB, rowsCountA, columnCountB, commonSideLength,
dtype)
krn_info = self.kernels.get(key)
if krn_info is None:
block_size, vector_opt = self.device.device_info.get_kernel_bs_vo(
kernel="matrix_multiplication", dtype=dtype)
defines = {
"BLOCK_SIZE": block_size,
"VECTOR_OPT": int(bool(vector_opt)),
"B_WIDTH": rowsCountA,
"A_WIDTH": columnCountB,
"AB_COMMON": commonSideLength
}
if transB == CUBLAS_OP_T:
defines["A_COL"] = 1
else:
assert transB == CUBLAS_OP_N
if transA == CUBLAS_OP_N:
defines["B_COL"] = 1
else:
assert transA == CUBLAS_OP_T
global_size = (roundup(rowsCountA, block_size),
roundup(columnCountB, block_size))
local_size = (block_size, block_size)
w = DummyWorkflow()
builder = Builder(
w, source="gemm", defines=defines, kernel_name="gemm",
cache_file_name=(
"veles_gemm_%s" % "_".join(str(x) for x in key)),
dtype=dtype)
builder.initialize(self.device)
krn_info = (builder.kernel, global_size, local_size)
self.kernels[key] = krn_info
del builder
del w
# Set the constants and execute the kernel
krn = krn_info[0]
self._const_i[0:3] = offsetA, offsetB, offsetC
# Our kernel stores output in row-major order, so swap A and B
krn.set_args(B, A, C, alpha, beta, self._const_i[1:2],
self._const_i[0:1], self._const_i[2:3])
global_size = krn_info[1]
local_size = krn_info[2]
self.device.queue_.execute_kernel(krn, global_size, local_size,
need_event=False)
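# Reference semantics of the gemm wrappers above, in numpy terms (illustrative
# only: the real kernels run on the OpenCL device over flat, column-major
# buffers addressed with the offset arguments).
def _gemm_reference(transA, transB, alpha, A, B, beta, C):
    opA = A.T if transA == CUBLAS_OP_T else A
    opB = B.T if transB == CUBLAS_OP_T else B
    return alpha * numpy.dot(opA, opB) + beta * C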
|
[
"opencl4py.blas.CLBLAS",
"zope.interface.implementer",
"os.walk",
"numpy.zeros",
"threading.Lock",
"veles.dummy.DummyWorkflow",
"veles.numpy_ext.roundup",
"weakref.ref"
] |
[((1621, 1645), 'zope.interface.implementer', 'implementer', (['IOpenCLUnit'], {}), '(IOpenCLUnit)\n', (1632, 1645), False, 'from zope.interface import implementer\n'), ((2720, 2736), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2734, 2736), False, 'import threading\n'), ((2760, 2779), 'weakref.ref', 'weakref.ref', (['device'], {}), '(device)\n', (2771, 2779), False, 'import weakref\n'), ((2830, 2864), 'numpy.zeros', 'numpy.zeros', (['(3)'], {'dtype': 'numpy.uint64'}), '(3, dtype=numpy.uint64)\n', (2841, 2864), False, 'import numpy\n'), ((3641, 3656), 'opencl4py.blas.CLBLAS', 'clblas.CLBLAS', ([], {}), '()\n', (3654, 3656), True, 'import opencl4py.blas as clblas\n'), ((8184, 8199), 'veles.dummy.DummyWorkflow', 'DummyWorkflow', ([], {}), '()\n', (8197, 8199), False, 'from veles.dummy import DummyWorkflow\n'), ((8023, 8054), 'veles.numpy_ext.roundup', 'roundup', (['rowsCountA', 'block_size'], {}), '(rowsCountA, block_size)\n', (8030, 8054), False, 'from veles.numpy_ext import roundup\n'), ((8083, 8116), 'veles.numpy_ext.roundup', 'roundup', (['columnCountB', 'block_size'], {}), '(columnCountB, block_size)\n', (8090, 8116), False, 'from veles.numpy_ext import roundup\n'), ((3225, 3240), 'os.walk', 'os.walk', (['dirnme'], {}), '(dirnme)\n', (3232, 3240), False, 'import os\n')]
|
import numpy as np
from lipkin.model import LipkinModel
class HartreeFock(LipkinModel):
name = 'Hartree-Fock'
def __init__(self, epsilon, V, Omega):
if Omega%2 == 1:
raise ValueError('This HF implementation assumes N = Omega = even.')
LipkinModel.__init__(self, epsilon, V, Omega, Omega)
self.r_gs = (-1)**(0.5*self.Omega)
self.err = 1E-8
def solve_equations(self, num_iter=100, theta0=0.0, phi0=0.0):
# set initial tau
tau = np.array([theta0, phi0])
# construct HF hamiltonian
h = self.get_self_consistent_hamiltonian(tau)
# construct kinetic energy
T = np.zeros((2,2), dtype=np.complex128)
T[0,0] = -0.5*self.epsilon*self.Omega
T[1,1] = 0.5*self.epsilon*self.Omega
# container for single particle potential
Gamma = np.zeros((2,2), dtype=np.complex128)
for i in range(num_iter):
# solve eigenvalue problem
eigvals, eigvecs = np.linalg.eig(h)
# construct new density matrix
rho = np.outer(eigvecs[:,0], np.conjugate(eigvecs[:,0]))
# construct new potential
Gamma[0,1] = -self.V*self.Omega*(self.Omega-1)*rho[1,0]
Gamma[1,0] = -self.V*self.Omega*(self.Omega-1)*rho[0,1]
# construct new hamiltonian
h = T + Gamma
# calculate energy
E = 0.5*np.trace(np.dot(T+h, rho)).real
return E
def get_self_consistent_hamiltonian(self, tau):
theta, phi = tau[0], tau[1]
h = np.empty((2,2), dtype=np.complex128)
h[0,0] = 1
h[1,1] = -1
h[0,1] = self.chi*np.sin(theta)*np.exp(1j*phi)
h[1,0] = self.chi*np.sin(theta)*np.exp(-1j*phi)
return -0.5*self.epsilon*self.Omega*h
def minimize_energy(self, num_iter=10000):
# pick small initial tau = (theta, phi)
tau = np.random.normal(0.0, 0.1, 2)
# initialize adam optimizer
self.m = np.zeros(2)
self.v = np.zeros(2)
# start minimizing
for self.t in range(1, num_iter+1):
E = self.get_energy(tau)
grad = self.get_gradient_energy(tau)
tau = self.update_tau(tau, grad)
return tau
def minimize_signature_projected_energy(self, r, num_iter=10000):
# pick small initial tau = (theta, phi)
tau = np.random.normal(0.0, 0.1, 2)
# initialize adam optimizer
self.m = np.zeros(2)
self.v = np.zeros(2)
# start minimizing
for self.t in range(1, num_iter+1):
Er = self.get_signature_projected_energy(r, tau)
grad = self.get_gradient_projected_energy(r, tau)
tau = self.update_tau(tau, grad)
return tau
def get_energy(self, tau):
theta, phi = tau[0], tau[1]
E = np.cos(theta)+0.5*self.chi*(np.sin(theta)**2)*np.cos(2*phi)
return -0.5*self.epsilon*self.Omega*E
def get_gradient_energy(self, tau):
theta, phi = tau[0], tau[1]
factor = 0.5*self.epsilon*self.Omega*np.sin(theta)
dE_dtheta = factor*(1-self.chi*np.cos(theta)*np.cos(2*phi))
dE_dphi = factor*self.chi*np.sin(theta)*np.sin(2*phi)
return np.array([dE_dtheta, dE_dphi])
def get_weight(self, r, tau):
theta = tau[0]
a = 1.0+r*self.r_gs*(np.cos(theta))**(self.Omega-2)
b = 1.0+r*self.r_gs*(np.cos(theta))**self.Omega
if a < self.err and b < self.err:
return float((self.Omega-2))/float(self.Omega)
else:
return (a+self.err)/(b+self.err)
def get_gradient_weight(self, r, tau):
theta = tau[0]
a = 2*(1+r*self.r_gs*(np.cos(theta))**self.Omega)-self.Omega*(np.sin(theta))**2
a *= r*self.r_gs*np.sin(theta)*(np.cos(theta))**(self.Omega-3)
b = (1+r*self.r_gs*(np.cos(theta))**self.Omega)**2
if a < self.err and b < self.err:
return np.array([theta*float((self.Omega-2))/float(self.Omega), 0])
return np.array([(a+self.err)/(b+self.err), 0])
def get_signature_projected_energy(self, r, tau):
return self.get_energy(tau)*self.get_weight(r, tau)
def get_gradient_projected_energy(self, r, tau):
E = self.get_energy(tau)
W = self.get_weight(r, tau)
gradE = self.get_gradient_energy(tau)
gradW = self.get_gradient_weight(r, tau)
return E*gradW + W*gradE
def update_tau(self, tau, gradient, eta0=0.001, beta1=0.9, beta2=0.999, epsilon=1.0E-8):
eta = eta0*np.sqrt(1.0-beta2**self.t)/(1.0-beta1**self.t)
self.m = beta1*self.m+(1.0-beta1)*gradient
self.v = beta2*self.v+(1.0-beta2)*np.square(gradient)
tau -= eta*np.divide(self.m, np.sqrt(self.v)+epsilon)
self.t += 1
return tau
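if __name__ == "__main__":
    # Minimal usage sketch; the parameter values are illustrative only, and the
    # LipkinModel base class must supply the derived attribute `chi` used above.
    hf = HartreeFock(epsilon=1.0, V=0.05, Omega=4)  # Omega must be even
    tau = hf.minimize_energy(num_iter=5000)
    print("HF energy:", hf.get_energy(tau))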
|
[
"numpy.empty",
"numpy.square",
"numpy.zeros",
"numpy.linalg.eig",
"lipkin.model.LipkinModel.__init__",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.random.normal",
"numpy.cos",
"numpy.dot",
"numpy.conjugate",
"numpy.sqrt"
] |
[((292, 344), 'lipkin.model.LipkinModel.__init__', 'LipkinModel.__init__', (['self', 'epsilon', 'V', 'Omega', 'Omega'], {}), '(self, epsilon, V, Omega, Omega)\n', (312, 344), False, 'from lipkin.model import LipkinModel\n'), ((538, 562), 'numpy.array', 'np.array', (['[theta0, phi0]'], {}), '([theta0, phi0])\n', (546, 562), True, 'import numpy as np\n'), ((717, 754), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.complex128'}), '((2, 2), dtype=np.complex128)\n', (725, 754), True, 'import numpy as np\n'), ((920, 957), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.complex128'}), '((2, 2), dtype=np.complex128)\n', (928, 957), True, 'import numpy as np\n'), ((1740, 1777), 'numpy.empty', 'np.empty', (['(2, 2)'], {'dtype': 'np.complex128'}), '((2, 2), dtype=np.complex128)\n', (1748, 1777), True, 'import numpy as np\n'), ((2123, 2152), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.1)', '(2)'], {}), '(0.0, 0.1, 2)\n', (2139, 2152), True, 'import numpy as np\n'), ((2215, 2226), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2223, 2226), True, 'import numpy as np\n'), ((2244, 2255), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2252, 2255), True, 'import numpy as np\n'), ((2655, 2684), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.1)', '(2)'], {}), '(0.0, 0.1, 2)\n', (2671, 2684), True, 'import numpy as np\n'), ((2747, 2758), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2755, 2758), True, 'import numpy as np\n'), ((2776, 2787), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2784, 2787), True, 'import numpy as np\n'), ((3574, 3604), 'numpy.array', 'np.array', (['[dE_dtheta, dE_dphi]'], {}), '([dE_dtheta, dE_dphi])\n', (3582, 3604), True, 'import numpy as np\n'), ((4419, 4465), 'numpy.array', 'np.array', (['[(a + self.err) / (b + self.err), 0]'], {}), '([(a + self.err) / (b + self.err), 0])\n', (4427, 4465), True, 'import numpy as np\n'), ((1079, 1095), 'numpy.linalg.eig', 'np.linalg.eig', (['h'], {}), '(h)\n', (1092, 1095), True, 'import numpy as np\n'), ((1865, 1883), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (1871, 1883), True, 'import numpy as np\n'), ((1920, 1939), 'numpy.exp', 'np.exp', (['(-1.0j * phi)'], {}), '(-1.0j * phi)\n', (1926, 1939), True, 'import numpy as np\n'), ((3154, 3167), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3160, 3167), True, 'import numpy as np\n'), ((3414, 3427), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3420, 3427), True, 'import numpy as np\n'), ((3544, 3559), 'numpy.sin', 'np.sin', (['(2 * phi)'], {}), '(2 * phi)\n', (3550, 3559), True, 'import numpy as np\n'), ((1193, 1220), 'numpy.conjugate', 'np.conjugate', (['eigvecs[:, 0]'], {}), '(eigvecs[:, 0])\n', (1205, 1220), True, 'import numpy as np\n'), ((1851, 1864), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1857, 1864), True, 'import numpy as np\n'), ((1906, 1919), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1912, 1919), True, 'import numpy as np\n'), ((3200, 3215), 'numpy.cos', 'np.cos', (['(2 * phi)'], {}), '(2 * phi)\n', (3206, 3215), True, 'import numpy as np\n'), ((3530, 3543), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3536, 3543), True, 'import numpy as np\n'), ((4159, 4172), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4165, 4172), True, 'import numpy as np\n'), ((4174, 4187), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4180, 4187), True, 'import numpy as np\n'), ((4994, 5024), 'numpy.sqrt', 'np.sqrt', (['(1.0 - beta2 ** self.t)'], {}), '(1.0 - beta2 ** self.t)\n', (5001, 5024), True, 'import numpy as np\n'), ((5135, 5154), 'numpy.square', 'np.square', (['gradient'], {}), '(gradient)\n', (5144, 5154), True, 'import numpy as np\n'), ((3481, 3496), 'numpy.cos', 'np.cos', (['(2 * phi)'], {}), '(2 * phi)\n', (3487, 3496), True, 'import numpy as np\n'), ((3705, 3718), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3711, 3718), True, 'import numpy as np\n'), ((3765, 3778), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3771, 3778), True, 'import numpy as np\n'), ((4116, 4129), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4122, 4129), True, 'import numpy as np\n'), ((5193, 5208), 'numpy.sqrt', 'np.sqrt', (['self.v'], {}), '(self.v)\n', (5200, 5208), True, 'import numpy as np\n'), ((1560, 1578), 'numpy.dot', 'np.dot', (['(T + h)', 'rho'], {}), '(T + h, rho)\n', (1566, 1578), True, 'import numpy as np\n'), ((3182, 3195), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3188, 3195), True, 'import numpy as np\n'), ((3467, 3480), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3473, 3480), True, 'import numpy as np\n'), ((4233, 4246), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4239, 4246), True, 'import numpy as np\n'), ((4076, 4089), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4082, 4089), True, 'import numpy as np\n')]
|
# coding: utf-8
# ## Use housing data
# I have loaded the required modules: Pandas and NumPy. I have also included the sqrt function from the math library.<br>
# I have imported division from the __future__ module. Remove this import if the code is executed on Python 3; it mimics the behaviour of the Python 3 division operator on Python 2.
# In[2]:
import pandas as pd
import numpy as np
from __future__ import division
from math import sqrt
""" File path change accordingly"""
inputFilepath = "data/house.csv"
"""Using default seperator"""
housingData = pd.read_csv(inputFilepath)
housingData.head(10)
# ### TextEncoder
#
# Here the data is a mix of numbers and text. Text values cannot be used directly and must be converted to numeric data.<br>
# For this I have created the function textEncoder, which accepts a pandas series and returns a lookup dictionary (for recreating the numeric value of each text value) together with the encoded text vector.
# For encoding I apply a lambda function that looks each value up in the dictionary.
# In[3]:
""" Converts the text features into numeric values so that they can be used by
the downstream algorithms.
Accepts pandas series and returns lookup dictionary and encoded vector"""
def textEncoder(textVector):
if type(textVector) == pd.core.series.Series:
lookUpDictionary = {}
lookupValue = 1
for key in textVector.unique():
lookUpDictionary[key] = lookupValue
lookupValue +=1
textVector = textVector.apply(lambda a: lookUpDictionary[a])
return lookUpDictionary,textVector
else:
raise TypeError("Expected a pandas series as an input")
# I have encoded the nbhd and brick columns using textEncoder. The lookup dictionaries are not used in the downstream code; however, any future prediction will require the text data to be encoded the same way, hence I have kept them.
# In[4]:
nbhdFeatureLookup, housingData['nbhd'] = textEncoder(housingData['nbhd'])
brickFeatureLookup, housingData['brick'] = textEncoder(housingData['brick'])
housingData.head(10)
# ### SplitDataSet Procedure
# This method splits the dataset into a training set and a test set based upon the trainSetSize value. I use pandas.DataFrame.sample to draw the training set; the test set is its complement, obtained by dropping the indices present in the training set.
# In[5]:
"""Splits the provided pandas dataframe into training and test dataset"""
def splitDataSet(inputDataframe, trainSetSize):
trainSet = inputDataframe.sample(frac=trainSetSize)
testSet = inputDataframe.drop(trainSet.index)
return trainSet,testSet
# ## 2. Choose those columns, which can help you in prediction i.e. contain some useful information. You can drop irrelevant columns. Give reason for choosing or dropping any column.
# ### generatePearsonCoefficient Procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/f76ccfa7c2ed7f5b085115086107bbe25d329cec" />
# For a sample:
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/bd1ccc2979b0fd1c1aec96e386f686ae874f9ec0" />
# For selecting some features and dropping others I am using Pearson's coefficient. Its value lies in [-1, 1] and measures how strongly two features are linearly related.<br>
# <table>
# <tr><td>Strength of Association</td><td>Positive</td><td>Negative</td></tr><tr><td>Small</td><td>.1 to .3 </td><td>-0.1 to -0.3 </td></tr><tr><td>Medium</td><td>.3 to .5 </td><td>-0.3 to -0.5 </td></tr><tr><td>Large</td><td>.5 to 1.0 </td><td>-0.5 to -1.0 </td></tr></table>
#
# In[6]:
"""Generate pearson's coefficient"""
def generatePearsonCoefficient(A, B):
A_meanDiff = A - A.mean()
B_meanDiff = B - B.mean()
return ((A_meanDiff * B_meanDiff).sum())/(sqrt((A_meanDiff * A_meanDiff).sum()) * sqrt((B_meanDiff * B_meanDiff).sum()))
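# As a quick sanity check of the implementation, two perfectly linearly related series should give a coefficient of exactly 1.
# In[ ]:
print(generatePearsonCoefficient(pd.Series([1, 2, 3]), pd.Series([2, 4, 6])))  # 1.0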
# In[7]:
"""Generate the value of pearson constant for all the features"""
print("Pearson's coefficient of corelation for "+
"nbhd and price is "+ str(generatePearsonCoefficient(housingData.nbhd,housingData.price)))
print("Pearson's coefficient of corelation for "+
"offers and price is "+ str(generatePearsonCoefficient(housingData.offers,housingData.price)))
print("Pearson's coefficient of corelation for "+
"sqft and price is "+ str(generatePearsonCoefficient(housingData.sqft,housingData.price)))
print("Pearson's coefficient of corelation for "+
"bedrooms and price is "+ str(generatePearsonCoefficient(housingData.bedrooms,housingData.price)))
print("Pearson's coefficient of corelation for "+
"bathrooms and price is "+ str(generatePearsonCoefficient(housingData.bathrooms,housingData.price)))
print("Pearson's coefficient of corelation for "+
"brick and price is "+ str(generatePearsonCoefficient(housingData.brick,housingData.price)))
# The values of Pearson's coefficient suggest that sqft, bedrooms and bathrooms have a strong correlation with price. Offers has a weak negative correlation, and nbhd and brick have a medium correlation with price. I am keeping all the features since each has some correlation with the target.
# # Visualizing the relation between the X and Y
# Here I have used subplots to plot different X features and their relation with Y.
# In[8]:
import matplotlib.pyplot as plt
"""Set global rcParams for pyplotlib"""
plt.rcParams["figure.figsize"] = "18,15"
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3,2,sharey='none')
ax1.plot(housingData.nbhd,housingData.price,"ro")
ax1.grid()
ax1.set_title("nbhd vs price")
ax2.plot(housingData.offers,housingData.price,"ro")
ax2.grid()
ax2.set_title("no of offers vs price")
ax3.plot(housingData.sqft,housingData.price,"ro")
ax3.grid()
ax3.set_title("sqft vs price")
ax4.plot(housingData.brick,housingData.price,"ro")
ax4.grid()
ax4.set_title("brick vs price")
ax5.plot(housingData.bedrooms,housingData.price,"ro")
ax5.grid()
ax5.set_title("no of bedrooms vs price")
ax6.plot(housingData.bathrooms,housingData.price,"ro")
ax6.grid()
ax6.set_title("bathrooms vs price")
plt.show()
# ### gaussianSolverProcedure
# <b>Algorithm:-</b><br>
# <b>Step-1</b><br>
# Generate an augmented matrix.<br>
# <b>Step-2</b><br>
# Calculate pivot for a given column. Pivot is defined as a largest value in column following its index.<br>
# <b>Step-3</b><br>
# Place the piviot in the current row column.(Swap the row)<br>
# <b>Step-4</b><br>
# Make the value of other elements under pivot as zero. Use only row operations for this. Repeat this untill we get a upper triangular matrix.<br>
# <b>Step-5</b><br>
# Solve the upper trangular matrix using backward substitution.<br><br>
#
# The gaussian solver accepts two matrices A and B and tries to solve for x such that Ax = B
#
# In[9]:
"""Method for solving system of linear equations using gaussian elimination method"""
def gaussianSolver(A,B):
augmentedMatrix = np.hstack((A,B)) * 1.0
n = augmentedMatrix.shape[0]
for i in range(0, n):
"""Set default pivot value as diagonal matrix """
pivot = augmentedMatrix[i][i]
pivotRow = i
"""Check for a bigger pivot value"""
for j in range(i+1, n):
if abs(augmentedMatrix[j][i]) > abs(pivot):
pivot = augmentedMatrix[j][i]
pivotRow = j
"""If pivot has changed. Swap the rows"""
if pivotRow != i:
for j in range(0, n+1):
augmentedMatrix[pivotRow][j], augmentedMatrix[i][j] = augmentedMatrix[i][j], augmentedMatrix[pivotRow][j]
"""Make all the column values below pivot as zero by performing matrix row operations"""
for j in range(i+1, n):
op = -1 * (augmentedMatrix[j][i]/augmentedMatrix[i][i])
for k in range(0, n+1):
augmentedMatrix[j][k] = augmentedMatrix[j][k] + ( op * augmentedMatrix[i][k] )
""" Backward substitution to get values for B"""
beta = np.zeros(n)
for i in range(n - 1, -1,-1):
diff = 0
for k in range (i + 1, n):
diff = diff + (beta[k] * augmentedMatrix[i][k])
beta[i] = (augmentedMatrix[i][n] - diff)/augmentedMatrix[i][i]
return beta
# ### choleskyDecompositionSolver Procedure
# As per the Cholesky decomposition, a positive definite matrix A can be represented as L.L<sup>T</sup>, where L is a lower triangular matrix and L<sup>T</sup> is its transpose.<br>
# Here L is called the Cholesky factor.<br>
# The problem comes down to L.L<sup>T</sup>x = B<br>
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/abf826b0ffb86e190d432828d7485f52f618eaed" />
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/bb5adc5916e0762b2eca921de3e70ccae9bd2999" />
#
# <b>Algorithm:</b><br>
# <b>Step-1</b><br>
# Initialize a zero matrix<br>
# <b>Step-2</b><br>
# Calculate L using the above formula. After computing a diagonal element, move on to the next row. This generates the lower triangular matrix.<br/>
# <b>Step-3</b><br>
# Calculate vector Y using forward substitution: LY = b<br>
# <b>Step-4</b><br>
# Calculate vector X using backward substitution: L<sup>T</sup>X = Y<br>
# In[10]:
"""Method for solving the system of linear equations using cholesky decomposition"""
def choleskyDecompositionSolver(A, B):
"""Converting the matrix values to float"""
A = A * 1.0
B = B * 1.0
n = A.shape[0]
if A.shape[0] == A.shape[1]:
"""Generate cholesky factor"""
L = np.zeros(shape = A.shape)
for i in range(0, n):
for j in range (0, n):
L[i][j] = A[i][j]
"""Calculating diagonal elements"""
if i == j:
for k in range(0, j):
L[i][j] = L[i][j] - (L[i][k] * L[i][k])
L[i][j] = sqrt(L[i][j])
break
"""Calculating non diagonal elements"""
product = 0
for k in range (0, j):
product = product + (L[i][k] * L[j][k])
L[i][j] = (L[i][j] - product)/L[j][j]
"""Solving the system of linear equation
Ax=b
A can be decomposed into LU such that
Ly=b
Ux=y """
"""Forward substitution"""
Y = np.zeros(n)
for i in range(0, n):
diff = 0
for k in range (i -1, -1, -1):
diff = diff + (Y[k] * L[i][k])
Y[i] = (B[i] - diff)/L[i][i]
"""Backward substitution"""
beta = np.zeros(n)
U = L.T
for i in range(n - 1, -1,-1):
diff = 0
for k in range (i + 1, n):
diff = diff + (beta[k] * U[i][k])
beta[i] = (Y[i] - diff)/U[i][i]
return beta
else:
raise ValueError("Matrix A is not symmetric")
# ### qrDecompositionSolver Procedure
# A matrix A can be represented as the product of Q and R, where Q is an orthogonal matrix (Q<sup>T</sup>Q = QQ<sup>T</sup> = I) and R is an upper triangular matrix.
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/4b845398dd7df51edc31561a612423b20a83eb04" />
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/74356955f03f5c1171e9e812174a715eb112aef8" />
# <br>QR decomposition can be done in four steps
# <ul><li>Calculation of orthogonal basis</li><li>Calculation of orthonormal</li><li>QR factor calculation</li><li>Solving system of linear equation</li></ul>
# <br>
# <b>Algorithm:</b><br>
# <b>Step-1</b><br>
# Calculate an orthogonal basis using the Gram-Schmidt method. For the first vector the value is the vector itself. For each subsequent vector, the orthogonal basis vector is the vector minus its projections onto the previously calculated basis vectors.
# <br><b>Step-2</b><br>
# For the orthonormal basis we divide each orthogonal basis vector by its magnitude.
# <br><b>Step-3</b><br>
# Q = [orthonormalBasis]<br>
# R = Q<sup>T</sup>A
# <br><b>Step-4</b><br>
# For calculating the value of X in AX = B,<br>
# We calculate Y = Q<sup>T</sup>B<br>
# We solve RX = Y using backward substitution
#
# In[11]:
"""QR decomposition can be done in three steps
1) Calculation orthogonal basis
2) Calculation orthonormal
3) QR factor calculation"""
def qrDecompositionSolver(A, B):
A = A * 1.0
B = B * 1.0
"""Calculating the orthogonal basis"""
n = A.shape[1]
# Store deepcopy of A for processing
orthoBasis = np.array(A, copy = True)
for i in range(1, n):
"""Calculate the projections"""
diff = 0
for j in range(i-1, -1, -1):
diff = diff + (np.dot(orthoBasis[:,i],
orthoBasis[:,j])/np.dot(orthoBasis[:,j],orthoBasis[:,j]))*orthoBasis[:,j]
orthoBasis[:,i] = orthoBasis[:,i] - diff
"""Calculating orthonormal"""
for i in range(0, n):
orthoBasis[:,i] = orthoBasis[:,i]/np.sqrt(np.sum(np.square(orthoBasis[:,i])))
"""QR factorization"""
Q = orthoBasis
R = np.dot(orthoBasis.T,A)
"""Solving system of linear equation"""
B = np.dot(Q.T,B)
"""Backward substitution"""
beta = np.zeros(n)
for i in range(n - 1, -1,-1):
diff = 0
for k in range (i + 1, n):
diff = diff + (beta[k] * R[i][k])
beta[i] = (B[i] - diff)/R[i][i]
return beta
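# As a quick cross-check, all three solvers should agree with numpy's reference solver on a small symmetric positive-definite system, which is the kind of system the normal equations below produce.
# In[ ]:
Acheck = np.array([[4.0, 1.0], [1.0, 3.0]])
bcheck = np.array([1.0, 2.0])
print("reference:", np.linalg.solve(Acheck, bcheck))
print("gaussian: ", gaussianSolver(Acheck, bcheck.reshape(-1, 1)))
print("cholesky: ", choleskyDecompositionSolver(Acheck, bcheck))
print("qr:       ", qrDecompositionSolver(Acheck, bcheck))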
# ### learnLinregNormEq
# Solves system of linear equation in form of <br>
# X<sup>T</sup>XB = X<sup>T</sup>Y<br>
# Accepts three arguments X, Y and solver. Default value for solver is gaussianSolver
# In[12]:
"""Method to learn linear regression using normal equations. Default solver is
gaussian solver"""
def learnLinregNormEq(X, Y, solver = gaussianSolver):
if isinstance(X,np.ndarray) and isinstance(Y,np.ndarray):
if X.shape[0] != Y.shape[0]:
raise ValueError("The shape of X and Y is inconsistant")
X = np.insert(X, 0, 1, axis=1)
Xtranspose = X.T
XtX = np.dot(Xtranspose,X)
XtY = np.dot(Xtranspose,Y)
return solver(XtX, XtY)
else:
raise TypeError("Expected X and Y as numpy.ndarray")
# ### predictionLinearRegression Procedure
# This method predicts the values of Y given X and the model parameters. It adds a bias column to X.
# In[13]:
"""Method to make prediction for yTest"""
def predictionLinearRegression(X, modelParameters):
X = np.insert(X, 0, 1, axis=1)
yPrediction = np.dot(X,modelParameters.T)
return yPrediction
# ### RMSE procedure
# Calculates the root mean squared error for the given yTrue values and yPrediction.
#
# In[14]:
"""Model accuracy estimator RMSE"""
def RMSE(yTrue, yPrediction):
n = yTrue.shape[0]
return sqrt(np.sum(np.square(yTrue - yPrediction)) / n)
# # Solving the linear equations using gaussianSolver
# Here I am splitting the dataset into training and test set. For this I am using splitDataSet procedure with 80-20 split.<br>
# I have taken all the features.
# In[15]:
trainSet,testSet = splitDataSet(housingData,0.8)
Ytrain = trainSet.as_matrix(columns=['price'])
Ytest = testSet.as_matrix(columns=['price'])
print("Total items in training set "+str(Ytrain.shape))
print("Total items in test set "+str(Ytest.shape))
Xtrain = trainSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
Xtest = testSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
# I am learning the linear regression using the default (Gaussian) solver, making predictions with the predictionLinearRegression procedure, and computing the RMSE with the RMSE procedure and the average of the residuals with mean.
# In[16]:
"""Learn model parameters using gaussian solver"""
modelParamsGaussian = learnLinregNormEq(Xtrain, Ytrain)
"""Make prediction using modelParams"""
yPredictionGaussian = predictionLinearRegression(Xtest, modelParamsGaussian)
"""Calulate RMSE"""
print("RMSE for gaussian solver is "+str(RMSE(Ytest.flatten(),yPredictionGaussian)))
print("Average residual for gaussian solver is "+str((Ytest.flatten() - yPredictionGaussian).mean()))
# In[17]:
plt.plot(yPredictionGaussian - Ytest.flatten(), Ytest,"ro",label="ytest - ybar vs ytest")
plt.title("Plot for gaussian solver")
plt.xlabel("ytest - ybar")
plt.ylabel("ytest")
plt.show()
# # Solving the system of equations using Cholesky method
# In[18]:
trainSet,testSet = splitDataSet(housingData,0.8)
Ytrain = trainSet.as_matrix(columns=['price'])
Ytest = testSet.as_matrix(columns=['price'])
print("Total items in training set "+str(Ytrain.shape))
print("Total items in test set "+str(Ytest.shape))
Xtrain = trainSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
Xtest = testSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
# In[19]:
"""Learn model parameters using Cholesky solver"""
modelParamsCholesky = learnLinregNormEq(Xtrain, Ytrain,choleskyDecompositionSolver)
"""Make prediction using modelParams"""
yPredictionCholesky = predictionLinearRegression(Xtest, modelParamsCholesky)
"""Calulate RMSE"""
print("RMSE for Cholesky solver is "+str(RMSE(Ytest.flatten(),yPredictionCholesky)))
print("Average residual for Cholesky solver is "+str((Ytest.flatten() - yPredictionCholesky).mean()))
# In[20]:
plt.plot(yPredictionCholesky - Ytest.flatten(), Ytest,"bo",label="ytest - ybar vs ytest")
plt.title("Plot for Cholesky solver")
plt.xlabel("ytest - ybar")
plt.ylabel("ytest")
plt.show()
# # Solving the system of equations using QR decomposition method
# In[21]:
trainSet,testSet = splitDataSet(housingData,0.8)
Ytrain = trainSet.as_matrix(columns=['price'])
Ytest = testSet.as_matrix(columns=['price'])
print("Total items in training set "+str(Ytrain.shape))
print("Total items in test set "+str(Ytest.shape))
Xtrain = trainSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
Xtest = testSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
# In[22]:
"""Learn model parameters using QR Decomposition solver"""
modelParamsQR = learnLinregNormEq(Xtrain, Ytrain,qrDecompositionSolver)
"""Make prediction using modelParams"""
yPredictionQR = predictionLinearRegression(Xtest, modelParamsQR)
"""Calulate RMSE"""
print("RMSE for QR Decomposition solver is "+str(RMSE(Ytest.flatten(),yPredictionQR)))
print("Average residual for QR Decomposition solver is "+str((Ytest.flatten() - yPredictionQR).mean()))
# In[23]:
plt.plot(yPredictionQR - Ytest.flatten(), Ytest,"go",label="ytest - ybar vs ytest")
plt.title("Plot for QR decomposition solver")
plt.xlabel("ytest - ybar")
plt.ylabel("ytest")
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"math.sqrt",
"pandas.read_csv",
"numpy.square",
"numpy.zeros",
"numpy.insert",
"numpy.hstack",
"numpy.array",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] |
[((722, 748), 'pandas.read_csv', 'pd.read_csv', (['inputFilepath'], {}), '(inputFilepath)\n', (733, 748), True, 'import pandas as pd\n'), ((5725, 5758), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'sharey': '"""none"""'}), "(3, 2, sharey='none')\n", (5737, 5758), True, 'import matplotlib.pyplot as plt\n'), ((6354, 6364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6362, 6364), True, 'import matplotlib.pyplot as plt\n'), ((16720, 16757), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot for gaussian solver"""'], {}), "('Plot for gaussian solver')\n", (16729, 16757), True, 'import matplotlib.pyplot as plt\n'), ((16758, 16784), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ytest - ybar"""'], {}), "('ytest - ybar')\n", (16768, 16784), True, 'import matplotlib.pyplot as plt\n'), ((16785, 16804), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ytest"""'], {}), "('ytest')\n", (16795, 16804), True, 'import matplotlib.pyplot as plt\n'), ((16805, 16815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16813, 16815), True, 'import matplotlib.pyplot as plt\n'), ((17906, 17943), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot for Cholesky solver"""'], {}), "('Plot for Cholesky solver')\n", (17915, 17943), True, 'import matplotlib.pyplot as plt\n'), ((17944, 17970), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ytest - ybar"""'], {}), "('ytest - ybar')\n", (17954, 17970), True, 'import matplotlib.pyplot as plt\n'), ((17971, 17990), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ytest"""'], {}), "('ytest')\n", (17981, 17990), True, 'import matplotlib.pyplot as plt\n'), ((17991, 18001), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17999, 18001), True, 'import matplotlib.pyplot as plt\n'), ((19084, 19129), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot for QR decomposition solver"""'], {}), "('Plot for QR decomposition solver')\n", (19093, 19129), True, 'import matplotlib.pyplot as plt\n'), ((19130, 19156), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ytest - ybar"""'], {}), "('ytest - ybar')\n", (19140, 19156), True, 'import matplotlib.pyplot as plt\n'), ((19157, 19176), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ytest"""'], {}), "('ytest')\n", (19167, 19176), True, 'import matplotlib.pyplot as plt\n'), ((19177, 19187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19185, 19187), True, 'import matplotlib.pyplot as plt\n'), ((8306, 8317), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (8314, 8317), True, 'import numpy as np\n'), ((12902, 12924), 'numpy.array', 'np.array', (['A'], {'copy': '(True)'}), '(A, copy=True)\n', (12910, 12924), True, 'import numpy as np\n'), ((13474, 13497), 'numpy.dot', 'np.dot', (['orthoBasis.T', 'A'], {}), '(orthoBasis.T, A)\n', (13480, 13497), True, 'import numpy as np\n'), ((13554, 13568), 'numpy.dot', 'np.dot', (['Q.T', 'B'], {}), '(Q.T, B)\n', (13560, 13568), True, 'import numpy as np\n'), ((13616, 13627), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (13624, 13627), True, 'import numpy as np\n'), ((14908, 14934), 'numpy.insert', 'np.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (14917, 14934), True, 'import numpy as np\n'), ((14953, 14981), 'numpy.dot', 'np.dot', (['X', 'modelParameters.T'], {}), '(X, modelParameters.T)\n', (14959, 14981), True, 'import numpy as np\n'), ((7195, 7212), 'numpy.hstack', 'np.hstack', (['(A, B)'], {}), '((A, B))\n', (7204, 7212), True, 'import numpy as np\n'), ((9891, 9914), 'numpy.zeros', 'np.zeros', ([], {'shape': 'A.shape'}), '(shape=A.shape)\n', (9899, 9914), True, 'import numpy as np\n'), ((10755, 10766), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (10763, 10766), True, 'import numpy as np\n'), ((11009, 11020), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (11017, 11020), True, 'import numpy as np\n'), ((14389, 14415), 'numpy.insert', 'np.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (14398, 14415), True, 'import numpy as np\n'), ((14464, 14485), 'numpy.dot', 'np.dot', (['Xtranspose', 'X'], {}), '(Xtranspose, X)\n', (14470, 14485), True, 'import numpy as np\n'), ((14499, 14520), 'numpy.dot', 'np.dot', (['Xtranspose', 'Y'], {}), '(Xtranspose, Y)\n', (14505, 14520), True, 'import numpy as np\n'), ((10252, 10265), 'math.sqrt', 'sqrt', (['L[i][j]'], {}), '(L[i][j])\n', (10256, 10265), False, 'from math import sqrt\n'), ((13382, 13409), 'numpy.square', 'np.square', (['orthoBasis[:, i]'], {}), '(orthoBasis[:, i])\n', (13391, 13409), True, 'import numpy as np\n'), ((15245, 15275), 'numpy.square', 'np.square', (['(yTrue - yPrediction)'], {}), '(yTrue - yPrediction)\n', (15254, 15275), True, 'import numpy as np\n'), ((13079, 13121), 'numpy.dot', 'np.dot', (['orthoBasis[:, i]', 'orthoBasis[:, j]'], {}), '(orthoBasis[:, i], orthoBasis[:, j])\n', (13085, 13121), True, 'import numpy as np\n'), ((13154, 13196), 'numpy.dot', 'np.dot', (['orthoBasis[:, j]', 'orthoBasis[:, j]'], {}), '(orthoBasis[:, j], orthoBasis[:, j])\n', (13160, 13196), True, 'import numpy as np\n')]
|
"""Convexified Belief Propagation Class"""
import numpy as np
from .MatrixBeliefPropagator import MatrixBeliefPropagator, logsumexp, sparse_dot
class ConvexBeliefPropagator(MatrixBeliefPropagator):
"""
Class to perform convexified belief propagation based on counting numbers. The class allows for non-Bethe
    counting numbers for the different factors in the MRF. If the counting numbers are all non-negative, then the adjusted
Bethe free energy is convex, providing better guarantees about the convergence and bounds of the primal
and dual objective values.
"""
def __init__(self, markov_net, counting_numbers=None):
"""
Initialize a convexified belief propagator.
:param markov_net: MarkovNet object encoding the probability distribution
:type markov_net: MarkovNet
:param counting_numbers: a dictionary with an entry for each variable and edge such that the value is a float
representing the counting number to use in computing the convexified Bethe formulas
and corresponding message passing updates.
:type counting_numbers: dict
"""
super(ConvexBeliefPropagator, self).__init__(markov_net)
self.unary_counting_numbers = np.ones(len(self.mn.variables))
self.edge_counting_numbers = np.ones(2 * self.mn.num_edges)
default_counting_numbers = dict()
for var in markov_net.variables:
default_counting_numbers[var] = 1
for neighbor in markov_net.neighbors[var]:
if var < neighbor:
default_counting_numbers[(var, neighbor)] = 1
if counting_numbers:
self._set_counting_numbers(counting_numbers)
else:
self._set_counting_numbers(default_counting_numbers)
def _set_counting_numbers(self, counting_numbers):
"""
Store the provided counting numbers and set up the associated vectors for the ordered variable representation.
:param counting_numbers: a dictionary with an entry for each variable and edge with counting number values
:type counting_numbers: dict
:return: None
"""
self.edge_counting_numbers = np.zeros(2 * self.mn.num_edges)
for edge, i in self.mn.message_index.items():
reversed_edge = edge[::-1]
if edge in counting_numbers:
self.edge_counting_numbers[i] = counting_numbers[edge]
self.edge_counting_numbers[i + self.mn.num_edges] = counting_numbers[edge]
elif reversed_edge in counting_numbers:
self.edge_counting_numbers[i] = counting_numbers[reversed_edge]
self.edge_counting_numbers[i + self.mn.num_edges] = counting_numbers[reversed_edge]
else:
raise KeyError('Edge %s was not assigned a counting number.' % repr(edge))
self.unary_counting_numbers = np.zeros((len(self.mn.variables), 1))
for var, i in self.mn.var_index.items():
self.unary_counting_numbers[i] = counting_numbers[var]
self.unary_coefficients = self.unary_counting_numbers.copy()
for edge, i in self.mn.message_index.items():
self.unary_coefficients[self.mn.var_index[edge[0]]] += self.edge_counting_numbers[i]
self.unary_coefficients[self.mn.var_index[edge[1]]] += self.edge_counting_numbers[i]
def compute_bethe_entropy(self):
if self.fully_conditioned:
entropy = 0
else:
entropy = - np.sum(self.edge_counting_numbers[:self.mn.num_edges] *
(np.nan_to_num(self.pair_belief_tensor) * np.exp(self.pair_belief_tensor))) \
- np.sum(self.unary_counting_numbers.T *
(np.nan_to_num(self.belief_mat) * np.exp(self.belief_mat)))
return entropy
    def update_messages(self):
        self.compute_beliefs()
        # Remove the reverse-direction message from each edge potential in log
        # space; the division by the edge counting numbers gives the
        # convexified (reweighted) update rule.
        adjusted_message_prod = self.mn.edge_pot_tensor - np.hstack((self.message_mat[:, self.mn.num_edges:],
                                                                    self.message_mat[:, :self.mn.num_edges]))
        adjusted_message_prod /= self.edge_counting_numbers
        adjusted_message_prod += self.belief_mat[:, self.mn.message_from]
        # Marginalize with logsumexp, then scale back up by the counting numbers.
        messages = np.squeeze(logsumexp(adjusted_message_prod, 1)) * self.edge_counting_numbers
        # Normalize messages in log space; the summed change drives convergence checks.
        messages = np.nan_to_num(messages - messages.max(0))
        change = np.sum(np.abs(messages - self.message_mat))
        self.message_mat = messages
        return change
def compute_beliefs(self):
if not self.fully_conditioned:
self.belief_mat = self.mn.unary_mat + self.augmented_mat
self.belief_mat += sparse_dot(self.message_mat, self.mn.message_to_map)
self.belief_mat /= self.unary_coefficients.T
log_z = logsumexp(self.belief_mat, 0)
self.belief_mat = self.belief_mat - log_z
def compute_pairwise_beliefs(self):
if not self.fully_conditioned:
adjusted_message_prod = self.belief_mat[:, self.mn.message_from] \
- np.nan_to_num(np.hstack((self.message_mat[:, self.mn.num_edges:],
self.message_mat[:, :self.mn.num_edges])) /
self.edge_counting_numbers)
to_messages = adjusted_message_prod[:, :self.mn.num_edges].reshape(
(self.mn.max_states, 1, self.mn.num_edges))
from_messages = adjusted_message_prod[:, self.mn.num_edges:].reshape(
(1, self.mn.max_states, self.mn.num_edges))
beliefs = self.mn.edge_pot_tensor[:, :, self.mn.num_edges:] / \
self.edge_counting_numbers[self.mn.num_edges:] + to_messages + from_messages
beliefs -= logsumexp(beliefs, (0, 1))
self.pair_belief_tensor = beliefs
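# Hedged standalone sketch (added for illustration; not part of the original
# class). belief_mat stores log-beliefs, so the unary entropy term above,
# -c * sum_x log b(x) * exp(log b(x)), equals the textbook counting-number
# weighted entropy -c * sum_x b(x) log b(x). Toy check for one variable:
def _demo_convex_entropy_term():
    b = np.array([0.25, 0.75])
    log_b = np.log(b)
    c = 2.0  # an arbitrary non-Bethe counting number
    textbook = -c * np.sum(b * np.log(b))
    as_coded = -np.sum(c * (np.nan_to_num(log_b) * np.exp(log_b)))
    assert abs(textbook - as_coded) < 1e-12
    return textbook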
|
[
"numpy.abs",
"numpy.nan_to_num",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.exp"
] |
[((1362, 1392), 'numpy.ones', 'np.ones', (['(2 * self.mn.num_edges)'], {}), '(2 * self.mn.num_edges)\n', (1369, 1392), True, 'import numpy as np\n'), ((2257, 2288), 'numpy.zeros', 'np.zeros', (['(2 * self.mn.num_edges)'], {}), '(2 * self.mn.num_edges)\n', (2265, 2288), True, 'import numpy as np\n'), ((4039, 4137), 'numpy.hstack', 'np.hstack', (['(self.message_mat[:, self.mn.num_edges:], self.message_mat[:, :self.mn.\n num_edges])'], {}), '((self.message_mat[:, self.mn.num_edges:], self.message_mat[:, :\n self.mn.num_edges]))\n', (4048, 4137), True, 'import numpy as np\n'), ((4519, 4554), 'numpy.abs', 'np.abs', (['(messages - self.message_mat)'], {}), '(messages - self.message_mat)\n', (4525, 4554), True, 'import numpy as np\n'), ((5214, 5312), 'numpy.hstack', 'np.hstack', (['(self.message_mat[:, self.mn.num_edges:], self.message_mat[:, :self.mn.\n num_edges])'], {}), '((self.message_mat[:, self.mn.num_edges:], self.message_mat[:, :\n self.mn.num_edges]))\n', (5223, 5312), True, 'import numpy as np\n'), ((3835, 3865), 'numpy.nan_to_num', 'np.nan_to_num', (['self.belief_mat'], {}), '(self.belief_mat)\n', (3848, 3865), True, 'import numpy as np\n'), ((3868, 3891), 'numpy.exp', 'np.exp', (['self.belief_mat'], {}), '(self.belief_mat)\n', (3874, 3891), True, 'import numpy as np\n'), ((3663, 3701), 'numpy.nan_to_num', 'np.nan_to_num', (['self.pair_belief_tensor'], {}), '(self.pair_belief_tensor)\n', (3676, 3701), True, 'import numpy as np\n'), ((3704, 3735), 'numpy.exp', 'np.exp', (['self.pair_belief_tensor'], {}), '(self.pair_belief_tensor)\n', (3710, 3735), True, 'import numpy as np\n')]
|
import unittest
from tests.test_support import TestSupport
from mock import Mock
from maskgen.masks.donor_rules import VideoDonor, AudioDonor, AllStreamDonor, AllAudioStreamDonor, \
VideoDonorWithoutAudio, InterpolateDonor,AudioZipDonor
from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, \
get_end_time_from_segment, get_end_frame_from_segment
class TestDonorRules(TestSupport):
def test_video_donor(self):
graph = Mock()
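        # (Added note) The mocked graph emulates a two-link chain a->b and
        # d->e: the predecessors/get_edge lookups below return canned values so
        # the donor rules can be exercised without a real maskgen scene graph.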
def lkup_preds(x):
return {'b':['a'],'e':['d']}[x]
def lkup_edge(x,y):
return {'ab':{'op':'NoSelect'},'de':{'op':'SelectSomething','arguments': {'Start Time': 20, 'End Time':100}}}[x + y]
graph.predecessors = lkup_preds
graph.get_edge = lkup_edge
graph.dir = '.'
donor = VideoDonor(graph, 'e','f', 'x',(None,self.locateFile('tests/videos/sample1.mov')), (None,self.locateFile('tests/videos/sample1.mov')))
args = donor.arguments()
self.assertEqual(20, args['Start Time']['defaultvalue'])
self.assertEqual(100, args['End Time']['defaultvalue'])
segments = donor.create(arguments={'include audio':'yes','Start Time':30,'End Time':150})
for segment in segments:
if get_type_of_segment(segment) == 'audio':
self.assertEqual(115542,get_start_frame_from_segment(segment))
self.assertEqual(509061, get_end_frame_from_segment(segment))
else:
self.assertEqual(30, get_start_frame_from_segment(segment))
self.assertEqual(150, get_end_frame_from_segment(segment))
self.assertEqual(2620.0, get_start_time_from_segment(segment))
self.assertEqual(11543, int(get_end_time_from_segment(segment)))
donor = VideoDonor(graph, 'b','c','x', (None,self.locateFile('tests/videos/sample1.mov')), (None,self.locateFile('tests/videos/sample1.mov')))
args = donor.arguments()
self.assertEqual(1, args['Start Time']['defaultvalue'])
self.assertEqual(0, args['End Time']['defaultvalue'])
segments = donor.create(arguments={'include audio':'yes','Start Time':30,'End Time':150})
for segment in segments:
if get_type_of_segment(segment) == 'audio':
self.assertEqual(115542,get_start_frame_from_segment(segment))
self.assertEqual(509061, get_end_frame_from_segment(segment))
else:
self.assertEqual(30, get_start_frame_from_segment(segment))
self.assertEqual(150, get_end_frame_from_segment(segment))
self.assertEqual(2620.0, get_start_time_from_segment(segment))
self.assertEqual(11543, int(get_end_time_from_segment(segment)))
segments = donor.create(arguments={'include audio': 'no', 'Start Time': 30, 'End Time': 150})
self.assertEqual(0,len([segment for segment in segments if get_type_of_segment(segment) == 'audio']))
donor = VideoDonorWithoutAudio(graph, 'b','c', 'x', (None,self.locateFile('tests/videos/sample1.mov')),
(None,self.locateFile('tests/videos/sample1.mov')))
self.assertTrue('include audio' not in donor.arguments())
def test_audio_donor(self):
graph = Mock()
def lkup_preds(x):
return {'b': ['a'], 'e': ['d']}[x]
def lkup_edge(x, y):
return \
{'ab': {'op': 'NoSelect'}, 'ef': {'op': 'SelectSomething', 'arguments': {'Start Time': "00:00:00.000000"}}}[
x + y]
graph.predecessors = lkup_preds
graph.get_edge = lkup_edge
graph.dir = '.'
donor = AudioDonor(graph, 'e', 'f', 'x', (None, self.locateFile('tests/videos/sample1.mov')),
(None, self.locateFile('tests/videos/sample1.mov')))
args = donor.arguments()
self.assertEqual("00:00:00.000000", args['Start Time']['defaultvalue'])
self.assertEqual("00:00:00.000000", args['End Time']['defaultvalue'])
segments = donor.create(arguments={'Start Time': "00:00:01.11", 'End Time': "00:00:01.32"})
for segment in segments:
self.assertEqual(48951, get_start_frame_from_segment(segment))
self.assertEqual(58212, get_end_frame_from_segment(segment))
self.assertAlmostEqual(1109.97, get_start_time_from_segment(segment),places=1)
self.assertEqual(1320.0, int(get_end_time_from_segment(segment)))
donor = AllStreamDonor(graph, 'e', 'f', 'y', (None, self.locateFile('tests/videos/sample1.mov')),
(None, self.locateFile('tests/videos/sample1.mov')))
args = donor.arguments()
self.assertEqual(0,len(args))
segments = donor.create(arguments={})
types = set()
for segment in segments:
types.add(get_type_of_segment(segment))
if get_type_of_segment(segment) == 'audio':
self.assertEqual(1, get_start_frame_from_segment(segment))
self.assertEqual(2617262, get_end_frame_from_segment(segment))
self.assertAlmostEqual(0, get_start_time_from_segment(segment), places=1)
self.assertAlmostEqual(59348, int(get_end_time_from_segment(segment)))
else:
self.assertEqual(1, get_start_frame_from_segment(segment))
self.assertEqual(803, get_end_frame_from_segment(segment))
self.assertAlmostEqual(0, get_start_time_from_segment(segment), places=1)
self.assertAlmostEqual(59348, int(get_end_time_from_segment(segment)))
self.assertEqual(2,len(types))
donor = AllAudioStreamDonor(graph, 'e', 'f', 'y', (None, self.locateFile('tests/videos/sample1.mov')),
(None, self.locateFile('tests/videos/sample1.mov')))
self.assertEqual(0, len(donor.arguments()))
self.assertEqual(['audio'],donor.media_types())
def test_audio_zip_donor(self):
graph = Mock()
def lkup_preds(x):
return {'b': ['a'], 'e': ['d']}[x]
def lkup_edge(x, y):
return \
{'ab': {'op': 'NoSelect'}, 'ef': {'op': 'SelectSomething', 'arguments': {'Start Time': "00:00:00.000000"}}}[
x + y]
graph.predecessors = lkup_preds
graph.get_edge = lkup_edge
graph.dir = '.'
donor = AudioZipDonor(graph, 'e', 'f', 'x', (None, self.locateFile('tests/zips/test.wav.zip')),
(None, self.locateFile('tests/videos/sample1.mov')))
args = donor.arguments()
self.assertEqual("00:00:00.000000", args['Start Time']['defaultvalue'])
segments = donor.create(arguments={'Start Time': "00:00:09.11", 'End Time': "00:00:16.32", 'sample rate':44100})
for segment in segments:
self.assertEqual(401752, get_start_frame_from_segment(segment))
self.assertEqual(719713, get_end_frame_from_segment(segment))
self.assertAlmostEqual(9110, get_start_time_from_segment(segment),places=1)
self.assertEqual(16320.0, int(get_end_time_from_segment(segment)))
segments = donor.create(
arguments={'Start Time': "00:00:00.00", 'End Time': "00:00:00.00", 'sample rate': 44100})
for segment in segments:
self.assertEqual(1, get_start_frame_from_segment(segment))
self.assertEqual(1572865, get_end_frame_from_segment(segment))
self.assertAlmostEqual(0.0, get_start_time_from_segment(segment),places=1)
self.assertEqual(35665, int(get_end_time_from_segment(segment)))
def test_image_donor(self):
import numpy as np
from maskgen.image_wrap import ImageWrapper
graph = Mock()
def lkup_preds(x):
return {'b': ['a'], 'e': ['d']}[x]
def lkup_edge(x, y):
return \
{'ab': {'op': 'NoSelect'}, 'de': {'op': 'SelectRegion'}}[
x + y]
withoutalpha = ImageWrapper(np.zeros((400, 400, 3), dtype=np.uint8))
withAlpha = ImageWrapper(np.zeros((400, 400, 4), dtype=np.uint8))
mask = ImageWrapper(np.ones((400, 400),dtype = np.uint8)*255)
mask.image_array[0:30, 0:30] = 0
withAlpha.image_array[0:30, 0:30, 3] = 255
graph.predecessors = lkup_preds
graph.get_edge = lkup_edge
graph.dir = '.'
graph.get_edge_image = Mock(return_value=mask)
donor = InterpolateDonor(graph, 'e', 'f', 'x', (withoutalpha, self.locateFile('tests/videos/sample1.mov')),
(withAlpha, self.locateFile('tests/videos/sample1.mov')))
mask = donor.create(arguments={})
self.assertTrue(np.all(mask.image_array[0:30,0:30] == 255))
        self.assertEqual(900, np.sum(mask.image_array / 255))
donor = InterpolateDonor(graph, 'b', 'c', 'x', (withoutalpha, self.locateFile('tests/videos/sample1.mov')),
(withAlpha, self.locateFile('tests/videos/sample1.mov')))
mask = donor.create(arguments={})
self.assertIsNone(mask)
donor = InterpolateDonor(graph, 'b', 'c', 'x', (withAlpha, self.locateFile('tests/videos/sample1.mov')),
(withAlpha, self.locateFile('tests/videos/sample1.mov')))
mask = donor.create(arguments={})
self.assertTrue(np.all(mask.image_array[0:30, 0:30] == 0))
        self.assertEqual(159100, np.sum(mask.image_array / 255))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.sum",
"maskgen.video_tools.get_type_of_segment",
"maskgen.video_tools.get_start_time_from_segment",
"numpy.zeros",
"maskgen.video_tools.get_end_frame_from_segment",
"numpy.ones",
"mock.Mock",
"maskgen.video_tools.get_start_frame_from_segment",
"maskgen.video_tools.get_end_time_from_segment",
"numpy.all"
] |
[((9585, 9600), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9598, 9600), False, 'import unittest\n'), ((501, 507), 'mock.Mock', 'Mock', ([], {}), '()\n', (505, 507), False, 'from mock import Mock\n'), ((3326, 3332), 'mock.Mock', 'Mock', ([], {}), '()\n', (3330, 3332), False, 'from mock import Mock\n'), ((6064, 6070), 'mock.Mock', 'Mock', ([], {}), '()\n', (6068, 6070), False, 'from mock import Mock\n'), ((7819, 7825), 'mock.Mock', 'Mock', ([], {}), '()\n', (7823, 7825), False, 'from mock import Mock\n'), ((8490, 8513), 'mock.Mock', 'Mock', ([], {'return_value': 'mask'}), '(return_value=mask)\n', (8494, 8513), False, 'from mock import Mock\n'), ((8082, 8121), 'numpy.zeros', 'np.zeros', (['(400, 400, 3)'], {'dtype': 'np.uint8'}), '((400, 400, 3), dtype=np.uint8)\n', (8090, 8121), True, 'import numpy as np\n'), ((8156, 8195), 'numpy.zeros', 'np.zeros', (['(400, 400, 4)'], {'dtype': 'np.uint8'}), '((400, 400, 4), dtype=np.uint8)\n', (8164, 8195), True, 'import numpy as np\n'), ((8782, 8825), 'numpy.all', 'np.all', (['(mask.image_array[0:30, 0:30] == 255)'], {}), '(mask.image_array[0:30, 0:30] == 255)\n', (8788, 8825), True, 'import numpy as np\n'), ((8856, 8886), 'numpy.sum', 'np.sum', (['(mask.image_array / 255)'], {}), '(mask.image_array / 255)\n', (8862, 8886), True, 'import numpy as np\n'), ((9441, 9482), 'numpy.all', 'np.all', (['(mask.image_array[0:30, 0:30] == 0)'], {}), '(mask.image_array[0:30, 0:30] == 0)\n', (9447, 9482), True, 'import numpy as np\n'), ((9518, 9548), 'numpy.sum', 'np.sum', (['(mask.image_array / 255)'], {}), '(mask.image_array / 255)\n', (9524, 9548), True, 'import numpy as np\n'), ((1296, 1324), 'maskgen.video_tools.get_type_of_segment', 'get_type_of_segment', (['segment'], {}), '(segment)\n', (1315, 1324), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((1700, 1736), 'maskgen.video_tools.get_start_time_from_segment', 'get_start_time_from_segment', (['segment'], {}), '(segment)\n', (1727, 1736), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((2274, 2302), 'maskgen.video_tools.get_type_of_segment', 'get_type_of_segment', (['segment'], {}), '(segment)\n', (2293, 2302), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((2678, 2714), 'maskgen.video_tools.get_start_time_from_segment', 'get_start_time_from_segment', (['segment'], {}), '(segment)\n', (2705, 2714), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((4247, 4284), 'maskgen.video_tools.get_start_frame_from_segment', 'get_start_frame_from_segment', (['segment'], {}), '(segment)\n', (4275, 4284), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((4322, 4357), 'maskgen.video_tools.get_end_frame_from_segment', 'get_end_frame_from_segment', (['segment'], {}), '(segment)\n', (4348, 4357), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((4403, 4439), 'maskgen.video_tools.get_start_time_from_segment', 'get_start_time_from_segment', (['segment'], {}), '(segment)\n', (4430, 4439), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((4909, 4937), 'maskgen.video_tools.get_type_of_segment', 'get_type_of_segment', (['segment'], {}), '(segment)\n', (4928, 4937), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((4954, 4982), 'maskgen.video_tools.get_type_of_segment', 'get_type_of_segment', (['segment'], {}), '(segment)\n', (4973, 4982), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((6931, 6968), 'maskgen.video_tools.get_start_frame_from_segment', 'get_start_frame_from_segment', (['segment'], {}), '(segment)\n', (6959, 6968), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((7007, 7042), 'maskgen.video_tools.get_end_frame_from_segment', 'get_end_frame_from_segment', (['segment'], {}), '(segment)\n', (7033, 7042), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((7085, 7121), 'maskgen.video_tools.get_start_time_from_segment', 'get_start_time_from_segment', (['segment'], {}), '(segment)\n', (7112, 7121), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((7413, 7450), 'maskgen.video_tools.get_start_frame_from_segment', 'get_start_frame_from_segment', (['segment'], {}), '(segment)\n', (7441, 7450), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((7490, 7525), 'maskgen.video_tools.get_end_frame_from_segment', 'get_end_frame_from_segment', (['segment'], {}), '(segment)\n', (7516, 7525), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((7567, 7603), 'maskgen.video_tools.get_start_time_from_segment', 'get_start_time_from_segment', (['segment'], {}), '(segment)\n', (7594, 7603), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((8225, 8260), 'numpy.ones', 'np.ones', (['(400, 400)'], {'dtype': 'np.uint8'}), '((400, 400), dtype=np.uint8)\n', (8232, 8260), True, 'import numpy as np\n'), ((1377, 1414), 'maskgen.video_tools.get_start_frame_from_segment', 'get_start_frame_from_segment', (['segment'], {}), '(segment)\n', (1405, 1414), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((1457, 1492), 'maskgen.video_tools.get_end_frame_from_segment', 'get_end_frame_from_segment', (['segment'], {}), '(segment)\n', (1483, 1492), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((1549, 1586), 'maskgen.video_tools.get_start_frame_from_segment', 'get_start_frame_from_segment', (['segment'], {}), '(segment)\n', (1577, 1586), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((1626, 1661), 'maskgen.video_tools.get_end_frame_from_segment', 'get_end_frame_from_segment', (['segment'], {}), '(segment)\n', (1652, 1661), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((1778, 1812), 'maskgen.video_tools.get_end_time_from_segment', 'get_end_time_from_segment', (['segment'], {}), '(segment)\n', (1803, 1812), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((2355, 2392), 'maskgen.video_tools.get_start_frame_from_segment', 'get_start_frame_from_segment', (['segment'], {}), '(segment)\n', (2383, 2392), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((2435, 2470), 'maskgen.video_tools.get_end_frame_from_segment', 'get_end_frame_from_segment', (['segment'], {}), '(segment)\n', (2461, 2470), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((2527, 2564), 'maskgen.video_tools.get_start_frame_from_segment', 'get_start_frame_from_segment', (['segment'], {}), '(segment)\n', (2555, 2564), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((2604, 2639), 'maskgen.video_tools.get_end_frame_from_segment', 'get_end_frame_from_segment', (['segment'], {}), '(segment)\n', (2630, 2639), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((2756, 2790), 'maskgen.video_tools.get_end_time_from_segment', 'get_end_time_from_segment', (['segment'], {}), '(segment)\n', (2781, 2790), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((4491, 4525), 'maskgen.video_tools.get_end_time_from_segment', 'get_end_time_from_segment', (['segment'], {}), '(segment)\n', (4516, 4525), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((5031, 5068), 'maskgen.video_tools.get_start_frame_from_segment', 'get_start_frame_from_segment', (['segment'], {}), '(segment)\n', (5059, 5068), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((5112, 5147), 'maskgen.video_tools.get_end_frame_from_segment', 'get_end_frame_from_segment', (['segment'], {}), '(segment)\n', (5138, 5147), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((5191, 5227), 'maskgen.video_tools.get_start_time_from_segment', 'get_start_time_from_segment', (['segment'], {}), '(segment)\n', (5218, 5227), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((5380, 5417), 'maskgen.video_tools.get_start_frame_from_segment', 'get_start_frame_from_segment', (['segment'], {}), '(segment)\n', (5408, 5417), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((5457, 5492), 'maskgen.video_tools.get_end_frame_from_segment', 'get_end_frame_from_segment', (['segment'], {}), '(segment)\n', (5483, 5492), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((5536, 5572), 'maskgen.video_tools.get_start_time_from_segment', 'get_start_time_from_segment', (['segment'], {}), '(segment)\n', (5563, 5572), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((7174, 7208), 'maskgen.video_tools.get_end_time_from_segment', 'get_end_time_from_segment', (['segment'], {}), '(segment)\n', (7199, 7208), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((7654, 7688), 'maskgen.video_tools.get_end_time_from_segment', 'get_end_time_from_segment', (['segment'], {}), '(segment)\n', (7679, 7688), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((5289, 5323), 'maskgen.video_tools.get_end_time_from_segment', 'get_end_time_from_segment', (['segment'], {}), '(segment)\n', (5314, 5323), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((5634, 5668), 'maskgen.video_tools.get_end_time_from_segment', 'get_end_time_from_segment', (['segment'], {}), '(segment)\n', (5659, 5668), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n'), ((2963, 2991), 'maskgen.video_tools.get_type_of_segment', 'get_type_of_segment', (['segment'], {}), '(segment)\n', (2982, 2991), False, 'from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, get_end_time_from_segment, get_end_frame_from_segment\n')]
|
#%% -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 02:47:57 2020
Plot spherical harmonics in 3D with a radial colormap.
http://balbuceosastropy.blogspot.com/2015/06/spherical-harmonics-in-python.html
"""
from __future__ import division
import scipy as sci
import scipy.special as sp
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm, colors
#%% ===========================================================================
l = 4 #degree
m = 2 # order
PHI, THETA = np.mgrid[0:2*np.pi:200j, 0:np.pi:100j] #arrays of angular variables
R = np.abs(sp.sph_harm(m, l, PHI, THETA)) #Array with the absolute values of Ylm
"""
THETA = pi/2 - G_Lat*pi/180
PHI = G_Long*pi/180 + pi
R = G_Grid + 50000
"""
#Now we convert to cartesian coordinates
# for the 3D representation
X = R * np.sin(THETA) * np.cos(PHI)
Y = R * np.sin(THETA) * np.sin(PHI)
Z = R * np.cos(THETA)
N = R/R.max() # Normalize R for the plot colors to cover the entire range of colormap.
fig, ax = plt.subplots(subplot_kw=dict(projection='3d'), figsize=(7,5))
im = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=cm.jet(N))
ax.set_title(r'$|Y^2_4|$', fontsize=20)
m = cm.ScalarMappable(cmap=cm.jet)
m.set_array(R)  # Assign the unnormalized data array to the mappable
# so that the scale corresponds to the values of R
fig.colorbar(m, shrink=0.8);
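# Hedged sanity check (added; not in the original script). Note that `m` was
# just rebound to a ScalarMappable above, so the harmonic order is written as
# a literal here: |Y_l^-m| == |Y_l^m| because Y_l^-m = (-1)^m * conj(Y_l^m).
assert np.allclose(np.abs(sp.sph_harm(2, 4, PHI, THETA)),
                   np.abs(sp.sph_harm(-2, 4, PHI, THETA)))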
#%% ===========================================================================
l = 4 # degree
m = 2 # order
PHI, THETA = np.mgrid[0:2*np.pi:200j, 0:np.pi:100j]
R = sp.sph_harm(m, l, PHI, THETA).real
X = R * np.sin(THETA) * np.cos(PHI)
Y = R * np.sin(THETA) * np.sin(PHI)
Z = R * np.cos(THETA)
#As R has negative values, we'll use an instance of Normalize
#see http://stackoverflow.com/questions/25023075/normalizing-colormap-used-by-facecolors-in-matplotlib
norm = colors.Normalize()
fig, ax = plt.subplots(subplot_kw=dict(projection='3d'), figsize=(7,5))
m = cm.ScalarMappable(cmap=cm.jet)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=cm.jet(norm(R)))
ax.set_title('real$(Y^2_4)$', fontsize=20)
m.set_array(R)
fig.colorbar(m, shrink=0.8);
#%% ===========================================================================
l = 4 # degree
m = 2 # order
PHI, THETA = np.mgrid[0:2*np.pi:300j, 0:np.pi:150j]
R = sp.sph_harm(m, l, PHI, THETA).real
s = 1
X = (s*R+1) * np.sin(THETA) * np.cos(PHI)
Y = (s*R+1) * np.sin(THETA) * np.sin(PHI)
Z = (s*R+1) * np.cos(THETA)
norm = colors.Normalize()
fig, ax = plt.subplots(subplot_kw=dict(projection='3d'), figsize=(7,5))
m = cm.ScalarMappable(cmap=cm.jet)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=cm.terrain(norm(R)))
ax.set_title('1 + real$(Y^2_4)$', fontsize=20)
m.set_array(R)
fig.colorbar(m, shrink=0.8);
#%%
|
[
"scipy.special.sph_harm",
"matplotlib.colors.Normalize",
"matplotlib.cm.ScalarMappable",
"matplotlib.cm.jet",
"numpy.sin",
"numpy.cos"
] |
[((1230, 1260), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cm.jet'}), '(cmap=cm.jet)\n', (1247, 1260), False, 'from matplotlib import cm, colors\n'), ((1908, 1926), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {}), '()\n', (1924, 1926), False, 'from matplotlib import cm, colors\n'), ((2003, 2033), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cm.jet'}), '(cmap=cm.jet)\n', (2020, 2033), False, 'from matplotlib import cm, colors\n'), ((2534, 2552), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {}), '()\n', (2550, 2552), False, 'from matplotlib import cm, colors\n'), ((2629, 2659), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cm.jet'}), '(cmap=cm.jet)\n', (2646, 2659), False, 'from matplotlib import cm, colors\n'), ((635, 664), 'scipy.special.sph_harm', 'sp.sph_harm', (['m', 'l', 'PHI', 'THETA'], {}), '(m, l, PHI, THETA)\n', (646, 664), True, 'import scipy.special as sp\n'), ((878, 889), 'numpy.cos', 'np.cos', (['PHI'], {}), '(PHI)\n', (884, 889), True, 'import numpy as np\n'), ((914, 925), 'numpy.sin', 'np.sin', (['PHI'], {}), '(PHI)\n', (920, 925), True, 'import numpy as np\n'), ((934, 947), 'numpy.cos', 'np.cos', (['THETA'], {}), '(THETA)\n', (940, 947), True, 'import numpy as np\n'), ((1605, 1634), 'scipy.special.sph_harm', 'sp.sph_harm', (['m', 'l', 'PHI', 'THETA'], {}), '(m, l, PHI, THETA)\n', (1616, 1634), True, 'import scipy.special as sp\n'), ((1665, 1676), 'numpy.cos', 'np.cos', (['PHI'], {}), '(PHI)\n', (1671, 1676), True, 'import numpy as np\n'), ((1701, 1712), 'numpy.sin', 'np.sin', (['PHI'], {}), '(PHI)\n', (1707, 1712), True, 'import numpy as np\n'), ((1721, 1734), 'numpy.cos', 'np.cos', (['THETA'], {}), '(THETA)\n', (1727, 1734), True, 'import numpy as np\n'), ((2372, 2401), 'scipy.special.sph_harm', 'sp.sph_harm', (['m', 'l', 'PHI', 'THETA'], {}), '(m, l, PHI, THETA)\n', (2383, 2401), True, 'import scipy.special as sp\n'), ((2444, 2455), 'numpy.cos', 'np.cos', (['PHI'], {}), '(PHI)\n', (2450, 2455), True, 'import numpy as np\n'), ((2486, 2497), 'numpy.sin', 'np.sin', (['PHI'], {}), '(PHI)\n', (2492, 2497), True, 'import numpy as np\n'), ((2512, 2525), 'numpy.cos', 'np.cos', (['THETA'], {}), '(THETA)\n', (2518, 2525), True, 'import numpy as np\n'), ((862, 875), 'numpy.sin', 'np.sin', (['THETA'], {}), '(THETA)\n', (868, 875), True, 'import numpy as np\n'), ((898, 911), 'numpy.sin', 'np.sin', (['THETA'], {}), '(THETA)\n', (904, 911), True, 'import numpy as np\n'), ((1174, 1183), 'matplotlib.cm.jet', 'cm.jet', (['N'], {}), '(N)\n', (1180, 1183), False, 'from matplotlib import cm, colors\n'), ((1649, 1662), 'numpy.sin', 'np.sin', (['THETA'], {}), '(THETA)\n', (1655, 1662), True, 'import numpy as np\n'), ((1685, 1698), 'numpy.sin', 'np.sin', (['THETA'], {}), '(THETA)\n', (1691, 1698), True, 'import numpy as np\n'), ((2428, 2441), 'numpy.sin', 'np.sin', (['THETA'], {}), '(THETA)\n', (2434, 2441), True, 'import numpy as np\n'), ((2470, 2483), 'numpy.sin', 'np.sin', (['THETA'], {}), '(THETA)\n', (2476, 2483), True, 'import numpy as np\n')]
|
"""
Common sub models for lubricants
"""
import numpy as np
__all__ = ['constant_array_property', 'roelands', 'barus', 'nd_barus', 'nd_roelands', 'dowson_higginson',
'nd_dowson_higginson']
def constant_array_property(value: float):
""" Produce a closure that returns an index able constant value
Parameters
----------
value: float
The value of the constant
Returns
-------
inner: closure
        A closure that returns a fully populated array the same size as the just_touching_gap keyword argument. This
        is guaranteed to be in the current state dict, and is therefore passed as a keyword when sub models are saved.
Notes
-----
    Using this closure means that lubrication steps can be written for the general case, using indexing on fluid
properties.
Examples
--------
>>> closure = constant_array_property(1.23)
    >>> constant_array = closure(just_touching_gap=np.ones((5, 5)))
    >>> constant_array.shape
    (5, 5)
    >>> constant_array[0, 0]
    1.23
"""
def inner(just_touching_gap: np.ndarray, **kwargs):
return np.ones_like(just_touching_gap) * value
return inner
def roelands(eta_0, pressure_0, z):
""" The roelands pressure viscosity equation
Parameters
----------
eta_0, pressure_0, z: float
Coefficients for the equation, see notes for details
Returns
-------
inner: closure
A callable that produces the viscosity terms according to the Roelands equation, see notes for details
Notes
-----
    The Roelands equation linking viscosity (eta) to the fluid pressure (p) is given by:
    eta(p) = eta_0*exp((ln(eta_0)+9.67)*(-1+(1+p/p_0)^z))
    eta_0, p_0 and z are coefficients that depend on the oil and its temperature.
"""
ln_eta_0 = np.log(eta_0) + 9.67
def inner(pressure: np.ndarray, **kwargs):
return eta_0 * np.exp(ln_eta_0 * (-1 + (1 + pressure / pressure_0) ** z))
return inner
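# Hedged usage sketch (added for illustration; the coefficient values below
# are assumptions, not taken from the source). At zero pressure the Roelands
# closure returns eta_0 exactly, since -1 + (1 + 0)**z == 0.
def _demo_roelands():
    visc = roelands(eta_0=0.08, pressure_0=1.96e8, z=0.68)
    eta = visc(np.array([0.0, 1.0e8]))
    assert abs(eta[0] - 0.08) < 1e-12
    assert eta[1] > eta[0]  # viscosity rises with pressure
    return eta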
def nd_roelands(eta_0: float, pressure_0: float, pressure_hertzian: float, z: float):
""" The roelands pressure viscosity equation in a non dimentional form
Parameters
----------
eta_0, pressure_0, z: float
Coefficients for the equation, see notes for details
    pressure_hertzian: float
        The Hertzian pressure used to non-dimensionalise the pressure term in the equation. Should be the same as is
        used in the Reynolds solver.
Returns
-------
inner: closure
        A callable that produces the non-dimensional viscosity according to the Roelands equation, see notes for details
Notes
-----
    The Roelands equation linking viscosity (eta) to the non-dimensional fluid pressure (nd_p) is given by:
    eta(p)/eta_0 = exp((ln(eta_0)+9.67)*(-1+(1+nd_p*p_h/p_0)^z))
    eta_0, p_0 and z are coefficients that depend on the oil and its temperature.
    p_h is the Hertzian pressure used to non-dimensionalise the pressure term.
"""
ln_eta_0 = np.log(eta_0) + 9.67
p_all = pressure_hertzian / pressure_0
def inner(nd_pressure: np.ndarray, **kwargs):
return np.exp(ln_eta_0 * (-1 + (1 + p_all * nd_pressure) ** z))
return inner
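# Hedged consistency check (added; all values are illustrative assumptions):
# the non-dimensional Roelands closure should reproduce eta/eta_0 from the
# dimensional closure when evaluated at nd_p = p / p_h.
def _demo_nd_roelands():
    eta_0, p_0, z, p_h = 0.08, 1.96e8, 0.68, 7.0e8
    p = np.array([0.0, 1.0e8, 5.0e8])
    dimensional = roelands(eta_0, p_0, z)(p) / eta_0
    non_dimensional = nd_roelands(eta_0, p_0, p_h, z)(p / p_h)
    assert np.allclose(dimensional, non_dimensional)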
def barus(eta_0: float, alpha: float):
""" The Barus pressure viscosity equation
Parameters
----------
eta_0, alpha: float
Coefficients in the equation, see notes for details
Returns
-------
inner: closure
A callable that returns the resulting viscosity according to the barus equation
Notes
-----
The Barus equation linking pressure (p) to viscosity (eta) is given by:
eta(p) = eta_0*exp(alpha*p)
In which eta_0 and alpha are coefficients which depend on the lubricant and it's temperature
"""
def inner(pressure: np.ndarray, **kwargs):
return eta_0 * np.exp(alpha * pressure)
return inner
def nd_barus(pressure_hertzian: float, alpha: float):
""" A non dimentional form of the Barus equation
Parameters
----------
alpha: float
A coefficient in the Barus equation, see notes for details
pressure_hertzian: float
The hertzian pressure used to non dimensionalise the pressure
Returns
-------
inner: closure
A callable that will produce the non dimentional viscosity according to the barus equation
Notes
-----
The non dimentional Barus equation relating the viscosity (eta) to the non dimentional pressure (nd_p) is given by:
eta(p)/eta_0 = exp(alpha*p_h*nd_p)
In which alpha is alpha is a coefficient which will depend on the lubricant used and the temperature
p_h is the hertzian pressure used to non dimentionalise the pressure, this must be the same as is passed to the
reynolds solver.
"""
def inner(nd_pressure: np.ndarray, **kwargs):
return np.exp(alpha * pressure_hertzian * nd_pressure)
return inner
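# Hedged consistency check (added; alpha and p_h are illustrative
# assumptions): nd_barus should match barus(...)/eta_0 at nd_p = p / p_h.
def _demo_barus():
    eta_0, alpha, p_h = 0.08, 2.2e-08, 7.0e8
    p = np.array([0.0, 1.0e8])
    assert np.allclose(barus(eta_0, alpha)(p) / eta_0,
                       nd_barus(p_h, alpha)(p / p_h))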
def dowson_higginson(rho_0: float):
""" The Dowson Higginson equation relating pressure to density
Parameters
----------
rho_0: float
        A coefficient of the Dowson Higginson equation, see notes for details
Returns
-------
inner: closure
A callable that returns the density based on the pressure according to the dowson higginson equation
Notes
-----
    The Dowson Higginson equation relating pressure (p) to density (rho) is given by:
    rho(p) = rho_0 * (5.9e8+1.34*p)/(5.9e8+p)
    In which rho_0 is the parameter of the equation, which will depend on the lubricant used and its temperature
"""
def inner(pressure: np.ndarray, **kwargs):
return rho_0 * (5.9e8 + 1.34 * pressure) / (5.9e8 + pressure)
return inner
def nd_dowson_higginson(pressure_hertzian: float):
""" A non dimentional form of the Dowson Higginson equation relating pressure to density
Parameters
----------
pressure_hertzian: float
The hertzian pressure used to non dimentionalise the pressure, this must match the pressure given to the
reynolds solver
Returns
-------
inner: closure
A callable that returns the non dimentional density based on the non dimentional pressure
Notes
-----
The non dimentional dowson higginson equation relating non dimensional pressure (nd_p) to density (rho) is given by:
rho(p)/rho_0 = (5.9e8+1.34*p_h*nd_p)/(5.9e8+p_h*nd_p)
In which p_h is the hertzian pressure used to non denationalise the pressure and rho_0 is a parameter of the
dimentional form of the dowson higginson equation. Here the value rho(p)/rho_0 is returned
"""
constant = 5.9e8 / pressure_hertzian
def inner(nd_pressure: np.ndarray, **kwargs):
return (constant + 1.34 * nd_pressure) / (constant + nd_pressure)
return inner
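# Hedged sanity sketch (added; rho_0 and p_h are illustrative assumptions):
# the Dowson Higginson closure returns rho_0 at zero pressure, approaches
# 1.34*rho_0 at high pressure, and its non-dimensional form agrees with
# rho/rho_0 at nd_p = p / p_h.
def _demo_dowson_higginson():
    rho_0, p_h = 870.0, 7.0e8
    p = np.array([0.0, 1.0e9])
    rho = dowson_higginson(rho_0)(p)
    assert abs(rho[0] - rho_0) < 1e-9
    assert rho_0 < rho[1] < 1.34 * rho_0
    assert np.allclose(rho / rho_0, nd_dowson_higginson(p_h)(p / p_h))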
|
[
"numpy.exp",
"numpy.ones_like",
"numpy.log"
] |
[((1879, 1892), 'numpy.log', 'np.log', (['eta_0'], {}), '(eta_0)\n', (1885, 1892), True, 'import numpy as np\n'), ((3063, 3076), 'numpy.log', 'np.log', (['eta_0'], {}), '(eta_0)\n', (3069, 3076), True, 'import numpy as np\n'), ((3193, 3249), 'numpy.exp', 'np.exp', (['(ln_eta_0 * (-1 + (1 + p_all * nd_pressure) ** z))'], {}), '(ln_eta_0 * (-1 + (1 + p_all * nd_pressure) ** z))\n', (3199, 3249), True, 'import numpy as np\n'), ((4913, 4960), 'numpy.exp', 'np.exp', (['(alpha * pressure_hertzian * nd_pressure)'], {}), '(alpha * pressure_hertzian * nd_pressure)\n', (4919, 4960), True, 'import numpy as np\n'), ((1179, 1210), 'numpy.ones_like', 'np.ones_like', (['just_touching_gap'], {}), '(just_touching_gap)\n', (1191, 1210), True, 'import numpy as np\n'), ((1971, 2029), 'numpy.exp', 'np.exp', (['(ln_eta_0 * (-1 + (1 + pressure / pressure_0) ** z))'], {}), '(ln_eta_0 * (-1 + (1 + pressure / pressure_0) ** z))\n', (1977, 2029), True, 'import numpy as np\n'), ((3907, 3931), 'numpy.exp', 'np.exp', (['(alpha * pressure)'], {}), '(alpha * pressure)\n', (3913, 3931), True, 'import numpy as np\n')]
|
import numpy as np
from gradient_boosting import *
def test_train_predict():
X_train, y_train = load_dataset("data/tiny.rent.train")
X_val, y_val = load_dataset("data/tiny.rent.test")
y_mean, trees = gradient_boosting_mse(X_train, y_train, 5, max_depth=2, nu=0.1)
    assert np.around(y_mean, decimals=4) == 3839.1724
    y_hat_train = gradient_boosting_predict(X_train, trees, y_mean, nu=0.1)
    assert np.around(r2_score(y_train, y_hat_train), decimals=4) == 0.5527
    y_hat = gradient_boosting_predict(X_val, trees, y_mean, nu=0.1)
    assert np.around(r2_score(y_val, y_hat), decimals=4) == 0.5109
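# Hedged illustration (added; gradient_boosting internals are assumptions
# inferred from the calls above). For squared error, boosting starts from the
# training mean and adds nu-shrunk fits to the residual:
#   F_0(x) = mean(y);   F_t(x) = F_{t-1}(x) + nu * h_t(x),  h_t fit to y - F_{t-1}
def _demo_shrinkage(nu=0.1, rounds=5):
    y = np.array([1.0, 2.0, 3.0, 10.0])
    pred = np.full_like(y, y.mean())
    for _ in range(rounds):
        residual = y - pred
        # an ideal weak learner would predict the residual exactly
        pred = pred + nu * residual
    return pred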
|
[
"numpy.around"
] |
[((287, 316), 'numpy.around', 'np.around', (['y_mean'], {'decimals': '(4)'}), '(y_mean, decimals=4)\n', (296, 316), True, 'import numpy as np\n')]
|
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2008-2011 <NAME> <<EMAIL>>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Gas dynamics solver of the Euler equations.
"""
from solvcon.kerpak.cuse import CuseSolver
from solvcon.kerpak.cuse import CuseCase
from solvcon.kerpak.cuse import CuseBC
from solvcon.anchor import Anchor
from solvcon.hook import BlockHook
################################################################################
# Utility.
################################################################################
class MovingShock(object):
"""
    Define relations across a moving shock wave. Subscript 1 denotes
    quantities ahead of the shock (not yet passed by it); subscript 2 denotes
    quantities behind the shock (already passed by it).
@ivar ga: ratio of specific heat.
@itype ga: float
@ivar Ms: Mach number of shock wave.
@itype Ms: float
@ivar gasconst: gas constant.
@itype gasconst: float
"""
def __init__(self, ga, Ms, **kw):
self.ga = ga
self.Ms = Ms
self.gasconst = kw.pop('gasconst', 1.0)
@property
def ratio_p(self):
"""
        ratio of post-shock to pre-shock pressure, p2/p1.
"""
ga = self.ga
Ms = self.Ms
return (2*ga*Ms**2 - (ga-1))/(ga+1)
@property
def ratio_rho(self):
"""
        ratio of post-shock to pre-shock density, rho2/rho1.
"""
ga = self.ga
Ms = self.Ms
return (ga+1)*Ms**2/(2+(ga-1)*Ms**2)
@property
def ratio_T(self):
"""
        ratio of post-shock to pre-shock temperature, T2/T1.
"""
ga = self.ga
Ms = self.Ms
return self.ratio_p/self.ratio_rho
@property
def M2(self):
"""
        Mach number behind a standing normal shock wave.
"""
from math import sqrt
ga = self.ga
Ms = self.Ms
return sqrt(((ga-1)*Ms**2+2)/(2*ga*Ms**2-(ga-1)))
@property
def M2p(self):
"""
        Mach number behind a moving normal shock wave.
"""
from math import sqrt
M1 = self.Ms
M2 = self.M2
ratio_a = sqrt(self.ratio_T)
return M1/ratio_a - M2
def calc_temperature(self, p, rho):
"""
Calculate temperature according to given pressure and density.
@param p: pressure.
@type p: float
@param rho: density.
@type rho: float
@return: temperature
@rtype: float
"""
return p/(rho*self.gasconst)
def calc_speedofsound(self, p, rho):
"""
Calculate speed of sound according to given pressure and density.
@param p: pressure.
@type p: float
@param rho: density.
@type rho: float
@return: speed of sound
@rtype: float
"""
from math import sqrt
ga = self.ga
return sqrt(ga*p/rho)
def calc_speeds(self, p, rho):
"""
Calculate shock wave speed and upstream speed for static downstream.
@param p: downstream pressure.
@type p: float
@param rho: downstream density.
@type rho: float
@return: a 2-tuple for shock wave and upstream speeds.
@rtype: (float, float)
"""
M1 = self.Ms
M2 = self.M2
a1 = self.calc_speedofsound(p, rho)
a2 = self.calc_speedofsound(p*self.ratio_p, rho*self.ratio_rho)
return M1*a1, M1*a1 - M2*a2
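# Hedged usage sketch (added for illustration; not part of the original
# module). Exercises the MovingShock relations for a Mach-2 shock in air
# (ga = 1.4) running into quiescent gas with p = rho = 1.
def _demo_moving_shock():
    ms = MovingShock(ga=1.4, Ms=2.0)
    assert abs(ms.ratio_p - 4.5) < 1e-9        # (2*1.4*4 - 0.4)/(1.4 + 1)
    assert abs(ms.ratio_rho - 8.0/3.0) < 1e-9  # 2.4*4/(2 + 0.4*4)
    shock_speed, piston_speed = ms.calc_speeds(p=1.0, rho=1.0)
    assert shock_speed > piston_speed > 0.0
    return shock_speed, piston_speed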
###############################################################################
# Solver.
###############################################################################
class GasdynSolver(CuseSolver):
"""
Gas dynamics solver of the Euler equations.
"""
def __init__(self, blk, *args, **kw):
kw['nsca'] = 1
super(GasdynSolver, self).__init__(blk, *args, **kw)
from solvcon.dependency import getcdll
__clib_gasdyn_c = {
2: getcdll('gasdyn2d_c', raise_on_fail=False),
3: getcdll('gasdyn3d_c', raise_on_fail=False),
}
__clib_gasdyn_cu = {
2: getcdll('gasdyn2d_cu', raise_on_fail=False),
3: getcdll('gasdyn3d_cu', raise_on_fail=False),
}
del getcdll
@property
def _clib_gasdyn_c(self):
return self.__clib_gasdyn_c[self.ndim]
@property
def _clib_gasdyn_cu(self):
return self.__clib_gasdyn_cu[self.ndim]
@property
def _clib_mcu(self):
return self.__clib_gasdyn_cu[self.ndim]
_gdlen_ = 0
@property
def _jacofunc_(self):
return self._clib_gasdyn_c.calc_jaco
def calccfl(self, worker=None):
from ctypes import byref
if self.scu:
self._clib_gasdyn_cu.calc_cfl(self.ncuth,
byref(self.cumgr.exd), self.cumgr.gexd.gptr)
else:
self._clib_gasdyn_c.calc_cfl(byref(self.exd))
###############################################################################
# Case.
###############################################################################
class GasdynCase(CuseCase):
"""
Gas dynamics case.
"""
from solvcon.domain import Domain
defdict = {
'solver.solvertype': GasdynSolver,
'solver.domaintype': Domain,
}
del Domain
def load_block(self):
loaded = super(GasdynCase, self).load_block()
if hasattr(loaded, 'ndim'):
ndim = loaded.ndim
else:
ndim = loaded.blk.ndim
self.execution.neq = ndim+2
return loaded
###############################################################################
# Boundary conditions.
###############################################################################
class GasdynBC(CuseBC):
"""
Basic BC class for gas dynamics.
"""
from solvcon.dependency import getcdll
__clib_gasdynb_c = {
2: getcdll('gasdynb2d_c', raise_on_fail=False),
3: getcdll('gasdynb3d_c', raise_on_fail=False),
}
__clib_gasdynb_cu = {
2: getcdll('gasdynb2d_cu', raise_on_fail=False),
3: getcdll('gasdynb3d_cu', raise_on_fail=False),
}
del getcdll
@property
def _clib_gasdynb_c(self):
return self.__clib_gasdynb_c[self.svr.ndim]
@property
def _clib_gasdynb_cu(self):
return self.__clib_gasdynb_cu[self.svr.ndim]
class GasdynWall(GasdynBC):
_ghostgeom_ = 'mirror'
def soln(self):
from ctypes import byref
svr = self.svr
if svr.scu:
self._clib_gasdynb_cu.bound_wall_soln(svr.ncuth,
svr.cumgr.gexd.gptr, self.facn.shape[0], self.cufacn.gptr)
else:
self._clib_gasdynb_c.bound_wall_soln(byref(svr.exd),
self.facn.shape[0], self.facn.ctypes._as_parameter_)
def dsoln(self):
from ctypes import byref
svr = self.svr
if svr.scu:
self._clib_gasdynb_cu.bound_wall_dsoln(svr.ncuth,
svr.cumgr.gexd.gptr, self.facn.shape[0], self.cufacn.gptr)
else:
self._clib_gasdynb_c.bound_wall_dsoln(byref(svr.exd),
self.facn.shape[0], self.facn.ctypes._as_parameter_)
class GasdynNswall(GasdynWall):
def soln(self):
from ctypes import byref
svr = self.svr
if svr.scu:
self._clib_gasdynb_cu.bound_nswall_soln(svr.ncuth,
svr.cumgr.gexd.gptr, self.facn.shape[0], self.cufacn.gptr)
else:
self._clib_gasdynb_c.bound_nswall_soln(byref(svr.exd),
self.facn.shape[0], self.facn.ctypes._as_parameter_)
def dsoln(self):
from ctypes import byref
svr = self.svr
if svr.scu:
self._clib_gasdynb_cu.bound_nswall_dsoln(svr.ncuth,
svr.cumgr.gexd.gptr, self.facn.shape[0], self.cufacn.gptr)
else:
self._clib_gasdynb_c.bound_nswall_dsoln(byref(svr.exd),
self.facn.shape[0], self.facn.ctypes._as_parameter_)
class GasdynInlet(GasdynBC):
vnames = ['rho', 'v1', 'v2', 'v3', 'p', 'gamma']
vdefaults = {
'rho': 1.0, 'p': 1.0, 'gamma': 1.4, 'v1': 0.0, 'v2': 0.0, 'v3': 0.0,
}
_ghostgeom_ = 'mirror'
def soln(self):
from ctypes import byref
svr = self.svr
if svr.scu:
self._clib_gasdynb_cu.bound_inlet_soln(svr.ncuth,
svr.cumgr.gexd.gptr, self.facn.shape[0], self.cufacn.gptr,
self.value.shape[1], self.cuvalue.gptr)
else:
self._clib_gasdynb_c.bound_inlet_soln(byref(svr.exd),
self.facn.shape[0], self.facn.ctypes._as_parameter_,
self.value.shape[1], self.value.ctypes._as_parameter_)
def dsoln(self):
from ctypes import byref
svr = self.svr
if svr.scu:
self._clib_gasdynb_cu.bound_inlet_dsoln(svr.ncuth,
svr.cumgr.gexd.gptr, self.facn.shape[0], self.cufacn.gptr)
else:
self._clib_gasdynb_c.bound_inlet_dsoln(byref(svr.exd),
self.facn.shape[0], self.facn.ctypes._as_parameter_)
###############################################################################
# Anchors.
###############################################################################
class GasdynIAnchor(Anchor):
"""
Basic initializing anchor class of GasdynSolver.
"""
def __init__(self, svr, **kw):
assert isinstance(svr, GasdynSolver)
self.gamma = float(kw.pop('gamma'))
super(GasdynIAnchor, self).__init__(svr, **kw)
def provide(self):
from solvcon.solver_legacy import ALMOST_ZERO
svr = self.svr
svr.amsca.fill(self.gamma)
svr.sol.fill(ALMOST_ZERO)
svr.soln.fill(ALMOST_ZERO)
svr.dsol.fill(ALMOST_ZERO)
svr.dsoln.fill(ALMOST_ZERO)
class UniformIAnchor(GasdynIAnchor):
def __init__(self, svr, **kw):
self.rho = float(kw.pop('rho'))
self.v1 = float(kw.pop('v1'))
self.v2 = float(kw.pop('v2'))
self.v3 = float(kw.pop('v3'))
self.p = float(kw.pop('p'))
super(UniformIAnchor, self).__init__(svr, **kw)
def provide(self):
super(UniformIAnchor, self).provide()
gamma = self.gamma
svr = self.svr
svr.soln[:,0].fill(self.rho)
svr.soln[:,1].fill(self.rho*self.v1)
svr.soln[:,2].fill(self.rho*self.v2)
vs = self.v1**2 + self.v2**2
if svr.ndim == 3:
vs += self.v3**2
svr.soln[:,3].fill(self.rho*self.v3)
svr.soln[:,svr.ndim+1].fill(self.rho*vs/2 + self.p/(gamma-1))
svr.sol[:] = svr.soln[:]
class GasdynOAnchor(Anchor):
"""
Calculates physical quantities for output. Implements (i) provide() and
(ii) postfull() methods.
@ivar gasconst: gas constant.
@itype gasconst: float.
"""
_varlist_ = ['v', 'rho', 'p', 'T', 'ke', 'a', 'M', 'sch']
def __init__(self, svr, **kw):
self.rsteps = kw.pop('rsteps', 1)
self.gasconst = kw.pop('gasconst', 1.0)
self.schk = kw.pop('schk', 1.0)
self.schk0 = kw.pop('schk0', 0.0)
self.schk1 = kw.pop('schk1', 1.0)
super(GasdynOAnchor, self).__init__(svr, **kw)
def _calculate_physics(self):
from ctypes import byref, c_double
svr = self.svr
der = svr.der
svr._clib_gasdyn_c.process_physics(byref(svr.exd),
c_double(self.gasconst),
der['v'].ctypes._as_parameter_,
der['w'].ctypes._as_parameter_,
der['wm'].ctypes._as_parameter_,
der['rho'].ctypes._as_parameter_,
der['p'].ctypes._as_parameter_,
der['T'].ctypes._as_parameter_,
der['ke'].ctypes._as_parameter_,
der['a'].ctypes._as_parameter_,
der['M'].ctypes._as_parameter_,
)
def _calculate_schlieren(self):
from ctypes import byref, c_double
svr = self.svr
sch = svr.der['sch']
svr._clib_gasdyn_c.process_schlieren_rhog(byref(svr.exd),
sch.ctypes._as_parameter_)
rhogmax = sch[svr.ngstcell:].max()
svr._clib_gasdyn_c.process_schlieren_sch(byref(svr.exd),
c_double(self.schk), c_double(self.schk0), c_double(self.schk1),
c_double(rhogmax), sch.ctypes._as_parameter_,
)
def provide(self):
from numpy import empty
svr = self.svr
der = svr.der
nelm = svr.ngstcell + svr.ncell
der['v'] = empty((nelm, svr.ndim), dtype=svr.fpdtype)
der['w'] = empty((nelm, svr.ndim), dtype=svr.fpdtype)
der['wm'] = empty(nelm, dtype=svr.fpdtype)
der['rho'] = empty(nelm, dtype=svr.fpdtype)
der['p'] = empty(nelm, dtype=svr.fpdtype)
der['T'] = empty(nelm, dtype=svr.fpdtype)
der['ke'] = empty(nelm, dtype=svr.fpdtype)
der['a'] = empty(nelm, dtype=svr.fpdtype)
der['M'] = empty(nelm, dtype=svr.fpdtype)
der['sch'] = empty(nelm, dtype=svr.fpdtype)
self._calculate_physics()
self._calculate_schlieren()
def postfull(self):
svr = self.svr
istep = self.svr.step_global
rsteps = self.rsteps
if istep > 0 and istep%rsteps == 0:
if svr.scu:
svr.cumgr.arr_from_gpu('amsca', 'soln', 'dsoln')
self._calculate_physics()
self._calculate_schlieren()
|
[
"ctypes.c_double",
"math.sqrt",
"ctypes.byref",
"numpy.empty",
"solvcon.dependency.getcdll"
] |
[((2530, 2592), 'math.sqrt', 'sqrt', (['(((ga - 1) * Ms ** 2 + 2) / (2 * ga * Ms ** 2 - (ga - 1)))'], {}), '(((ga - 1) * Ms ** 2 + 2) / (2 * ga * Ms ** 2 - (ga - 1)))\n', (2534, 2592), False, 'from math import sqrt\n'), ((2773, 2791), 'math.sqrt', 'sqrt', (['self.ratio_T'], {}), '(self.ratio_T)\n', (2777, 2791), False, 'from math import sqrt\n'), ((3534, 3552), 'math.sqrt', 'sqrt', (['(ga * p / rho)'], {}), '(ga * p / rho)\n', (3538, 3552), False, 'from math import sqrt\n'), ((4573, 4615), 'solvcon.dependency.getcdll', 'getcdll', (['"""gasdyn2d_c"""'], {'raise_on_fail': '(False)'}), "('gasdyn2d_c', raise_on_fail=False)\n", (4580, 4615), False, 'from solvcon.dependency import getcdll\n'), ((4628, 4670), 'solvcon.dependency.getcdll', 'getcdll', (['"""gasdyn3d_c"""'], {'raise_on_fail': '(False)'}), "('gasdyn3d_c', raise_on_fail=False)\n", (4635, 4670), False, 'from solvcon.dependency import getcdll\n'), ((4714, 4757), 'solvcon.dependency.getcdll', 'getcdll', (['"""gasdyn2d_cu"""'], {'raise_on_fail': '(False)'}), "('gasdyn2d_cu', raise_on_fail=False)\n", (4721, 4757), False, 'from solvcon.dependency import getcdll\n'), ((4770, 4813), 'solvcon.dependency.getcdll', 'getcdll', (['"""gasdyn3d_cu"""'], {'raise_on_fail': '(False)'}), "('gasdyn3d_cu', raise_on_fail=False)\n", (4777, 4813), False, 'from solvcon.dependency import getcdll\n'), ((6473, 6516), 'solvcon.dependency.getcdll', 'getcdll', (['"""gasdynb2d_c"""'], {'raise_on_fail': '(False)'}), "('gasdynb2d_c', raise_on_fail=False)\n", (6480, 6516), False, 'from solvcon.dependency import getcdll\n'), ((6529, 6572), 'solvcon.dependency.getcdll', 'getcdll', (['"""gasdynb3d_c"""'], {'raise_on_fail': '(False)'}), "('gasdynb3d_c', raise_on_fail=False)\n", (6536, 6572), False, 'from solvcon.dependency import getcdll\n'), ((6617, 6661), 'solvcon.dependency.getcdll', 'getcdll', (['"""gasdynb2d_cu"""'], {'raise_on_fail': '(False)'}), "('gasdynb2d_cu', raise_on_fail=False)\n", (6624, 6661), False, 'from solvcon.dependency import getcdll\n'), ((6674, 6718), 'solvcon.dependency.getcdll', 'getcdll', (['"""gasdynb3d_cu"""'], {'raise_on_fail': '(False)'}), "('gasdynb3d_cu', raise_on_fail=False)\n", (6681, 6718), False, 'from solvcon.dependency import getcdll\n'), ((13057, 13099), 'numpy.empty', 'empty', (['(nelm, svr.ndim)'], {'dtype': 'svr.fpdtype'}), '((nelm, svr.ndim), dtype=svr.fpdtype)\n', (13062, 13099), False, 'from numpy import empty\n'), ((13119, 13161), 'numpy.empty', 'empty', (['(nelm, svr.ndim)'], {'dtype': 'svr.fpdtype'}), '((nelm, svr.ndim), dtype=svr.fpdtype)\n', (13124, 13161), False, 'from numpy import empty\n'), ((13182, 13212), 'numpy.empty', 'empty', (['nelm'], {'dtype': 'svr.fpdtype'}), '(nelm, dtype=svr.fpdtype)\n', (13187, 13212), False, 'from numpy import empty\n'), ((13234, 13264), 'numpy.empty', 'empty', (['nelm'], {'dtype': 'svr.fpdtype'}), '(nelm, dtype=svr.fpdtype)\n', (13239, 13264), False, 'from numpy import empty\n'), ((13284, 13314), 'numpy.empty', 'empty', (['nelm'], {'dtype': 'svr.fpdtype'}), '(nelm, dtype=svr.fpdtype)\n', (13289, 13314), False, 'from numpy import empty\n'), ((13334, 13364), 'numpy.empty', 'empty', (['nelm'], {'dtype': 'svr.fpdtype'}), '(nelm, dtype=svr.fpdtype)\n', (13339, 13364), False, 'from numpy import empty\n'), ((13385, 13415), 'numpy.empty', 'empty', (['nelm'], {'dtype': 'svr.fpdtype'}), '(nelm, dtype=svr.fpdtype)\n', (13390, 13415), False, 'from numpy import empty\n'), ((13435, 13465), 'numpy.empty', 'empty', (['nelm'], {'dtype': 'svr.fpdtype'}), '(nelm, dtype=svr.fpdtype)\n', (13440, 13465), False, 'from numpy import empty\n'), ((13485, 13515), 'numpy.empty', 'empty', (['nelm'], {'dtype': 'svr.fpdtype'}), '(nelm, dtype=svr.fpdtype)\n', (13490, 13515), False, 'from numpy import empty\n'), ((13537, 13567), 'numpy.empty', 'empty', (['nelm'], {'dtype': 'svr.fpdtype'}), '(nelm, dtype=svr.fpdtype)\n', (13542, 13567), False, 'from numpy import empty\n'), ((11946, 11960), 'ctypes.byref', 'byref', (['svr.exd'], {}), '(svr.exd)\n', (11951, 11960), False, 'from ctypes import byref, c_double\n'), ((11974, 11997), 'ctypes.c_double', 'c_double', (['self.gasconst'], {}), '(self.gasconst)\n', (11982, 11997), False, 'from ctypes import byref, c_double\n'), ((12590, 12604), 'ctypes.byref', 'byref', (['svr.exd'], {}), '(svr.exd)\n', (12595, 12604), False, 'from ctypes import byref, c_double\n'), ((12737, 12751), 'ctypes.byref', 'byref', (['svr.exd'], {}), '(svr.exd)\n', (12742, 12751), False, 'from ctypes import byref, c_double\n'), ((12765, 12784), 'ctypes.c_double', 'c_double', (['self.schk'], {}), '(self.schk)\n', (12773, 12784), False, 'from ctypes import byref, c_double\n'), ((12786, 12806), 'ctypes.c_double', 'c_double', (['self.schk0'], {}), '(self.schk0)\n', (12794, 12806), False, 'from ctypes import byref, c_double\n'), ((12808, 12828), 'ctypes.c_double', 'c_double', (['self.schk1'], {}), '(self.schk1)\n', (12816, 12828), False, 'from ctypes import byref, c_double\n'), ((12842, 12859), 'ctypes.c_double', 'c_double', (['rhogmax'], {}), '(rhogmax)\n', (12850, 12859), False, 'from ctypes import byref, c_double\n'), ((5369, 5390), 'ctypes.byref', 'byref', (['self.cumgr.exd'], {}), '(self.cumgr.exd)\n', (5374, 5390), False, 'from ctypes import byref, c_double\n'), ((5469, 5484), 'ctypes.byref', 'byref', (['self.exd'], {}), '(self.exd)\n', (5474, 5484), False, 'from ctypes import byref, c_double\n'), ((7289, 7303), 'ctypes.byref', 'byref', (['svr.exd'], {}), '(svr.exd)\n', (7294, 7303), False, 'from ctypes import byref, c_double\n'), ((7672, 7686), 'ctypes.byref', 'byref', (['svr.exd'], {}), '(svr.exd)\n', (7677, 7686), False, 'from ctypes import byref, c_double\n'), ((8089, 8103), 'ctypes.byref', 'byref', (['svr.exd'], {}), '(svr.exd)\n', (8094, 8103), False, 'from ctypes import byref, c_double\n'), ((8476, 8490), 'ctypes.byref', 'byref', (['svr.exd'], {}), '(svr.exd)\n', (8481, 8490), False, 'from ctypes import byref, c_double\n'), ((9125, 9139), 'ctypes.byref', 'byref', (['svr.exd'], {}), '(svr.exd)\n', (9130, 9139), False, 'from ctypes import byref, c_double\n'), ((9581, 9595), 'ctypes.byref', 'byref', (['svr.exd'], {}), '(svr.exd)\n', (9586, 9595), False, 'from ctypes import byref, c_double\n')]
|
#%%
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import matplotlib.cm as cm
from tqdm import trange, tqdm
from sklearn.metrics import adjusted_rand_score
from argparse import ArgumentParser
from util.config_parser import ConfigParser_with_eval
from pathlib import Path  # needed for the mkdir calls below
#%% parse arguments
def arg_check(value, default):
return value if value else default
default_hypparams_model = "hypparams/model.config"
parser = ArgumentParser()
parser.add_argument("--model", help=f"hyper parameters of model, default is [{default_hypparams_model}]")
args = parser.parse_args()
hypparams_model = arg_check(args.model, default_hypparams_model)
#%%
def load_config(filename):
cp = ConfigParser_with_eval()
cp.read(filename)
return cp
#%%
def get_names():
return np.loadtxt("files.txt", dtype=str)
def get_datas_and_length(names):
datas = [np.loadtxt("DATA/" + name + ".txt") for name in names]
length = [len(d) for d in datas]
return datas, length
def get_results_of_word(names, length):
return _joblib_get_results(names, length, "s")
def get_results_of_letter(names, length):
return _joblib_get_results(names, length, "l")
def get_results_of_duration(names, length):
return _joblib_get_results(names, length, "d")
def _get_results(names, lengths, c):
return [np.loadtxt("results/" + name + "_" + c + ".txt").reshape((-1, l)) for name, l in zip(names, lengths)]
def _joblib_get_results(names, lengths, c):
from joblib import Parallel, delayed
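    # Fan the per-file loadtxt calls out across all CPU cores (n_jobs=-1);
    # each worker loads and reshapes one result file independently.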
def _component(name, length, c):
return np.loadtxt("results/" + name + "_" + c + ".txt").reshape((-1, length))
return Parallel(n_jobs=-1)([delayed(_component)(n, l, c) for n, l in zip(names, lengths)])
def _plot_discreate_sequence(feature, title, sample_data, cmap=None):
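    """Plot the raw feature sequence on top and, below it, the matrix of
    sampled labels (one row per training iteration, one column per frame)."""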
ax = plt.subplot2grid((2, 1), (0, 0))
plt.sca(ax)
ax.plot(feature)
ax.set_xlim((0, feature.shape[0]-1))
plt.ylabel('Feature')
#label matrix
ax = plt.subplot2grid((2, 1), (1, 0))
plt.suptitle(title)
plt.sca(ax)
ax.matshow(sample_data, aspect = 'auto', cmap=cmap)
#write x&y label
plt.xlabel('Frame')
plt.ylabel('Iteration')
plt.xticks(())
Path("figures").mkdir(exist_ok=True)
Path("summary_files").mkdir(exist_ok=True)
#%% config parse
print("Loading model config...")
config_parser = load_config(hypparams_model)
section = config_parser["model"]
word_num = section["word_num"]
letter_num = section["letter_num"]
print("Done!")
#%%
print("Loading results....")
names = get_names()
datas, length = get_datas_and_length(names)
l_results = get_results_of_letter(names, length)
w_results = get_results_of_word(names, length)
d_results = get_results_of_duration(names, length)
log_likelihood = np.loadtxt("summary_files/log_likelihood.txt")
resample_times = np.loadtxt("summary_files/resample_times.txt")
print("Done!")
train_iter = l_results[0].shape[0]
#%%
lcolors = ListedColormap([cm.tab20(float(i)/letter_num) for i in range(letter_num)])
wcolors = ListedColormap([cm.tab20(float(i)/word_num) for i in range(word_num)])
#%%
print("Plot results...")
for i, name in enumerate(tqdm(names)):
plt.clf()
_plot_discreate_sequence(datas[i], name + "_l", l_results[i], cmap=lcolors)
plt.savefig("figures/" + name + "_l.png")
plt.clf()
_plot_discreate_sequence(datas[i], name + "_s", w_results[i], cmap=wcolors)
plt.savefig("figures/" + name + "_s.png")
plt.clf()
_plot_discreate_sequence(datas[i], name + "_d", d_results[i], cmap=cm.binary)
plt.savefig("figures/" + name + "_d.png")
print("Done!")
#%%
plt.clf()
plt.title("Log likelihood")
plt.plot(range(train_iter+1), log_likelihood, ".-")
plt.savefig("figures/Log_likelihood.png")
#%%
plt.clf()
plt.title("Resample times")
plt.plot(range(train_iter), resample_times, ".-")
plt.savefig("figures/Resample_times.png")
#%%
with open("summary_files/Sum_of_resample_times.txt", "w") as f:
f.write(str(np.sum(resample_times)))
|
[
"matplotlib.pyplot.title",
"tqdm.tqdm",
"util.config_parser.ConfigParser_with_eval",
"numpy.sum",
"argparse.ArgumentParser",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.subplot2grid",
"joblib.Parallel",
"numpy.loadtxt",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.ylabel",
"joblib.delayed",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((449, 465), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (463, 465), False, 'from argparse import ArgumentParser\n'), ((2757, 2803), 'numpy.loadtxt', 'np.loadtxt', (['"""summary_files/log_likelihood.txt"""'], {}), "('summary_files/log_likelihood.txt')\n", (2767, 2803), True, 'import numpy as np\n'), ((2821, 2867), 'numpy.loadtxt', 'np.loadtxt', (['"""summary_files/resample_times.txt"""'], {}), "('summary_files/resample_times.txt')\n", (2831, 2867), True, 'import numpy as np\n'), ((3601, 3610), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3608, 3610), True, 'import matplotlib.pyplot as plt\n'), ((3611, 3638), 'matplotlib.pyplot.title', 'plt.title', (['"""Log likelihood"""'], {}), "('Log likelihood')\n", (3620, 3638), True, 'import matplotlib.pyplot as plt\n'), ((3691, 3732), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/Log_likelihood.png"""'], {}), "('figures/Log_likelihood.png')\n", (3702, 3732), True, 'import matplotlib.pyplot as plt\n'), ((3738, 3747), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3745, 3747), True, 'import matplotlib.pyplot as plt\n'), ((3748, 3775), 'matplotlib.pyplot.title', 'plt.title', (['"""Resample times"""'], {}), "('Resample times')\n", (3757, 3775), True, 'import matplotlib.pyplot as plt\n'), ((3826, 3867), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/Resample_times.png"""'], {}), "('figures/Resample_times.png')\n", (3837, 3867), True, 'import matplotlib.pyplot as plt\n'), ((706, 730), 'util.config_parser.ConfigParser_with_eval', 'ConfigParser_with_eval', ([], {}), '()\n', (728, 730), False, 'from util.config_parser import ConfigParser_with_eval\n'), ((800, 834), 'numpy.loadtxt', 'np.loadtxt', (['"""files.txt"""'], {'dtype': 'str'}), "('files.txt', dtype=str)\n", (810, 834), True, 'import numpy as np\n'), ((1817, 1849), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 1)', '(0, 0)'], {}), '((2, 1), (0, 0))\n', (1833, 1849), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1865), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (1861, 1865), True, 'import matplotlib.pyplot as plt\n'), ((1932, 1953), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Feature"""'], {}), "('Feature')\n", (1942, 1953), True, 'import matplotlib.pyplot as plt\n'), ((1981, 2013), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 1)', '(1, 0)'], {}), '((2, 1), (1, 0))\n', (1997, 2013), True, 'import matplotlib.pyplot as plt\n'), ((2018, 2037), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (2030, 2037), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2053), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (2049, 2053), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2154), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frame"""'], {}), "('Frame')\n", (2145, 2154), True, 'import matplotlib.pyplot as plt\n'), ((2159, 2182), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2169, 2182), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2201), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (2197, 2201), True, 'import matplotlib.pyplot as plt\n'), ((3145, 3156), 'tqdm.tqdm', 'tqdm', (['names'], {}), '(names)\n', (3149, 3156), False, 'from tqdm import trange, tqdm\n'), ((3163, 3172), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3170, 3172), True, 'import matplotlib.pyplot as plt\n'), ((3257, 3298), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('figures/' + name + '_l.png')"], 
{}), "('figures/' + name + '_l.png')\n", (3268, 3298), True, 'import matplotlib.pyplot as plt\n'), ((3303, 3312), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3310, 3312), True, 'import matplotlib.pyplot as plt\n'), ((3397, 3438), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('figures/' + name + '_s.png')"], {}), "('figures/' + name + '_s.png')\n", (3408, 3438), True, 'import matplotlib.pyplot as plt\n'), ((3443, 3452), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3450, 3452), True, 'import matplotlib.pyplot as plt\n'), ((3539, 3580), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('figures/' + name + '_d.png')"], {}), "('figures/' + name + '_d.png')\n", (3550, 3580), True, 'import matplotlib.pyplot as plt\n'), ((882, 917), 'numpy.loadtxt', 'np.loadtxt', (["('DATA/' + name + '.txt')"], {}), "('DATA/' + name + '.txt')\n", (892, 917), True, 'import numpy as np\n'), ((1653, 1672), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1661, 1672), False, 'from joblib import Parallel, delayed\n'), ((3953, 3975), 'numpy.sum', 'np.sum', (['resample_times'], {}), '(resample_times)\n', (3959, 3975), True, 'import numpy as np\n'), ((1331, 1379), 'numpy.loadtxt', 'np.loadtxt', (["('results/' + name + '_' + c + '.txt')"], {}), "('results/' + name + '_' + c + '.txt')\n", (1341, 1379), True, 'import numpy as np\n'), ((1571, 1619), 'numpy.loadtxt', 'np.loadtxt', (["('results/' + name + '_' + c + '.txt')"], {}), "('results/' + name + '_' + c + '.txt')\n", (1581, 1619), True, 'import numpy as np\n'), ((1674, 1693), 'joblib.delayed', 'delayed', (['_component'], {}), '(_component)\n', (1681, 1693), False, 'from joblib import Parallel, delayed\n')]
|
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import netCDF4 as nc
import pickle as pk
import pandas as pd
import datetime
import os
import numpy as np
import sys
src_dir = os.path.join(os.getcwd(), 'src/data')
sys.path.append(src_dir)
from helper import save_pkl, load_pkl, min_max_norm
src_dir = os.path.join(os.getcwd(), 'src/models')
sys.path.append(src_dir)
# from competition_model_class import Seq2Seq_Class # during Debug and Developing
from seq2seq_class import Seq2Seq_Class # during the game and competition
def train(processed_path, train_data, val_data, model_save_path, model_name):
train_dict = load_pkl(processed_path, train_data)
val_dict = load_pkl(processed_path, val_data)
print(train_dict.keys())
print('Original input_obs data shape:')
print(train_dict['input_obs'].shape)
print(val_dict['input_obs'].shape)
print('After clipping the 9 days, input_obs data shape:')
train_dict['input_obs'] = train_dict['input_obs'][:,:-9,:,:]
val_dict['input_obs'] = val_dict['input_obs'][:,:-9,:,:]
print(train_dict['input_obs'].shape)
print(val_dict['input_obs'].shape)
enc_dec = Seq2Seq_Class(model_save_path=model_save_path,
model_structure_name=model_name,
model_weights_name=model_name,
model_name=model_name)
enc_dec.build_graph()
    val_size = val_dict['input_ruitu'].shape[0]  # 87 validation samples
val_ids=[]
val_times=[]
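    # Auxiliary decoder inputs for validation: every sample gets id values
    # 0..9, each repeated across the 37 output steps, plus the step index
    # 0..36 itself (shapes are printed below as a sanity check).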
for i in range(10):
val_ids.append(np.ones(shape=(val_size,37))*i)
val_ids = np.stack(val_ids, axis=-1)
print('val_ids.shape is:', val_ids.shape)
val_times = np.array(range(37))
val_times = np.tile(val_times,(val_size,1))
print('val_times.shape is:',val_times.shape)
enc_dec.fit(train_dict['input_obs'], train_dict['input_ruitu'], train_dict['ground_truth'],
val_dict['input_obs'], val_dict['input_ruitu'], val_dict['ground_truth'], val_ids = val_ids, val_times=val_times,
iterations=10000, batch_size=512, validation=True)
print('Training finished!')
@click.command()
@click.argument('processed_path', type=click.Path(exists=True))
@click.option('--train_data', type=str)
@click.option('--val_data', type=str)
@click.argument('model_save_path', type=click.Path(exists=True))
@click.option('--model_name', type=str)
def main(processed_path, train_data, val_data, model_save_path, model_name):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
train(processed_path, train_data, val_data, model_save_path, model_name)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
|
[
"sys.path.append",
"numpy.stack",
"logging.basicConfig",
"dotenv.find_dotenv",
"os.getcwd",
"click.option",
"numpy.ones",
"click.command",
"pathlib.Path",
"click.Path",
"numpy.tile",
"seq2seq_class.Seq2Seq_Class",
"helper.load_pkl",
"logging.getLogger"
] |
[((287, 311), 'sys.path.append', 'sys.path.append', (['src_dir'], {}), '(src_dir)\n', (302, 311), False, 'import sys\n'), ((414, 438), 'sys.path.append', 'sys.path.append', (['src_dir'], {}), '(src_dir)\n', (429, 438), False, 'import sys\n'), ((2158, 2173), 'click.command', 'click.command', ([], {}), '()\n', (2171, 2173), False, 'import click\n'), ((2239, 2277), 'click.option', 'click.option', (['"""--train_data"""'], {'type': 'str'}), "('--train_data', type=str)\n", (2251, 2277), False, 'import click\n'), ((2279, 2315), 'click.option', 'click.option', (['"""--val_data"""'], {'type': 'str'}), "('--val_data', type=str)\n", (2291, 2315), False, 'import click\n'), ((2382, 2420), 'click.option', 'click.option', (['"""--model_name"""'], {'type': 'str'}), "('--model_name', type=str)\n", (2394, 2420), False, 'import click\n'), ((262, 273), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (271, 273), False, 'import os\n'), ((387, 398), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (396, 398), False, 'import os\n'), ((692, 728), 'helper.load_pkl', 'load_pkl', (['processed_path', 'train_data'], {}), '(processed_path, train_data)\n', (700, 728), False, 'from helper import save_pkl, load_pkl, min_max_norm\n'), ((744, 778), 'helper.load_pkl', 'load_pkl', (['processed_path', 'val_data'], {}), '(processed_path, val_data)\n', (752, 778), False, 'from helper import save_pkl, load_pkl, min_max_norm\n'), ((1218, 1356), 'seq2seq_class.Seq2Seq_Class', 'Seq2Seq_Class', ([], {'model_save_path': 'model_save_path', 'model_structure_name': 'model_name', 'model_weights_name': 'model_name', 'model_name': 'model_name'}), '(model_save_path=model_save_path, model_structure_name=\n model_name, model_weights_name=model_name, model_name=model_name)\n', (1231, 1356), False, 'from seq2seq_class import Seq2Seq_Class\n'), ((1632, 1658), 'numpy.stack', 'np.stack', (['val_ids'], {'axis': '(-1)'}), '(val_ids, axis=-1)\n', (1640, 1658), True, 'import numpy as np\n'), ((1757, 1790), 'numpy.tile', 'np.tile', (['val_times', '(val_size, 1)'], {}), '(val_times, (val_size, 1))\n', (1764, 1790), True, 'import numpy as np\n'), ((2660, 2687), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2677, 2687), False, 'import logging\n'), ((2871, 2926), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (2890, 2926), False, 'import logging\n'), ((2213, 2236), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (2223, 2236), False, 'import click\n'), ((2356, 2379), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (2366, 2379), False, 'import click\n'), ((3205, 3218), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (3216, 3218), False, 'from dotenv import find_dotenv, load_dotenv\n'), ((1586, 1615), 'numpy.ones', 'np.ones', ([], {'shape': '(val_size, 37)'}), '(shape=(val_size, 37))\n', (1593, 1615), True, 'import numpy as np\n'), ((3017, 3031), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3021, 3031), False, 'from pathlib import Path\n')]
|
import pickle
import numpy as np
import pandas as pd
import shap
import matplotlib.pyplot as pl
shap.initjs()
json_path = "response.json"
model_path = "xgboost_primary_model.pkl"
AGE_GROUP_CUTOFFS = [0, 17, 30, 40, 50, 60, 70, 120]
AGE_GROUPS_TRANSFORMER = {1: 10, 2: 25, 3: 35, 4: 45, 5: 55, 6: 65, 7: 75}
AGE_COL = 'age_group'
X_COLS = ["gender", AGE_COL, "condition_any", "symptom_well", "symptom_sore_throat", "symptom_cough",
"symptom_shortness_of_breath", "symptom_smell_or_taste_loss", "symptom_fever"]
def add_age_group(df):
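    """Bucket raw `age` into the AGE_GROUP_CUTOFFS bins and replace it with
    the bin's representative age (e.g. ages 31-40 map to 35)."""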
df[AGE_COL] = pd.cut(df['age'], bins=AGE_GROUP_CUTOFFS, labels=AGE_GROUPS_TRANSFORMER.values(), include_lowest=True, right=True)
df[AGE_COL] = df[AGE_COL].astype(int)
return df
def get_prediction(json_path, model_path):
response_df = pd.read_json(json_path, lines=True)
response_df = add_age_group(response_df)
response_df = response_df[X_COLS].sort_index(axis=1)
model = pickle.load(open(model_path, "rb"))
predictions = model.predict_proba(response_df)
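    # predict_proba returns one [P(negative), P(positive)] row per sample;
    # keep the positive-class probability of the single response.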
predicted_probability = np.round(predictions[:, 1][0], 3)
return predicted_probability
if __name__ == '__main__':
print("The response probability to test positive according to our model is:", get_prediction(json_path, model_path))
model = pickle.load(open('xgboost_primary_model.pkl', "rb"))
explainer = shap.TreeExplainer(model)
data = pd.read_csv('../creating_the_models/primary model.csv')
BASE_MODEL_X_COLS = ['gender', 'age_group']
X_COLS = BASE_MODEL_X_COLS + \
['symptom_well',
'symptom_sore_throat',
'symptom_cough',
'symptom_shortness_of_breath',
'symptom_smell_or_taste_loss',
'symptom_fever',
'condition_any']
X = data[X_COLS].sort_index(axis=1)
y = data['label'].values.ravel()
shap_values = explainer.shap_values(X)
shap.force_plot(explainer.expected_value, shap_values[0, :], X.iloc[0, :])
shap.summary_plot(shap_values, X)
|
[
"pandas.read_csv",
"shap.summary_plot",
"pandas.read_json",
"shap.initjs",
"shap.TreeExplainer",
"shap.force_plot",
"numpy.round"
] |
[((98, 111), 'shap.initjs', 'shap.initjs', ([], {}), '()\n', (109, 111), False, 'import shap\n'), ((802, 837), 'pandas.read_json', 'pd.read_json', (['json_path'], {'lines': '(True)'}), '(json_path, lines=True)\n', (814, 837), True, 'import pandas as pd\n'), ((1068, 1101), 'numpy.round', 'np.round', (['predictions[:, 1][0]', '(3)'], {}), '(predictions[:, 1][0], 3)\n', (1076, 1101), True, 'import numpy as np\n'), ((1366, 1391), 'shap.TreeExplainer', 'shap.TreeExplainer', (['model'], {}), '(model)\n', (1384, 1391), False, 'import shap\n'), ((1403, 1458), 'pandas.read_csv', 'pd.read_csv', (['"""../creating_the_models/primary model.csv"""'], {}), "('../creating_the_models/primary model.csv')\n", (1414, 1458), True, 'import pandas as pd\n'), ((1916, 1990), 'shap.force_plot', 'shap.force_plot', (['explainer.expected_value', 'shap_values[0, :]', 'X.iloc[0, :]'], {}), '(explainer.expected_value, shap_values[0, :], X.iloc[0, :])\n', (1931, 1990), False, 'import shap\n'), ((1995, 2028), 'shap.summary_plot', 'shap.summary_plot', (['shap_values', 'X'], {}), '(shap_values, X)\n', (2012, 2028), False, 'import shap\n')]
|
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
from matplotlib import pyplot as plt
pio.templates.default = "simple_white"
SAMPLES = 1000
QUESTION_ONE_MEAN = 10
QUESTION_ONE_VAR = 1
QUESTION_ONE_SAMPLES_SKIP = 10
QUESTION_TWO_RESOLUTION = 200
QUESTION_TWO_GRID_SIZE = 10
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
samples = np.random.normal(QUESTION_ONE_MEAN, QUESTION_ONE_VAR, size=SAMPLES)
univariate_gaussian = UnivariateGaussian()
univariate_gaussian.fit(samples)
print(f"({univariate_gaussian.mu_}, {univariate_gaussian.var_})")
# Question 2 - Empirically showing sample mean is consistent
x = np.arange(QUESTION_ONE_MEAN, SAMPLES + 1, QUESTION_ONE_SAMPLES_SKIP)
estimate_mean_dis = np.vectorize(lambda last_index: np.abs(np.mean(samples[:last_index]) - QUESTION_ONE_MEAN))
fig = go.Figure(
[go.Scatter(x=x, y=estimate_mean_dis(x), mode='markers', name=r'$\left|\hat{\mu}(m)-10\right|$',
showlegend=True)], layout=go.Layout(
title={
"text": r"$\text{Distance Between The Estimated-And True Value Of The Expectations}\\"
r"\text{As Function Of Number Of Samples}$",
'y': 0.84, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
xaxis_title=r"$\text{Number of samples} [m]$", yaxis_title=r"$\left|\hat{\mu}(m)-10\right|$", height=400))
fig.show()
# fig.write_image("estimate_distance.svg")
# Question 3 - Plotting Empirical PDF of fitted model
fig = go.Figure(
[go.Scatter(x=samples, y=univariate_gaussian.pdf(samples), mode='markers',
showlegend=False, marker=dict(size=2))], layout=go.Layout(
title={
"text": r"$\text{Probability Density As Function Of Samples Values}$",
'y': 0.84, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
xaxis_title=r"$\text{Sample value}$", yaxis_title=r"$\text{Probability density}$", height=400))
fig.show()
# fig.write_image("pdf_q1.svg")
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
mu = np.array([0, 0, 4, 0])
sigma = np.array([[1, 0.2, 0, 0.5],
[0.2, 2, 0, 0],
[0, 0, 1, 0],
[0.5, 0, 0, 1]])
samples = np.random.multivariate_normal(mu, sigma, SAMPLES)
multivariate_gaussian = MultivariateGaussian()
multivariate_gaussian.fit(samples)
print(multivariate_gaussian.mu_)
print(multivariate_gaussian.cov_)
# Question 5 - Likelihood evaluation
f1 = np.linspace(-QUESTION_TWO_GRID_SIZE, QUESTION_TWO_GRID_SIZE, QUESTION_TWO_RESOLUTION)
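    # Cartesian product of f1 with itself: `repeat` varies the first
    # coordinate slowly and `tile` varies the second quickly, yielding
    # QUESTION_TWO_RESOLUTION ** 2 (f1, f3) pairs.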
grid_tuples = np.transpose(np.array([np.repeat(f1, len(f1)), np.tile(f1, len(f1))]))
calc_log_likelihood = lambda x1, x3: multivariate_gaussian.log_likelihood(np.array([x1, 0, x3, 0]), sigma, samples)
Z = np.vectorize(calc_log_likelihood)(grid_tuples[:, 0], grid_tuples[:, 1]).reshape(QUESTION_TWO_RESOLUTION,
QUESTION_TWO_RESOLUTION)
fig, ax = plt.subplots()
heat_map = ax.pcolormesh(f1, f1, Z)
fig.colorbar(heat_map, format='%.e')
ax.set_title("log-likelihood for " + r"$\mu=\left[f_{1},0,f_{3},0\right]{}^{T}$")
ax.set_xlabel("$f_{3}$")
ax.set_ylabel("$f_{1}$")
plt.show()
# Question 6 - Maximum likelihood
max_coordinates = np.where(Z == np.amax(Z))
print(f"({round(f1[max_coordinates[0]][0], 3)}, {round(f1[max_coordinates[1]][0], 3)})")
if __name__ == '__main__':
np.random.seed(0)
test_univariate_gaussian()
test_multivariate_gaussian()
|
[
"IMLearn.learners.UnivariateGaussian",
"matplotlib.pyplot.show",
"numpy.random.seed",
"numpy.vectorize",
"IMLearn.learners.MultivariateGaussian",
"numpy.amax",
"numpy.mean",
"numpy.random.multivariate_normal",
"numpy.arange",
"numpy.array",
"numpy.random.normal",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"plotly.graph_objects.Layout"
] |
[((476, 543), 'numpy.random.normal', 'np.random.normal', (['QUESTION_ONE_MEAN', 'QUESTION_ONE_VAR'], {'size': 'SAMPLES'}), '(QUESTION_ONE_MEAN, QUESTION_ONE_VAR, size=SAMPLES)\n', (492, 543), True, 'import numpy as np\n'), ((570, 590), 'IMLearn.learners.UnivariateGaussian', 'UnivariateGaussian', ([], {}), '()\n', (588, 590), False, 'from IMLearn.learners import UnivariateGaussian, MultivariateGaussian\n'), ((772, 840), 'numpy.arange', 'np.arange', (['QUESTION_ONE_MEAN', '(SAMPLES + 1)', 'QUESTION_ONE_SAMPLES_SKIP'], {}), '(QUESTION_ONE_MEAN, SAMPLES + 1, QUESTION_ONE_SAMPLES_SKIP)\n', (781, 840), True, 'import numpy as np\n'), ((2274, 2296), 'numpy.array', 'np.array', (['[0, 0, 4, 0]'], {}), '([0, 0, 4, 0])\n', (2282, 2296), True, 'import numpy as np\n'), ((2309, 2383), 'numpy.array', 'np.array', (['[[1, 0.2, 0, 0.5], [0.2, 2, 0, 0], [0, 0, 1, 0], [0.5, 0, 0, 1]]'], {}), '([[1, 0.2, 0, 0.5], [0.2, 2, 0, 0], [0, 0, 1, 0], [0.5, 0, 0, 1]])\n', (2317, 2383), True, 'import numpy as np\n'), ((2464, 2513), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'sigma', 'SAMPLES'], {}), '(mu, sigma, SAMPLES)\n', (2493, 2513), True, 'import numpy as np\n'), ((2542, 2564), 'IMLearn.learners.MultivariateGaussian', 'MultivariateGaussian', ([], {}), '()\n', (2562, 2564), False, 'from IMLearn.learners import UnivariateGaussian, MultivariateGaussian\n'), ((2730, 2819), 'numpy.linspace', 'np.linspace', (['(-QUESTION_TWO_GRID_SIZE)', 'QUESTION_TWO_GRID_SIZE', 'QUESTION_TWO_RESOLUTION'], {}), '(-QUESTION_TWO_GRID_SIZE, QUESTION_TWO_GRID_SIZE,\n QUESTION_TWO_RESOLUTION)\n', (2741, 2819), True, 'import numpy as np\n'), ((3265, 3279), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3277, 3279), True, 'from matplotlib import pyplot as plt\n'), ((3509, 3519), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3517, 3519), True, 'from matplotlib import pyplot as plt\n'), ((3734, 3751), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3748, 3751), True, 'import numpy as np\n'), ((1128, 1463), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': "{'text':\n '$\\\\text{Distance Between The Estimated-And True Value Of The Expectations}\\\\\\\\\\\\text{As Function Of Number Of Samples}$'\n , 'y': 0.84, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'}", 'xaxis_title': '"""$\\\\text{Number of samples} [m]$"""', 'yaxis_title': '"""$\\\\left|\\\\hat{\\\\mu}(m)-10\\\\right|$"""', 'height': '(400)'}), "(title={'text':\n '$\\\\text{Distance Between The Estimated-And True Value Of The Expectations}\\\\\\\\\\\\text{As Function Of Number Of Samples}$'\n , 'y': 0.84, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},\n xaxis_title='$\\\\text{Number of samples} [m]$', yaxis_title=\n '$\\\\left|\\\\hat{\\\\mu}(m)-10\\\\right|$', height=400)\n", (1137, 1463), True, 'import plotly.graph_objects as go\n'), ((1820, 2081), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': "{'text': '$\\\\text{Probability Density As Function Of Samples Values}$', 'y':\n 0.84, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'}", 'xaxis_title': '"""$\\\\text{Sample value}$"""', 'yaxis_title': '"""$\\\\text{Probability density}$"""', 'height': '(400)'}), "(title={'text':\n '$\\\\text{Probability Density As Function Of Samples Values}$', 'y': \n 0.84, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'}, xaxis_title=\n '$\\\\text{Sample value}$', yaxis_title='$\\\\text{Probability density}$',\n height=400)\n", (1829, 2081), True, 'import plotly.graph_objects as go\n'), ((2983, 3007), 
'numpy.array', 'np.array', (['[x1, 0, x3, 0]'], {}), '([x1, 0, x3, 0])\n', (2991, 3007), True, 'import numpy as np\n'), ((3596, 3606), 'numpy.amax', 'np.amax', (['Z'], {}), '(Z)\n', (3603, 3606), True, 'import numpy as np\n'), ((3033, 3066), 'numpy.vectorize', 'np.vectorize', (['calc_log_likelihood'], {}), '(calc_log_likelihood)\n', (3045, 3066), True, 'import numpy as np\n'), ((904, 933), 'numpy.mean', 'np.mean', (['samples[:last_index]'], {}), '(samples[:last_index])\n', (911, 933), True, 'import numpy as np\n')]
|
import numpy as np
from lmfit.model import Model
class PDFdecayModel(Model):
r"""A model to describe the product of a decaying exponential and a Gaussian
with three parameters: ``amplitude``, ``xi``, and ``sigma``
.. math::
        f(x; A, \xi, \sigma) = A e^{-|x|/\xi}\, e^{-x^2/(2\sigma^2)}
where the parameter ``amplitude`` corresponds to :math:`A`, ``xi`` to
:math:`\xi`, and ``sigma`` to :math:`\sigma`.
"""
def __init__(self, **kwargs):
def pdfdecay(x, amplitude=1.0, xi=1.0, sigma=1.0):
return amplitude * np.exp(-abs(x)/xi) * np.exp(-x**2/(2*sigma**2))
super().__init__(pdfdecay, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
sigma = np.sqrt(np.fabs((x**2*data).sum() / data.sum()))
return self.make_params(amplitude=data.max(), xi=sigma, sigma=sigma)
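# Minimal usage sketch (hypothetical x/y arrays, standard lmfit Model API):
#   model = PDFdecayModel()
#   params = model.guess(y, x=x)
#   result = model.fit(y, params, x=x)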
|
[
"numpy.exp"
] |
[((601, 635), 'numpy.exp', 'np.exp', (['(-x ** 2 / (2 * sigma ** 2))'], {}), '(-x ** 2 / (2 * sigma ** 2))\n', (607, 635), True, 'import numpy as np\n')]
|
import pygame
import numpy as np
from collections import OrderedDict
from Utility.shape import Rectangle
from Utility import ui
from Level.generic_level import GenericLevel
class Level(GenericLevel):
def __init__(self, player, **kwargs):
super().__init__(**kwargs)
self.player = player
self.hurdle_cords = [
(130, 30, 30, self.gameDimension[1] - 60),
(200, 0, 30, self.gameDimension[1] // 2 - 20),
(200, self.gameDimension[1] // 2 + 20, 30, self.gameDimension[1] // 2 - 15)
]
self.hurdle_cords.append((160, self.gameDimension[1] // 2 + 20, 40, 40))
for i in range(1, 3):
self.hurdle_cords.append((130 + i * 180, 30, 30, self.gameDimension[1] - 60))
self.hurdle_cords.append((130 + i * 180 + 70, 0, 30, self.gameDimension[1] // 2 - 20))
self.hurdle_cords.append((130 + i * 180 + 70, self.gameDimension[1] // 2 + 20,
30, self.gameDimension[1] // 2 - 15))
        # TODO: derive these coordinates instead of hard-coding them.
self.hurdle_cords.append((340, 140, 40, 40))
self.hurdle_cords.append((520, 220, 40, 40))
self.hurdle = [Rectangle(x, y, l, w, (190, 220, 220)) for x, y, w, l in self.hurdle_cords]
self.food_exists = True
self.food_cords = [Rectangle(x=640, y=190, length=30, width=30, color=None)]
self.food = pygame.transform.scale(pygame.image.load(r"Resources/Food/banana.png"), (self.food_cords[0].width,
self.food_cords[0].length))
def draw_hurdle(self):
for hurdle in self.hurdle:
pygame.draw.rect(self.gameDisplay, hurdle.color, (hurdle.x, hurdle.y, hurdle.width, hurdle.length))
size = hurdle.length // hurdle.width
for y in range(size):
pygame.draw.line(self.gameDisplay, self.grid_lines, (hurdle.x, hurdle.y + y*hurdle.width),
(hurdle.x + hurdle.width, hurdle.y + y*hurdle.width))
pygame.draw.circle(self.gameDisplay, (220, 50, 50), (hurdle.x + hurdle.width // 2, hurdle.y + y*hurdle.width + hurdle.width // 2), 3)
                pygame.draw.circle(self.gameDisplay, (220, 239, 0), (hurdle.x + hurdle.width // 2, hurdle.y + y * hurdle.width + hurdle.width // 2),
                                   1)
def show_player(self, draw=True):
if draw:
pygame.draw.rect(self.gameDisplay, self.player.color,
(self.player.x, self.player.y, self.player.length, self.player.length))
return
blit_img = self.player.characterDefault
if not (self.player.left or self.player.right or self.player.up or self.player.down):
blit_img = self.player.characterDefault
self.player.r_img = self.player.u_img = self.player.d_img = self.player.l_img = 0
elif self.player.left:
blit_img = self.player.movements['Left'][self.player.l_img]
self.player.l_img = (self.player.l_img + 1) % 4
self.player.r_img = self.player.u_img = self.player.d_img = 0
elif self.player.right:
blit_img = self.player.movements['Right'][self.player.r_img]
self.player.r_img = (self.player.r_img + 1) % 4
self.player.l_img = self.player.u_img = self.player.d_img = 0
elif self.player.up:
blit_img = self.player.movements['Up'][self.player.u_img]
self.player.u_img = (self.player.u_img + 1) % 4
self.player.r_img = self.player.l_img = self.player.d_img = 0
elif self.player.down:
blit_img = self.player.movements['Down'][self.player.d_img]
self.player.d_img = (self.player.d_img + 1) % 4
self.player.r_img = self.player.u_img = self.player.l_img = 0
self.gameDisplay.blit(blit_img, (self.player.x, self.player.y))
def draw_food(self):
if self.food_exists:
self.gameDisplay.blit(self.food, (self.food_cords[0].x, self.food_cords[0].y))
def show(self, *args):
self.gameDisplay.fill(self.background)
self.draw_grids(*args)
self.draw_hurdle()
self.draw_food()
self.show_player(draw=self.player.draw)
def pause_game(self, *args):
resume = False
while not resume:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_s:
resume = True
self.gameDisplay.fill((255, 255, 255))
ui.message(gameDisplay=self.gameDisplay,msg="Press, S to Start", x=self.gameDimension[0] // 2 - 50, y=self.gameDimension[1] // 2)
pygame.display.update()
self.clock.tick(30)
def dynamics(self, *args):
self.player.move()
def wall_logic(self):
if self.player.x < 0:
self.player.x = 0
self.player.left = False
elif self.player.x + self.player.width > self.gameDimension[0]:
self.player.x = self.gameDimension[0] - self.player.width
self.player.right = False
if self.player.y < 0:
self.player.y = 0
self.player.up = False
elif self.player.y + self.player.length > self.gameDimension[1]:
self.player.y = self.gameDimension[1] - self.player.length
self.player.down = False
def hurdle_contact(self, blocks):
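        """Return the first block whose axis-aligned bounding box overlaps the
        player's (checking left/right/centre edges on both axes), else None."""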
for hurdle in blocks:
if hurdle.x > self.player.x + self.player.width:
continue
if ((hurdle.x < self.player.x + self.player.width < hurdle.x + hurdle.width)
or (hurdle.x < self.player.x < hurdle.x + hurdle.width)
or (hurdle.x < self.player.x + self.player.width // 2 < hurdle.x + hurdle.width)) \
and \
((hurdle.y < self.player.y + self.player.length < hurdle.y + hurdle.length)
or (hurdle.y < self.player.y < hurdle.y + hurdle.length)
or (hurdle.y < self.player.y + self.player.length // 2 < hurdle.y + hurdle.length)):
return hurdle
return None
def hurdle_logic(self):
cord = self.hurdle_contact(self.hurdle)
if cord is None:
return
if self.player.right:
self.player.x = cord.x - self.player.width
self.player.right = False
elif self.player.left:
self.player.x = cord.x + cord.width
self.player.left = False
if self.player.down:
self.player.y = cord.y - self.player.length
self.player.down = False
elif self.player.up:
self.player.y = cord.y + cord.length
self.player.up = False
def food_logic(self):
if self.hurdle_contact(self.food_cords):
self.player.gotFood = True
self.food_exists = False
self.player.characterDefault = self.player.winDefault
self.player.left = self.player.right = self.player.up = self.player.down = False
def collision(self, *args):
self.wall_logic()
self.hurdle_logic()
self.food_logic()
def have_won(self, *args):
self.show(*args)
ui.message(gameDisplay=self.gameDisplay,msg="Yeah.!", x=self.gameDimension[0] // 2 - 50, y=self.gameDimension[1] // 2 - 50,
color=(100, 200, 100), font_size=50)
pygame.display.flip()
def have_died(self, *args):
pass
def start_game(self, *args):
# self.pause_game()
while self.player.alive and not self.player.gotFood:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
self.player.left = True
self.player.right = self.player.up = self.player.down = False
if event.key == pygame.K_RIGHT:
self.player.right = True
self.player.left = self.player.up = self.player.down = False
if event.key == pygame.K_UP:
self.player.up = True
self.player.right = self.player.left = self.player.down = False
if event.key == pygame.K_DOWN:
self.player.down = True
self.player.right = self.player.up = self.player.left = False
if event.key == pygame.K_p:
self.pause_game()
self.show()
self.dynamics()
self.collision()
pygame.display.flip()
if self.screen_capt:
self.read_screen(stream=self.stream, gray=self.gray, maxi=self.maxi, store=self.store,
player=self.player, vision_limit=50)
self.clock.tick(30)
if self.player.alive and self.player.gotFood:
self.have_won()
pygame.time.wait(2000)
class Level_PathFinding(Level):
def __init__(self, player, **kwargs):
super().__init__(player, **kwargs)
self.wall = np.zeros((self.gameDimension[1] // 10, self.gameDimension[0] // 10))
for hurdle in self.hurdle:
for x in range(hurdle.x // 10, hurdle.x // 10 + hurdle.width // 10):
for y in range(hurdle.y // 10, hurdle.y // 10 + hurdle.length // 10):
self.wall[y, x] = 1
self.wall[y - 1, x] = 1
self.wall[y - 2, x] = 1
# self.wall[y - 3, x] = 1
self.wall[y, x - 1] = 1
self.wall[y, x - 2] = 1
self.wall[y, x - 3] = 1
self.wall[y - 1, x - 1] = 1
self.wall[y - 2, x - 2] = 1
# self.wall[y - 3, x - 3] = 1
self.f_score = np.full(self.wall.shape, np.inf)
        self.g_score = np.full(self.wall.shape, np.inf)  # cost-from-start; unvisited cells start at infinity
self.not_visited = list()
self.visited = list()
self.neighbour = OrderedDict()
self.came_from = OrderedDict()
        self.cur_idx = None
for i in range(self.wall.shape[0]):
for j in range(self.wall.shape[1]):
self.neighbour[(i, j)] = self.get_neighbours(i, j)
self.start_pos = (self.player.y // 10, self.player.x // 10)
self.end_pos = (self.food_cords[0].y // 10, self.food_cords[0].x // 10)
def get_neighbours(self, i, j):
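        """Return the 4-connected grid neighbours of (i, j), clipped to the
        board (diagonal moves are left commented out below)."""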
possible_neighbours = []
if i > 0:
possible_neighbours.append((i-1, j))
if i < self.wall.shape[0] - 1:
possible_neighbours.append((i + 1, j))
if j > 0:
possible_neighbours.append((i, j - 1))
if j < self.wall.shape[1] - 1:
possible_neighbours.append((i, j + 1))
# if i > 0 and j > 0:
# possible_neighbours.append((i-1, j-1))
# if i < self.wall.shape[0] - 1 and j < self.wall.shape[1] - 1:
# possible_neighbours.append((i + 1, j+1))
# if j > 0 and i < self.wall.shape[0] - 1:
# possible_neighbours.append((i+1, j - 1))
# if j < self.wall.shape[1] - 1 and i > 0:
# possible_neighbours.append((i-1, j + 1))
return possible_neighbours
def find_path_a_star(self):
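        """A* search on the 10px grid: uniform move cost of 10 per step, a
        Euclidean heuristic, and on-screen visualisation of each expansion."""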
        self.not_visited += [self.start_pos]
        self.g_score[self.start_pos] = 0  # the start costs nothing to reach
while len(self.not_visited) > 0:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
cur_idx = self.not_visited[0]
for i, j in self.not_visited:
if self.f_score[(i, j)] < self.f_score[cur_idx]:
cur_idx = (i, j)
if cur_idx == self.end_pos:
# pygame.time.wait(3000)
self.cur_idx = cur_idx
return
self.not_visited.remove(cur_idx)
self.visited.append(cur_idx)
for neighbour in self.neighbour[cur_idx]:
if neighbour not in self.visited and self.wall[neighbour] == 0:
                    # Tentative cost of reaching `neighbour` through `cur_idx`
                    # (uniform move cost of 10 per grid step).
                    estimated_g_score = self.g_score[cur_idx] + 10
                    if neighbour not in self.not_visited:
                        self.not_visited.append(neighbour)
                    elif estimated_g_score >= self.g_score[neighbour]:
                        continue
self.g_score[neighbour] = estimated_g_score
# self.f_score[neighbour] = estimated_g_score + (abs(self.end_pos[0] - neighbour[0])*10 +
# abs(self.end_pos[1] - neighbour[1])*10)
self.f_score[neighbour] = estimated_g_score + np.sqrt((self.end_pos[0]*10 - neighbour[0]*10)**2 +
(self.end_pos[1]*10 - neighbour[1]*10)**2)
self.came_from[neighbour] = cur_idx
self.show(cur_idx)
pygame.display.update()
self.clock.tick(30)
print("No Path")
def draw_grids(self, current):
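        """Shade the open set (light grey), the closed set (dark grey) and the
        path reconstructed through came_from (blue), then overlay grid lines."""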
for point in self.not_visited:
pygame.draw.rect(self.gameDisplay, (200, 200, 200), (point[1] * 10, point[0] * 10, 10, 10))
for point in self.visited:
pygame.draw.rect(self.gameDisplay, (120, 120, 120), (point[1] * 10, point[0] * 10, 10, 10))
to_draw = list()
to_draw.append(current)
while current in self.came_from.keys():
current = self.came_from[current]
to_draw.append(current)
for point in to_draw:
pygame.draw.rect(self.gameDisplay, (0, 0, 250), (point[1] * 10, point[0] * 10, 10, 10))
for x in range(0, self.gameDimension[0], 10):
pygame.draw.line(self.gameDisplay, self.grid_lines, (x, 0), (x, self.gameDimension[1]))
for y in range(0, self.gameDimension[1], 10):
pygame.draw.line(self.gameDisplay, self.grid_lines, (0, y), (self.gameDimension[0], y))
# def draw_grids_path(self, current):
# for point in self.not_visited:
# pygame.draw.rect(self.gameDisplay, (0, 200, 0), (point[0] * 10, point[1] * 10, 10, 10))
# for point in self.visited:
# pygame.draw.rect(self.gameDisplay, (255, 0, 0), (point[0] * 10, point[1] * 10, 10, 10))
#
# to_draw = list()
# to_draw.append(current)
# while current in self.came_from.keys():
# current = self.came_from[current]
# to_draw.append(current)
#
# for point in to_draw:
# pygame.draw.rect(self.gameDisplay, (0, 0, 250), (point[0] * 10, point[1] * 10, 10, 10))
def start_game(self):
# self.pause_game()
self.find_path_a_star()
current = self.cur_idx
prev = current
# 0 - l, 1 - r, 2 - u, 3 - d
moves = []
c = 0
while current in self.came_from.keys():
c += 1
current = self.came_from[current]
if current[0] > prev[0] and current[1] == prev[1]:
moves.insert(0, 2)
if current[0] < prev[0] and current[1] == prev[1]:
moves.insert(0, 3)
if current[1] < prev[1] and current[0] == prev[0]:
moves.insert(0, 1)
if current[1] > prev[1] and current[0] == prev[0]:
moves.insert(0, 0)
prev = current
move_idx = 0
while self.player.alive and not self.player.gotFood and move_idx < len(moves):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
self.pause_game()
if moves[move_idx] == 0:
self.player.left = True
self.player.right = self.player.up = self.player.down = False
if moves[move_idx] == 1:
self.player.right = True
self.player.left = self.player.up = self.player.down = False
if moves[move_idx] == 2:
self.player.up = True
self.player.right = self.player.left = self.player.down = False
if moves[move_idx] == 3:
self.player.down = True
self.player.right = self.player.up = self.player.left = False
self.show(self.cur_idx)
            pygame.draw.rect(self.gameDisplay, (200, 250, 190), (self.player.x, self.player.y, 10, 10))
self.dynamics()
self.collision()
pygame.display.flip()
if self.screen_capt:
self.read_screen(stream=self.stream, gray=self.gray, maxi=self.maxi, store=self.store,
player=self.player, vision_limit=50)
self.clock.tick(30)
move_idx += 1
if self.player.alive and self.player.gotFood:
self.have_won(self.cur_idx)
pygame.time.wait(5_000)
if __name__ == "__main__":
lvl = Level(None, (600, 350))
lvl.start_game()
|
[
"numpy.full",
"pygame.quit",
"pygame.image.load",
"pygame.draw.line",
"pygame.draw.circle",
"pygame.draw.rect",
"pygame.event.get",
"numpy.zeros",
"Utility.ui.message",
"pygame.display.flip",
"pygame.time.wait",
"pygame.display.update",
"collections.OrderedDict",
"Utility.shape.Rectangle",
"numpy.sqrt"
] |
[((7700, 7870), 'Utility.ui.message', 'ui.message', ([], {'gameDisplay': 'self.gameDisplay', 'msg': '"""Yeah.!"""', 'x': '(self.gameDimension[0] // 2 - 50)', 'y': '(self.gameDimension[1] // 2 - 50)', 'color': '(100, 200, 100)', 'font_size': '(50)'}), "(gameDisplay=self.gameDisplay, msg='Yeah.!', x=self.gameDimension\n [0] // 2 - 50, y=self.gameDimension[1] // 2 - 50, color=(100, 200, 100),\n font_size=50)\n", (7710, 7870), False, 'from Utility import ui\n'), ((7892, 7913), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (7911, 7913), False, 'import pygame\n'), ((9613, 9635), 'pygame.time.wait', 'pygame.time.wait', (['(2000)'], {}), '(2000)\n', (9629, 9635), False, 'import pygame\n'), ((9781, 9849), 'numpy.zeros', 'np.zeros', (['(self.gameDimension[1] // 10, self.gameDimension[0] // 10)'], {}), '((self.gameDimension[1] // 10, self.gameDimension[0] // 10))\n', (9789, 9849), True, 'import numpy as np\n'), ((10547, 10579), 'numpy.full', 'np.full', (['self.wall.shape', 'np.inf'], {}), '(self.wall.shape, np.inf)\n', (10554, 10579), True, 'import numpy as np\n'), ((10604, 10629), 'numpy.zeros', 'np.zeros', (['self.wall.shape'], {}), '(self.wall.shape)\n', (10612, 10629), True, 'import numpy as np\n'), ((10722, 10735), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10733, 10735), False, 'from collections import OrderedDict\n'), ((10762, 10775), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10773, 10775), False, 'from collections import OrderedDict\n'), ((17923, 17945), 'pygame.time.wait', 'pygame.time.wait', (['(5000)'], {}), '(5000)\n', (17939, 17945), False, 'import pygame\n'), ((1290, 1328), 'Utility.shape.Rectangle', 'Rectangle', (['x', 'y', 'l', 'w', '(190, 220, 220)'], {}), '(x, y, l, w, (190, 220, 220))\n', (1299, 1328), False, 'from Utility.shape import Rectangle\n'), ((1429, 1485), 'Utility.shape.Rectangle', 'Rectangle', ([], {'x': '(640)', 'y': '(190)', 'length': '(30)', 'width': '(30)', 'color': 'None'}), '(x=640, y=190, length=30, width=30, color=None)\n', (1438, 1485), False, 'from Utility.shape import Rectangle\n'), ((1531, 1577), 'pygame.image.load', 'pygame.image.load', (['"""Resources/Food/banana.png"""'], {}), "('Resources/Food/banana.png')\n", (1548, 1577), False, 'import pygame\n'), ((1808, 1911), 'pygame.draw.rect', 'pygame.draw.rect', (['self.gameDisplay', 'hurdle.color', '(hurdle.x, hurdle.y, hurdle.width, hurdle.length)'], {}), '(self.gameDisplay, hurdle.color, (hurdle.x, hurdle.y,\n hurdle.width, hurdle.length))\n', (1824, 1911), False, 'import pygame\n'), ((2605, 2735), 'pygame.draw.rect', 'pygame.draw.rect', (['self.gameDisplay', 'self.player.color', '(self.player.x, self.player.y, self.player.length, self.player.length)'], {}), '(self.gameDisplay, self.player.color, (self.player.x, self.\n player.y, self.player.length, self.player.length))\n', (2621, 2735), False, 'import pygame\n'), ((4584, 4602), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4600, 4602), False, 'import pygame\n'), ((4919, 5054), 'Utility.ui.message', 'ui.message', ([], {'gameDisplay': 'self.gameDisplay', 'msg': '"""Press, S to Start"""', 'x': '(self.gameDimension[0] // 2 - 50)', 'y': '(self.gameDimension[1] // 2)'}), "(gameDisplay=self.gameDisplay, msg='Press, S to Start', x=self.\n gameDimension[0] // 2 - 50, y=self.gameDimension[1] // 2)\n", (4929, 5054), False, 'from Utility import ui\n'), ((5062, 5085), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (5083, 5085), False, 'import pygame\n'), ((8120, 8138), 
'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (8136, 8138), False, 'import pygame\n'), ((9256, 9277), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (9275, 9277), False, 'import pygame\n'), ((12134, 12152), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (12150, 12152), False, 'import pygame\n'), ((13751, 13774), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (13772, 13774), False, 'import pygame\n'), ((13925, 14021), 'pygame.draw.rect', 'pygame.draw.rect', (['self.gameDisplay', '(200, 200, 200)', '(point[1] * 10, point[0] * 10, 10, 10)'], {}), '(self.gameDisplay, (200, 200, 200), (point[1] * 10, point[0\n ] * 10, 10, 10))\n', (13941, 14021), False, 'import pygame\n'), ((14066, 14162), 'pygame.draw.rect', 'pygame.draw.rect', (['self.gameDisplay', '(120, 120, 120)', '(point[1] * 10, point[0] * 10, 10, 10)'], {}), '(self.gameDisplay, (120, 120, 120), (point[1] * 10, point[0\n ] * 10, 10, 10))\n', (14082, 14162), False, 'import pygame\n'), ((14398, 14490), 'pygame.draw.rect', 'pygame.draw.rect', (['self.gameDisplay', '(0, 0, 250)', '(point[1] * 10, point[0] * 10, 10, 10)'], {}), '(self.gameDisplay, (0, 0, 250), (point[1] * 10, point[0] * \n 10, 10, 10))\n', (14414, 14490), False, 'import pygame\n'), ((14554, 14646), 'pygame.draw.line', 'pygame.draw.line', (['self.gameDisplay', 'self.grid_lines', '(x, 0)', '(x, self.gameDimension[1])'], {}), '(self.gameDisplay, self.grid_lines, (x, 0), (x, self.\n gameDimension[1]))\n', (14570, 14646), False, 'import pygame\n'), ((14710, 14802), 'pygame.draw.line', 'pygame.draw.line', (['self.gameDisplay', 'self.grid_lines', '(0, y)', '(self.gameDimension[0], y)'], {}), '(self.gameDisplay, self.grid_lines, (0, y), (self.\n gameDimension[0], y))\n', (14726, 14802), False, 'import pygame\n'), ((16393, 16411), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (16409, 16411), False, 'import pygame\n'), ((17363, 17459), 'pygame.draw.rect', 'pygame.draw.rect', (['self.gameDisplay', '(200, 250, 190)', '(self.player.x, self.player.y, 10, 10)'], {}), '(self.gameDisplay, (200, 250, 190), (self.player.x, self.\n player.y, 10, 10))\n', (17379, 17459), False, 'import pygame\n'), ((17525, 17546), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (17544, 17546), False, 'import pygame\n'), ((2010, 2162), 'pygame.draw.line', 'pygame.draw.line', (['self.gameDisplay', 'self.grid_lines', '(hurdle.x, hurdle.y + y * hurdle.width)', '(hurdle.x + hurdle.width, hurdle.y + y * hurdle.width)'], {}), '(self.gameDisplay, self.grid_lines, (hurdle.x, hurdle.y + y *\n hurdle.width), (hurdle.x + hurdle.width, hurdle.y + y * hurdle.width))\n', (2026, 2162), False, 'import pygame\n'), ((2206, 2346), 'pygame.draw.circle', 'pygame.draw.circle', (['self.gameDisplay', '(220, 50, 50)', '(hurdle.x + hurdle.width // 2, hurdle.y + y * hurdle.width + hurdle.width // 2)', '(3)'], {}), '(self.gameDisplay, (220, 50, 50), (hurdle.x + hurdle.\n width // 2, hurdle.y + y * hurdle.width + hurdle.width // 2), 3)\n', (2224, 2346), False, 'import pygame\n'), ((2357, 2499), 'pygame.draw.circle', 'pygame.draw.circle', (['self.gameDisplay', '(220, 239, 0)', '(hurdle.x + +hurdle.width // 2, hurdle.y + y * hurdle.width + +hurdle.width //\n 2)', '(1)'], {}), '(self.gameDisplay, (220, 239, 0), (hurdle.x + +hurdle.\n width // 2, hurdle.y + y * hurdle.width + +hurdle.width // 2), 1)\n', (2375, 2499), False, 'import pygame\n'), ((4672, 4685), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4683, 4685), False, 'import pygame\n'), ((8210, 
8223), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (8221, 8223), False, 'import pygame\n'), ((12222, 12235), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (12233, 12235), False, 'import pygame\n'), ((16483, 16496), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (16494, 16496), False, 'import pygame\n'), ((13484, 13594), 'numpy.sqrt', 'np.sqrt', (['((self.end_pos[0] * 10 - neighbour[0] * 10) ** 2 + (self.end_pos[1] * 10 - \n neighbour[1] * 10) ** 2)'], {}), '((self.end_pos[0] * 10 - neighbour[0] * 10) ** 2 + (self.end_pos[1] *\n 10 - neighbour[1] * 10) ** 2)\n', (13491, 13594), True, 'import numpy as np\n')]
|
"""Sample program that runs a sweep and records results."""
from pathlib import Path
from typing import Sequence
import numpy as np
from absl import app
from absl import flags
from differential_value_iteration import utils
from differential_value_iteration.algorithms import algorithms
from differential_value_iteration.environments import garet
from differential_value_iteration.environments import micro
FLAGS = flags.FLAGS
flags.DEFINE_string(name='plot_dir', default='plots', help='path to plot dir')
flags.DEFINE_integer('max_iters', 100000, 'Maximum iterations per algorithm.')
flags.DEFINE_float('epsilon', 1e-7, 'Tolerance for convergence.')
flags.DEFINE_bool('mrp', True, 'Run mrp experiments.')
flags.DEFINE_bool('mdp', True, 'Run mdp experiments.')
def main(argv):
del argv
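  # Step-size grids for the sweep: alpha scales the value update and beta the
  # average-reward (r_bar) update used by the DVI/MDVI variants.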
alphas = [1.0, 0.999, 0.99, 0.9, 0.7, 0.5, 0.3, 0.1, 0.01, 0.001]
betas = [1.0, 0.999, 0.99, 0.9, 0.7, 0.5, 0.3, 0.1, 0.01, 0.001]
max_iters = FLAGS.max_iters
epsilon = FLAGS.epsilon
plot_dir = FLAGS.plot_dir
if plot_dir[-1] != '/':
plot_dir += '/'
Path(plot_dir).mkdir(parents=True, exist_ok=True)
if FLAGS.mrp:
run_mrps(alphas=alphas,
betas=betas,
max_iters=max_iters,
epsilon=epsilon,
plot_dir=plot_dir)
if FLAGS.mdp:
run_mdps(alphas=alphas,
betas=betas,
max_iters=max_iters,
epsilon=epsilon,
plot_dir=plot_dir)
def run_mrps(
alphas: Sequence[float],
betas: Sequence[float],
max_iters: int,
epsilon: float,
plot_dir: str):
envs = [
micro.create_mrp1(dtype=np.float32),
micro.create_mrp2(dtype=np.float32),
micro.create_mrp3(dtype=np.float32),
]
for env in envs:
init_v = np.zeros(env.num_states)
init_r_bar_scalar = 0
init_r_bar_vec = np.zeros(env.num_states)
results = exp_RVI_Evaluation(env, 'exec_sync', alphas, init_v, max_iters,
epsilon, ref_idx=0)
utils.draw(results, plot_dir + env.name + '_RVI_Evaluation_sync', alphas)
results = exp_RVI_Evaluation(env, 'exec_async', alphas, init_v, max_iters,
epsilon, ref_idx=0)
utils.draw(results, plot_dir + env.name + '_RVI_Evaluation_async', alphas)
results = exp_DVI_Evaluation(env, 'exec_sync', alphas, betas, init_v,
init_r_bar_scalar, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_DVI_Evaluation_sync', alphas,
betas)
results = exp_DVI_Evaluation(env, 'exec_async', alphas, betas, init_v,
init_r_bar_scalar, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_DVI_Evaluation_async', alphas,
betas)
results = exp_MDVI_Evaluation(env, 'exec_sync', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Evaluation_sync', alphas,
betas)
results = exp_MDVI_Evaluation(env, 'exec_async', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Evaluation_async',
alphas,
betas)
def run_mdps(alphas: Sequence[float], betas: Sequence[float], max_iters: int,
epsilon: float, plot_dir: str):
garet_env = garet.create(seed=42,
num_states=10,
num_actions=2,
branching_factor=3)
envs = [garet_env, micro.mdp2]
for env in envs:
init_v = np.zeros(env.num_states)
init_r_bar_scalar = 0
init_r_bar_vec = np.zeros(env.num_states)
results = exp_RVI_Control(env, 'exec_sync', alphas, init_v, max_iters,
epsilon, ref_idx=0)
utils.draw(results, plot_dir + env.name + '_RVI_Control_sync', alphas)
results = exp_RVI_Control(env, 'exec_async', alphas, init_v, max_iters,
epsilon, ref_idx=0)
utils.draw(results, plot_dir + env.name + '_RVI_Control_async', alphas)
results = exp_DVI_Control(env, 'exec_sync', alphas, betas, init_v,
init_r_bar_scalar, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_DVI_Control_sync', alphas,
betas)
results = exp_DVI_Control(env, 'exec_async', alphas, betas, init_v,
init_r_bar_scalar, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_DVI_Control_async', alphas,
betas)
results = exp_MDVI_Control1(env, 'exec_sync', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Control1_sync', alphas,
betas)
results = exp_MDVI_Control1(env, 'exec_async', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Control1_async', alphas,
betas)
results = exp_MDVI_Control2(env, 'exec_sync', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Control2_sync', alphas,
betas)
results = exp_MDVI_Control2(env, 'exec_async', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Control2_async', alphas,
betas)
def exp_RVI_Evaluation(env, update_rule, alphas, init_v, max_iters, epsilon,
ref_idx=0):
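  """Sweep alpha for RVI Evaluation on env; returns a per-alpha flag that is
  nonzero iff the run converged within max_iters."""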
convergence_flags = np.zeros(len(alphas))
for alpha_idx, alpha in enumerate(alphas):
alg = algorithms.RVI_Evaluation(env, init_v, alpha, ref_idx)
print(f'{env.name} RVI Evaluation {update_rule} alpha:{alpha}', end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx] = convergence
return convergence_flags
def exp_RVI_Control(env, update_rule, alphas, init_v, max_iters, epsilon,
ref_idx=0):
convergence_flags = np.zeros(len(alphas))
for alpha_idx, alpha in enumerate(alphas):
alg = algorithms.RVI_Control(env, init_v, alpha, ref_idx)
print(f'{env.name} RVI Control {update_rule} alpha:{alpha}', end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx] = convergence
return convergence_flags
def exp_DVI_Evaluation(env, update_rule, alphas, betas, init_v, init_r_bar,
max_iters, epsilon):
convergence_flags = np.zeros((len(alphas), len(betas)))
for alpha_idx, alpha in enumerate(alphas):
for beta_idx, beta in enumerate(betas):
alg = algorithms.DVI_Evaluation(env, init_v, init_r_bar, alpha, beta)
print(
f'{env.name} DVI Evaluation {update_rule} alpha:{alpha} beta:{beta}',
end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx, beta_idx] = convergence
return convergence_flags
def exp_DVI_Control(env, update_rule, alphas, betas, init_v, init_r_bar,
max_iters, epsilon):
convergence_flags = np.zeros((len(alphas), len(betas)))
for alpha_idx, alpha in enumerate(alphas):
for beta_idx, beta in enumerate(betas):
alg = algorithms.DVI_Control(env, init_v, init_r_bar, alpha, beta)
print(f'{env.name} DVI Control {update_rule} alpha:{alpha} beta:{beta}',
end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx, beta_idx] = convergence
return convergence_flags
def exp_MDVI_Evaluation(env, update_rule, alphas, betas, init_v, init_r_bar,
max_iters, epsilon):
convergence_flags = np.zeros((len(alphas), len(betas)))
for alpha_idx, alpha in enumerate(alphas):
for beta_idx, beta in enumerate(betas):
alg = algorithms.MDVI_Evaluation(env, init_v, init_r_bar, alpha, beta)
print(
f'{env.name} MDVI Evaluation {update_rule} alpha:{alpha} beta:{beta}',
end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx, beta_idx] = convergence
return convergence_flags
def exp_MDVI_Control1(env, update_rule, alphas, betas, init_v, init_r_bar,
max_iters, epsilon):
convergence_flags = np.zeros((len(alphas), len(betas)))
for alpha_idx, alpha in enumerate(alphas):
for beta_idx, beta in enumerate(betas):
alg = algorithms.MDVI_Control1(env, init_v, init_r_bar, alpha, beta)
print(f'{env.name} MDVI Control1 {update_rule} alpha:{alpha} beta:{beta}',
end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx, beta_idx] = convergence
return convergence_flags
def exp_MDVI_Control2(env, update_rule, alphas, betas, init_v, init_r_bar,
max_iters, epsilon):
convergence_flags = np.zeros((len(alphas), len(betas)))
for alpha_idx, alpha in enumerate(alphas):
for beta_idx, beta in enumerate(betas):
alg = algorithms.MDVI_Control2(env, init_v, init_r_bar, alpha, beta)
print(f'{env.name} MDVI Control2 {update_rule} alpha:{alpha} beta:{beta}',
end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx, beta_idx] = convergence
return convergence_flags
if __name__ == '__main__':
app.run(main)
|
[
"differential_value_iteration.environments.micro.create_mrp1",
"differential_value_iteration.environments.micro.create_mrp2",
"differential_value_iteration.algorithms.algorithms.MDVI_Evaluation",
"pathlib.Path",
"differential_value_iteration.utils.run_alg",
"absl.flags.DEFINE_bool",
"differential_value_iteration.environments.garet.create",
"differential_value_iteration.algorithms.algorithms.MDVI_Control2",
"differential_value_iteration.algorithms.algorithms.DVI_Control",
"differential_value_iteration.algorithms.algorithms.MDVI_Control1",
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_float",
"differential_value_iteration.algorithms.algorithms.RVI_Evaluation",
"differential_value_iteration.utils.draw",
"differential_value_iteration.algorithms.algorithms.RVI_Control",
"differential_value_iteration.algorithms.algorithms.DVI_Evaluation",
"numpy.zeros",
"absl.flags.DEFINE_string",
"absl.app.run",
"differential_value_iteration.environments.micro.create_mrp3"
] |
[((428, 506), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ([], {'name': '"""plot_dir"""', 'default': '"""plots"""', 'help': '"""path to plot dir"""'}), "(name='plot_dir', default='plots', help='path to plot dir')\n", (447, 506), False, 'from absl import flags\n'), ((507, 585), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_iters"""', '(100000)', '"""Maximum iterations per algorithm."""'], {}), "('max_iters', 100000, 'Maximum iterations per algorithm.')\n", (527, 585), False, 'from absl import flags\n'), ((586, 652), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""epsilon"""', '(1e-07)', '"""Tolerance for convergence."""'], {}), "('epsilon', 1e-07, 'Tolerance for convergence.')\n", (604, 652), False, 'from absl import flags\n'), ((652, 706), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""mrp"""', '(True)', '"""Run mrp experiments."""'], {}), "('mrp', True, 'Run mrp experiments.')\n", (669, 706), False, 'from absl import flags\n'), ((707, 761), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""mdp"""', '(True)', '"""Run mdp experiments."""'], {}), "('mdp', True, 'Run mdp experiments.')\n", (724, 761), False, 'from absl import flags\n'), ((3400, 3471), 'differential_value_iteration.environments.garet.create', 'garet.create', ([], {'seed': '(42)', 'num_states': '(10)', 'num_actions': '(2)', 'branching_factor': '(3)'}), '(seed=42, num_states=10, num_actions=2, branching_factor=3)\n', (3412, 3471), False, 'from differential_value_iteration.environments import garet\n'), ((9743, 9756), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (9750, 9756), False, 'from absl import app\n'), ((1591, 1626), 'differential_value_iteration.environments.micro.create_mrp1', 'micro.create_mrp1', ([], {'dtype': 'np.float32'}), '(dtype=np.float32)\n', (1608, 1626), False, 'from differential_value_iteration.environments import micro\n'), ((1634, 1669), 'differential_value_iteration.environments.micro.create_mrp2', 'micro.create_mrp2', ([], {'dtype': 'np.float32'}), '(dtype=np.float32)\n', (1651, 1669), False, 'from differential_value_iteration.environments import micro\n'), ((1677, 1712), 'differential_value_iteration.environments.micro.create_mrp3', 'micro.create_mrp3', ([], {'dtype': 'np.float32'}), '(dtype=np.float32)\n', (1694, 1712), False, 'from differential_value_iteration.environments import micro\n'), ((1750, 1774), 'numpy.zeros', 'np.zeros', (['env.num_states'], {}), '(env.num_states)\n', (1758, 1774), True, 'import numpy as np\n'), ((1822, 1846), 'numpy.zeros', 'np.zeros', (['env.num_states'], {}), '(env.num_states)\n', (1830, 1846), True, 'import numpy as np\n'), ((1982, 2055), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_RVI_Evaluation_sync')", 'alphas'], {}), "(results, plot_dir + env.name + '_RVI_Evaluation_sync', alphas)\n", (1992, 2055), False, 'from differential_value_iteration import utils\n'), ((2192, 2266), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_RVI_Evaluation_async')", 'alphas'], {}), "(results, plot_dir + env.name + '_RVI_Evaluation_async', alphas)\n", (2202, 2266), False, 'from differential_value_iteration import utils\n'), ((2417, 2502), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_DVI_Evaluation_sync')", 'alphas', 'betas'], {}), "(results, plot_dir + env.name + '_DVI_Evaluation_sync', alphas, betas\n )\n", (2427, 2502), False, 'from differential_value_iteration import utils\n'), ((2664, 
2749), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_DVI_Evaluation_async')", 'alphas', 'betas'], {}), "(results, plot_dir + env.name + '_DVI_Evaluation_async', alphas,\n betas)\n", (2674, 2749), False, 'from differential_value_iteration import utils\n'), ((2910, 2995), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_MDVI_Evaluation_sync')", 'alphas', 'betas'], {}), "(results, plot_dir + env.name + '_MDVI_Evaluation_sync', alphas,\n betas)\n", (2920, 2995), False, 'from differential_value_iteration import utils\n'), ((3157, 3243), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_MDVI_Evaluation_async')", 'alphas', 'betas'], {}), "(results, plot_dir + env.name + '_MDVI_Evaluation_async', alphas,\n betas)\n", (3167, 3243), False, 'from differential_value_iteration import utils\n'), ((3618, 3642), 'numpy.zeros', 'np.zeros', (['env.num_states'], {}), '(env.num_states)\n', (3626, 3642), True, 'import numpy as np\n'), ((3690, 3714), 'numpy.zeros', 'np.zeros', (['env.num_states'], {}), '(env.num_states)\n', (3698, 3714), True, 'import numpy as np\n'), ((3844, 3914), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_RVI_Control_sync')", 'alphas'], {}), "(results, plot_dir + env.name + '_RVI_Control_sync', alphas)\n", (3854, 3914), False, 'from differential_value_iteration import utils\n'), ((4045, 4116), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_RVI_Control_async')", 'alphas'], {}), "(results, plot_dir + env.name + '_RVI_Control_async', alphas)\n", (4055, 4116), False, 'from differential_value_iteration import utils\n'), ((4261, 4338), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_DVI_Control_sync')", 'alphas', 'betas'], {}), "(results, plot_dir + env.name + '_DVI_Control_sync', alphas, betas)\n", (4271, 4338), False, 'from differential_value_iteration import utils\n'), ((4499, 4577), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_DVI_Control_async')", 'alphas', 'betas'], {}), "(results, plot_dir + env.name + '_DVI_Control_async', alphas, betas)\n", (4509, 4577), False, 'from differential_value_iteration import utils\n'), ((4738, 4817), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_MDVI_Control1_sync')", 'alphas', 'betas'], {}), "(results, plot_dir + env.name + '_MDVI_Control1_sync', alphas, betas)\n", (4748, 4817), False, 'from differential_value_iteration import utils\n'), ((4979, 5064), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_MDVI_Control1_async')", 'alphas', 'betas'], {}), "(results, plot_dir + env.name + '_MDVI_Control1_async', alphas, betas\n )\n", (4989, 5064), False, 'from differential_value_iteration import utils\n'), ((5220, 5299), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_MDVI_Control2_sync')", 'alphas', 'betas'], {}), "(results, plot_dir + env.name + '_MDVI_Control2_sync', alphas, betas)\n", (5230, 5299), False, 'from differential_value_iteration import utils\n'), ((5461, 5546), 'differential_value_iteration.utils.draw', 'utils.draw', (['results', "(plot_dir + env.name + '_MDVI_Control2_async')", 'alphas', 'betas'], {}), "(results, plot_dir + env.name + '_MDVI_Control2_async', alphas, 
betas\n )\n", (5471, 5546), False, 'from differential_value_iteration import utils\n'), ((5751, 5805), 'differential_value_iteration.algorithms.algorithms.RVI_Evaluation', 'algorithms.RVI_Evaluation', (['env', 'init_v', 'alpha', 'ref_idx'], {}), '(env, init_v, alpha, ref_idx)\n', (5776, 5805), False, 'from differential_value_iteration.algorithms import algorithms\n'), ((5901, 5952), 'differential_value_iteration.utils.run_alg', 'utils.run_alg', (['alg', 'update_rule', 'max_iters', 'epsilon'], {}), '(alg, update_rule, max_iters, epsilon)\n', (5914, 5952), False, 'from differential_value_iteration import utils\n'), ((6257, 6308), 'differential_value_iteration.algorithms.algorithms.RVI_Control', 'algorithms.RVI_Control', (['env', 'init_v', 'alpha', 'ref_idx'], {}), '(env, init_v, alpha, ref_idx)\n', (6279, 6308), False, 'from differential_value_iteration.algorithms import algorithms\n'), ((6401, 6452), 'differential_value_iteration.utils.run_alg', 'utils.run_alg', (['alg', 'update_rule', 'max_iters', 'epsilon'], {}), '(alg, update_rule, max_iters, epsilon)\n', (6414, 6452), False, 'from differential_value_iteration import utils\n'), ((1059, 1073), 'pathlib.Path', 'Path', (['plot_dir'], {}), '(plot_dir)\n', (1063, 1073), False, 'from pathlib import Path\n'), ((6828, 6891), 'differential_value_iteration.algorithms.algorithms.DVI_Evaluation', 'algorithms.DVI_Evaluation', (['env', 'init_v', 'init_r_bar', 'alpha', 'beta'], {}), '(env, init_v, init_r_bar, alpha, beta)\n', (6853, 6891), False, 'from differential_value_iteration.algorithms import algorithms\n'), ((7024, 7075), 'differential_value_iteration.utils.run_alg', 'utils.run_alg', (['alg', 'update_rule', 'max_iters', 'epsilon'], {}), '(alg, update_rule, max_iters, epsilon)\n', (7037, 7075), False, 'from differential_value_iteration import utils\n'), ((7462, 7522), 'differential_value_iteration.algorithms.algorithms.DVI_Control', 'algorithms.DVI_Control', (['env', 'init_v', 'init_r_bar', 'alpha', 'beta'], {}), '(env, init_v, init_r_bar, alpha, beta)\n', (7484, 7522), False, 'from differential_value_iteration.algorithms import algorithms\n'), ((7643, 7694), 'differential_value_iteration.utils.run_alg', 'utils.run_alg', (['alg', 'update_rule', 'max_iters', 'epsilon'], {}), '(alg, update_rule, max_iters, epsilon)\n', (7656, 7694), False, 'from differential_value_iteration import utils\n'), ((8085, 8149), 'differential_value_iteration.algorithms.algorithms.MDVI_Evaluation', 'algorithms.MDVI_Evaluation', (['env', 'init_v', 'init_r_bar', 'alpha', 'beta'], {}), '(env, init_v, init_r_bar, alpha, beta)\n', (8111, 8149), False, 'from differential_value_iteration.algorithms import algorithms\n'), ((8283, 8334), 'differential_value_iteration.utils.run_alg', 'utils.run_alg', (['alg', 'update_rule', 'max_iters', 'epsilon'], {}), '(alg, update_rule, max_iters, epsilon)\n', (8296, 8334), False, 'from differential_value_iteration import utils\n'), ((8723, 8785), 'differential_value_iteration.algorithms.algorithms.MDVI_Control1', 'algorithms.MDVI_Control1', (['env', 'init_v', 'init_r_bar', 'alpha', 'beta'], {}), '(env, init_v, init_r_bar, alpha, beta)\n', (8747, 8785), False, 'from differential_value_iteration.algorithms import algorithms\n'), ((8908, 8959), 'differential_value_iteration.utils.run_alg', 'utils.run_alg', (['alg', 'update_rule', 'max_iters', 'epsilon'], {}), '(alg, update_rule, max_iters, epsilon)\n', (8921, 8959), False, 'from differential_value_iteration import utils\n'), ((9348, 9410), 
'differential_value_iteration.algorithms.algorithms.MDVI_Control2', 'algorithms.MDVI_Control2', (['env', 'init_v', 'init_r_bar', 'alpha', 'beta'], {}), '(env, init_v, init_r_bar, alpha, beta)\n', (9372, 9410), False, 'from differential_value_iteration.algorithms import algorithms\n'), ((9533, 9584), 'differential_value_iteration.utils.run_alg', 'utils.run_alg', (['alg', 'update_rule', 'max_iters', 'epsilon'], {}), '(alg, update_rule, max_iters, epsilon)\n', (9546, 9584), False, 'from differential_value_iteration import utils\n')]
|
import fastNLP as FN
import argparse
import os
import random
import numpy
import torch
from tensorboardX import SummaryWriter  # used by TensorboardCallback below
def get_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, required=True)
parser.add_argument('--w_decay', type=float, required=True)
parser.add_argument('--lr_decay', type=float, required=True)
parser.add_argument('--bsz', type=int, required=True)
parser.add_argument('--ep', type=int, required=True)
parser.add_argument('--drop', type=float, required=True)
parser.add_argument('--gpu', type=str, required=True)
parser.add_argument('--log', type=str, default=None)
return parser
def add_model_args(parser):
parser.add_argument('--nhead', type=int, default=6)
parser.add_argument('--hdim', type=int, default=50)
parser.add_argument('--hidden', type=int, default=300)
return parser
def set_gpu(gpu_str):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_str
def set_rng_seeds(seed=None):
if seed is None:
seed = numpy.random.randint(0, 65536)
random.seed(seed)
numpy.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# print('RNG_SEED {}'.format(seed))
return seed
class TensorboardCallback(FN.Callback):
"""
    Accepts one or more of the following strings as arguments:
- "model"
- "loss"
- "metric"
"""
def __init__(self, *options):
super(TensorboardCallback, self).__init__()
args = {"model", "loss", "metric"}
for opt in options:
if opt not in args:
raise ValueError(
"Unrecognized argument {}. Expect one of {}".format(opt, args))
self.options = options
self._summary_writer = None
self.graph_added = False
def on_train_begin(self):
save_dir = self.trainer.save_path
if save_dir is None:
path = os.path.join(
"./", 'tensorboard_logs_{}'.format(self.trainer.start_time))
else:
path = os.path.join(
save_dir, 'tensorboard_logs_{}'.format(self.trainer.start_time))
self._summary_writer = SummaryWriter(path)
def on_batch_begin(self, batch_x, batch_y, indices):
if "model" in self.options and self.graph_added is False:
            # tensorboardX has a serious bug here, so drawing the model graph is disabled for now
# from fastNLP.core.utils import _build_args
# inputs = _build_args(self.trainer.model, **batch_x)
# args = tuple([value for value in inputs.values()])
# args = args[0] if len(args) == 1 else args
# self._summary_writer.add_graph(self.trainer.model, torch.zeros(32, 2))
self.graph_added = True
def on_backward_begin(self, loss):
if "loss" in self.options:
self._summary_writer.add_scalar(
"loss", loss.item(), global_step=self.trainer.step)
if "model" in self.options:
for name, param in self.trainer.model.named_parameters():
if param.requires_grad:
self._summary_writer.add_scalar(
name + "_mean", param.mean(), global_step=self.trainer.step)
# self._summary_writer.add_scalar(name + "_std", param.std(), global_step=self.trainer.step)
self._summary_writer.add_scalar(name + "_grad_mean", param.grad.mean(),
global_step=self.trainer.step)
def on_valid_end(self, eval_result, metric_key):
if "metric" in self.options:
for name, metric in eval_result.items():
for metric_key, metric_val in metric.items():
self._summary_writer.add_scalar("valid_{}_{}".format(name, metric_key), metric_val,
global_step=self.trainer.step)
def on_train_end(self):
self._summary_writer.close()
del self._summary_writer
def on_exception(self, exception):
if hasattr(self, "_summary_writer"):
self._summary_writer.close()
del self._summary_writer
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.random.manual_seed",
"torch.cuda.manual_seed_all",
"numpy.random.randint",
"random.seed"
] |
[((123, 148), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (146, 148), False, 'import argparse\n'), ((1092, 1109), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1103, 1109), False, 'import random\n'), ((1114, 1137), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (1131, 1137), False, 'import numpy\n'), ((1142, 1172), 'torch.random.manual_seed', 'torch.random.manual_seed', (['seed'], {}), '(seed)\n', (1166, 1172), False, 'import torch\n'), ((1177, 1209), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1203, 1209), False, 'import torch\n'), ((1057, 1087), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(65536)'], {}), '(0, 65536)\n', (1077, 1087), False, 'import numpy\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : vad_util.py
# @Time : 2018/8/29 13:37
# @Software : PyCharm
import numpy as np
from math import log
import librosa
def mse(data):
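    # note: despite the name, this computes the root-mean-square (RMS) of the samples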
return ((data ** 2).mean()) ** 0.5
def dBFS(data):
mse_data = mse(data)
if mse_data == 0.0:
return 0
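    # full-scale reference amplitude, assuming signed 16-bit audio samples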
max_possible_val = 2 ** 16 / 2
return 20 * log(mse_data / max_possible_val, 10)
def cut_wav(data, per_f=150):
num_f = int(len(data) / per_f)
data = data[:num_f * per_f]
data = data.reshape((num_f, per_f))
return data
def remove_silence(source_sound, common_sound, silence_threshold=140, chunk_size=148):
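    # keep the source frames whose matching common frames are louder than -silence_threshold dBFS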
source_sounds = cut_wav(source_sound, chunk_size)
common_sounds = cut_wav(common_sound, chunk_size)
y = []
for i in range(common_sounds.shape[0]):
db = -dBFS(common_sounds[i, ...])
if db < silence_threshold:
y.append(source_sounds[i])
# print("db", i, db)
y = np.array(y)
y = y.flatten()
return y
def comman(sound):
abs_sound = np.abs(sound)
return sound / np.max(abs_sound)
if __name__ == '__main__':
wav_data, rate = librosa.load("BAC009S0908W0161.wav", sr=16000)
y = remove_silence(wav_data, wav_data, 139, 300)
librosa.output.write_wav("c.wav", y, sr=16000)
|
[
"numpy.abs",
"librosa.output.write_wav",
"numpy.max",
"numpy.array",
"librosa.load",
"math.log"
] |
[((973, 984), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (981, 984), True, 'import numpy as np\n'), ((1055, 1068), 'numpy.abs', 'np.abs', (['sound'], {}), '(sound)\n', (1061, 1068), True, 'import numpy as np\n'), ((1157, 1203), 'librosa.load', 'librosa.load', (['"""BAC009S0908W0161.wav"""'], {'sr': '(16000)'}), "('BAC009S0908W0161.wav', sr=16000)\n", (1169, 1203), False, 'import librosa\n'), ((1262, 1308), 'librosa.output.write_wav', 'librosa.output.write_wav', (['"""c.wav"""', 'y'], {'sr': '(16000)'}), "('c.wav', y, sr=16000)\n", (1286, 1308), False, 'import librosa\n'), ((375, 411), 'math.log', 'log', (['(mse_data / max_possible_val)', '(10)'], {}), '(mse_data / max_possible_val, 10)\n', (378, 411), False, 'from math import log\n'), ((1088, 1105), 'numpy.max', 'np.max', (['abs_sound'], {}), '(abs_sound)\n', (1094, 1105), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Demonstrations of setting up models and visualising outputs."""
from __future__ import division
__authors__ = '<NAME>'
__license__ = 'MIT'
import sys
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.animation import FuncAnimation
import numpy as np
from pompy import models, processors
DEFAULT_SEED = 20181108
def set_up_figure(fig_size=(10, 5)):
"""Set up Matplotlib figure with simulation time title text.
Parameters
----------
fig_size : tuple
Figure dimensions in inches in order `(width, height)`.
"""
fig, ax = plt.subplots(1, 1, figsize=fig_size)
title = ax.set_title('Simulation time = ---- seconds')
return fig, ax, title
def update_decorator(dt, title, steps_per_frame, models):
"""Decorator for animation update methods."""
def inner_decorator(update_function):
def wrapped_update(i):
for j in range(steps_per_frame):
for model in models:
model.update(dt)
t = i * steps_per_frame * dt
title.set_text('Simulation time = {0:.3f} seconds'.format(t))
return [title] + update_function(i)
return wrapped_update
return inner_decorator
def wind_model_demo(dt=0.01, t_max=100, steps_per_frame=20, seed=DEFAULT_SEED):
"""Set up wind model and animate velocity field with quiver plot.
Parameters
----------
dt : float
Simulation timestep.
t_max : float
End time to simulate to.
steps_per_frame: integer
Number of simulation time steps to perform between animation frames.
seed : integer
Seed for random number generator.
Returns
-------
fig : Figure
Matplotlib figure object.
ax : AxesSubplot
Matplotlib axis object.
anim : FuncAnimation
Matplotlib animation object.
"""
rng = np.random.RandomState(seed)
# define simulation region
wind_region = models.Rectangle(x_min=0., x_max=100., y_min=-25., y_max=25.)
# set up wind model
wind_model = models.WindModel(wind_region, 21, 11, rng=rng)
# let simulation run for 10s to equilibrate wind model
for t in np.arange(0, 10, dt):
wind_model.update(dt)
# generate figure and attach close event
fig, ax, title = set_up_figure()
# create quiver plot of initial velocity field
vf_plot = ax.quiver(wind_model.x_points, wind_model.y_points,
wind_model.velocity_field.T[0],
wind_model.velocity_field.T[1], width=0.003)
# expand axis limits to make vectors at boundary of field visible
ax.axis(ax.axis() + np.array([-0.25, 0.25, -0.25, 0.25]))
ax.set_xlabel('x-coordinate / m')
ax.set_ylabel('y-coordinate / m')
ax.set_aspect(1)
fig.tight_layout()
# define update function
@update_decorator(dt, title, steps_per_frame, [wind_model])
def update(i):
vf_plot.set_UVC(
wind_model.velocity_field.T[0], wind_model.velocity_field.T[1])
return [vf_plot]
# create animation object
n_frame = int(t_max / (dt * steps_per_frame) + 0.5)
anim = FuncAnimation(fig, update, n_frame, blit=True)
return fig, ax, anim
def plume_model_demo(dt=0.01, t_max=100, steps_per_frame=200,
seed=DEFAULT_SEED):
"""Set up plume model and animate puffs overlayed over velocity field.
Puff positions displayed using Matplotlib `scatter` plot function and
velocity field displayed using `quiver` plot function.
Parameters
----------
dt : float
Simulation timestep.
t_max : float
End time to simulate to.
steps_per_frame: integer
Number of simulation time steps to perform between animation frames.
seed : integer
Seed for random number generator.
Returns
-------
fig : Figure
Matplotlib figure object.
ax : AxesSubplot
Matplotlib axis object.
anim : FuncAnimation
Matplotlib animation object.
"""
rng = np.random.RandomState(seed)
# define simulation region
sim_region = models.Rectangle(x_min=0., x_max=100, y_min=-25., y_max=25.)
# set up wind model
wind_model = models.WindModel(sim_region, 21, 11, rng=rng)
# let simulation run for 10s to equilibrate wind model
for t in np.arange(0, 10, dt):
wind_model.update(dt)
# set up plume model
plume_model = models.PlumeModel(
sim_region, (5., 0., 0.), wind_model, rng=rng)
# set up figure window
fig, ax, title = set_up_figure()
# create quiver plot of initial velocity field
# quiver expects first array dimension (rows) to correspond to y-axis
# therefore need to transpose
vf_plot = plt.quiver(
wind_model.x_points, wind_model.y_points,
wind_model.velocity_field.T[0], wind_model.velocity_field.T[1],
width=0.003)
# expand axis limits to make vectors at boundary of field visible
ax.axis(ax.axis() + np.array([-0.25, 0.25, -0.25, 0.25]))
# draw initial puff positions with scatter plot
radius_mult = 200
pp_plot = plt.scatter(
plume_model.puff_array[:, 0], plume_model.puff_array[:, 1],
radius_mult * plume_model.puff_array[:, 3]**0.5, c='r',
edgecolors='none')
ax.set_xlabel('x-coordinate / m')
ax.set_ylabel('y-coordinate / m')
ax.set_aspect(1)
fig.tight_layout()
# define update function
@update_decorator(dt, title, steps_per_frame, [wind_model, plume_model])
def update(i):
# update velocity field quiver plot data
vf_plot.set_UVC(wind_model.velocity_field[:, :, 0].T,
wind_model.velocity_field[:, :, 1].T)
# update puff position scatter plot positions and sizes
pp_plot.set_offsets(plume_model.puff_array[:, :2])
pp_plot._sizes = radius_mult * plume_model.puff_array[:, 3]**0.5
return [vf_plot, pp_plot]
# create animation object
n_frame = int(t_max / (dt * steps_per_frame) + 0.5)
anim = FuncAnimation(fig, update, frames=n_frame, blit=True)
return fig, ax, anim
def conc_point_val_demo(dt=0.01, t_max=5, steps_per_frame=1, x=10., y=0.0,
seed=DEFAULT_SEED):
"""Set up plume model and animate concentration at a point as time series.
Demonstration of setting up plume model and processing the outputted
puff arrays with the ConcentrationPointValueCalculator class, the
resulting concentration time course at a point in the odour plume being
displayed with the Matplotlib `plot` function.
Parameters
----------
dt : float
Simulation timestep.
t_max : float
End time to simulate to.
steps_per_frame: integer
Number of simulation time steps to perform between animation frames.
x : float
x-coordinate of point to measure concentration at.
y : float
y-coordinate of point to measure concentration at.
seed : integer
Seed for random number generator.
Returns
-------
fig : Figure
Matplotlib figure object.
ax : AxesSubplot
Matplotlib axis object.
anim : FuncAnimation
Matplotlib animation object.
"""
rng = np.random.RandomState(seed)
# define simulation region
sim_region = models.Rectangle(x_min=0., x_max=100, y_min=-25., y_max=25.)
# set up wind model
wind_model = models.WindModel(sim_region, 21, 11, rng=rng)
# set up plume model
plume_model = models.PlumeModel(
sim_region, (5., 0., 0.), wind_model, rng=rng)
# let simulation run for 10s to initialise models
for t in np.arange(0, 10, dt):
wind_model.update(dt)
plume_model.update(dt)
# set up concentration point value calculator
val_calc = processors.ConcentrationValueCalculator(1.)
conc_vals = []
conc_vals.append(val_calc.calc_conc_point(plume_model.puff_array, x, y))
ts = [0.]
# set up figure
fig, ax, title = set_up_figure()
# display initial concentration field as image
conc_line, = plt.plot(ts, conc_vals)
ax.set_xlim(0., t_max)
ax.set_ylim(0., 150.)
ax.set_xlabel('Time / s')
ax.set_ylabel('Normalised concentration')
ax.grid(True)
fig.tight_layout()
# define update function
@update_decorator(dt, title, steps_per_frame, [wind_model, plume_model])
def update(i):
ts.append(dt * i * steps_per_frame)
conc_vals.append(
val_calc.calc_conc_point(plume_model.puff_array, x, y))
conc_line.set_data(ts, conc_vals)
return [conc_line]
# create animation object
n_frame = int(t_max / (dt * steps_per_frame) + 0.5)
anim = FuncAnimation(fig, update, frames=n_frame, blit=True)
return fig, ax, anim
def concentration_array_demo(dt=0.01, t_max=100, steps_per_frame=50,
seed=DEFAULT_SEED):
"""Set up plume model and animate concentration fields.
Demonstration of setting up plume model and processing the outputted
puff arrays with the `ConcentrationArrayGenerator` class, the resulting
arrays being displayed with the Matplotlib `imshow` function.
Parameters
----------
dt : float
Simulation timestep.
t_max : float
End time to simulate to.
steps_per_frame: integer
Number of simulation time steps to perform between animation frames.
seed : integer
Seed for random number generator.
Returns
-------
fig : Figure
Matplotlib figure object.
ax : AxesSubplot
Matplotlib axis object.
anim : FuncAnimation
Matplotlib animation object.
"""
rng = np.random.RandomState(seed)
# define simulation region
sim_region = models.Rectangle(x_min=0., x_max=100, y_min=-25., y_max=25.)
# set up wind model
wind_model = models.WindModel(sim_region, 21, 11, rng=rng)
# set up plume model
plume_model = models.PlumeModel(
sim_region, (5., 0., 0.), wind_model, rng=rng)
# let simulation run for 10s to initialise models
for t in np.arange(0, 10, dt):
wind_model.update(dt)
plume_model.update(dt)
# set up concentration array generator
array_gen = processors.ConcentrationArrayGenerator(
sim_region, 0.01, 500, 250, 1.)
# set up figure
fig, ax, title = set_up_figure()
# display initial concentration field as image
conc_array = array_gen.generate_single_array(plume_model.puff_array)
conc_im = plt.imshow(conc_array.T, extent=sim_region, cmap='Reds',
vmin=0., vmax=1.)
ax.set_xlabel('x-coordinate / m')
ax.set_ylabel('y-coordinate / m')
ax.set_aspect(1)
fig.tight_layout()
# define update function
@update_decorator(dt, title, steps_per_frame, [wind_model, plume_model])
def update(i):
conc_im.set_data(
array_gen.generate_single_array(plume_model.puff_array).T)
return [conc_im]
# create animation object
n_frame = int(t_max / (dt * steps_per_frame) + 0.5)
anim = FuncAnimation(fig, update, frames=n_frame, blit=True)
return fig, ax, anim
|
[
"matplotlib.pyplot.plot",
"pompy.processors.ConcentrationArrayGenerator",
"matplotlib.pyplot.scatter",
"pompy.processors.ConcentrationValueCalculator",
"matplotlib.pyplot.quiver",
"pompy.models.Rectangle",
"matplotlib.pyplot.imshow",
"numpy.random.RandomState",
"matplotlib.animation.FuncAnimation",
"numpy.arange",
"numpy.array",
"pompy.models.PlumeModel",
"matplotlib.pyplot.subplots",
"pompy.models.WindModel"
] |
[((672, 708), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'fig_size'}), '(1, 1, figsize=fig_size)\n', (684, 708), True, 'import matplotlib.pyplot as plt\n'), ((1970, 1997), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1991, 1997), True, 'import numpy as np\n'), ((2047, 2112), 'pompy.models.Rectangle', 'models.Rectangle', ([], {'x_min': '(0.0)', 'x_max': '(100.0)', 'y_min': '(-25.0)', 'y_max': '(25.0)'}), '(x_min=0.0, x_max=100.0, y_min=-25.0, y_max=25.0)\n', (2063, 2112), False, 'from pompy import models, processors\n'), ((2150, 2196), 'pompy.models.WindModel', 'models.WindModel', (['wind_region', '(21)', '(11)'], {'rng': 'rng'}), '(wind_region, 21, 11, rng=rng)\n', (2166, 2196), False, 'from pompy import models, processors\n'), ((2269, 2289), 'numpy.arange', 'np.arange', (['(0)', '(10)', 'dt'], {}), '(0, 10, dt)\n', (2278, 2289), True, 'import numpy as np\n'), ((3234, 3280), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'update', 'n_frame'], {'blit': '(True)'}), '(fig, update, n_frame, blit=True)\n', (3247, 3280), False, 'from matplotlib.animation import FuncAnimation\n'), ((4153, 4180), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4174, 4180), True, 'import numpy as np\n'), ((4229, 4292), 'pompy.models.Rectangle', 'models.Rectangle', ([], {'x_min': '(0.0)', 'x_max': '(100)', 'y_min': '(-25.0)', 'y_max': '(25.0)'}), '(x_min=0.0, x_max=100, y_min=-25.0, y_max=25.0)\n', (4245, 4292), False, 'from pompy import models, processors\n'), ((4331, 4376), 'pompy.models.WindModel', 'models.WindModel', (['sim_region', '(21)', '(11)'], {'rng': 'rng'}), '(sim_region, 21, 11, rng=rng)\n', (4347, 4376), False, 'from pompy import models, processors\n'), ((4449, 4469), 'numpy.arange', 'np.arange', (['(0)', '(10)', 'dt'], {}), '(0, 10, dt)\n', (4458, 4469), True, 'import numpy as np\n'), ((4544, 4611), 'pompy.models.PlumeModel', 'models.PlumeModel', (['sim_region', '(5.0, 0.0, 0.0)', 'wind_model'], {'rng': 'rng'}), '(sim_region, (5.0, 0.0, 0.0), wind_model, rng=rng)\n', (4561, 4611), False, 'from pompy import models, processors\n'), ((4855, 4989), 'matplotlib.pyplot.quiver', 'plt.quiver', (['wind_model.x_points', 'wind_model.y_points', 'wind_model.velocity_field.T[0]', 'wind_model.velocity_field.T[1]'], {'width': '(0.003)'}), '(wind_model.x_points, wind_model.y_points, wind_model.\n velocity_field.T[0], wind_model.velocity_field.T[1], width=0.003)\n', (4865, 4989), True, 'import matplotlib.pyplot as plt\n'), ((5230, 5388), 'matplotlib.pyplot.scatter', 'plt.scatter', (['plume_model.puff_array[:, 0]', 'plume_model.puff_array[:, 1]', '(radius_mult * plume_model.puff_array[:, 3] ** 0.5)'], {'c': '"""r"""', 'edgecolors': '"""none"""'}), "(plume_model.puff_array[:, 0], plume_model.puff_array[:, 1], \n radius_mult * plume_model.puff_array[:, 3] ** 0.5, c='r', edgecolors='none'\n )\n", (5241, 5388), True, 'import matplotlib.pyplot as plt\n'), ((6149, 6202), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'update'], {'frames': 'n_frame', 'blit': '(True)'}), '(fig, update, frames=n_frame, blit=True)\n', (6162, 6202), False, 'from matplotlib.animation import FuncAnimation\n'), ((7347, 7374), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (7368, 7374), True, 'import numpy as np\n'), ((7423, 7486), 'pompy.models.Rectangle', 'models.Rectangle', ([], {'x_min': '(0.0)', 'x_max': '(100)', 'y_min': '(-25.0)', 'y_max': '(25.0)'}), '(x_min=0.0, x_max=100, 
y_min=-25.0, y_max=25.0)\n', (7439, 7486), False, 'from pompy import models, processors\n'), ((7525, 7570), 'pompy.models.WindModel', 'models.WindModel', (['sim_region', '(21)', '(11)'], {'rng': 'rng'}), '(sim_region, 21, 11, rng=rng)\n', (7541, 7570), False, 'from pompy import models, processors\n'), ((7614, 7681), 'pompy.models.PlumeModel', 'models.PlumeModel', (['sim_region', '(5.0, 0.0, 0.0)', 'wind_model'], {'rng': 'rng'}), '(sim_region, (5.0, 0.0, 0.0), wind_model, rng=rng)\n', (7631, 7681), False, 'from pompy import models, processors\n'), ((7755, 7775), 'numpy.arange', 'np.arange', (['(0)', '(10)', 'dt'], {}), '(0, 10, dt)\n', (7764, 7775), True, 'import numpy as np\n'), ((7903, 7947), 'pompy.processors.ConcentrationValueCalculator', 'processors.ConcentrationValueCalculator', (['(1.0)'], {}), '(1.0)\n', (7942, 7947), False, 'from pompy import models, processors\n'), ((8182, 8205), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'conc_vals'], {}), '(ts, conc_vals)\n', (8190, 8205), True, 'import matplotlib.pyplot as plt\n'), ((8807, 8860), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'update'], {'frames': 'n_frame', 'blit': '(True)'}), '(fig, update, frames=n_frame, blit=True)\n', (8820, 8860), False, 'from matplotlib.animation import FuncAnimation\n'), ((9784, 9811), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (9805, 9811), True, 'import numpy as np\n'), ((9860, 9923), 'pompy.models.Rectangle', 'models.Rectangle', ([], {'x_min': '(0.0)', 'x_max': '(100)', 'y_min': '(-25.0)', 'y_max': '(25.0)'}), '(x_min=0.0, x_max=100, y_min=-25.0, y_max=25.0)\n', (9876, 9923), False, 'from pompy import models, processors\n'), ((9962, 10007), 'pompy.models.WindModel', 'models.WindModel', (['sim_region', '(21)', '(11)'], {'rng': 'rng'}), '(sim_region, 21, 11, rng=rng)\n', (9978, 10007), False, 'from pompy import models, processors\n'), ((10051, 10118), 'pompy.models.PlumeModel', 'models.PlumeModel', (['sim_region', '(5.0, 0.0, 0.0)', 'wind_model'], {'rng': 'rng'}), '(sim_region, (5.0, 0.0, 0.0), wind_model, rng=rng)\n', (10068, 10118), False, 'from pompy import models, processors\n'), ((10192, 10212), 'numpy.arange', 'np.arange', (['(0)', '(10)', 'dt'], {}), '(0, 10, dt)\n', (10201, 10212), True, 'import numpy as np\n'), ((10334, 10405), 'pompy.processors.ConcentrationArrayGenerator', 'processors.ConcentrationArrayGenerator', (['sim_region', '(0.01)', '(500)', '(250)', '(1.0)'], {}), '(sim_region, 0.01, 500, 250, 1.0)\n', (10372, 10405), False, 'from pompy import models, processors\n'), ((10609, 10685), 'matplotlib.pyplot.imshow', 'plt.imshow', (['conc_array.T'], {'extent': 'sim_region', 'cmap': '"""Reds"""', 'vmin': '(0.0)', 'vmax': '(1.0)'}), "(conc_array.T, extent=sim_region, cmap='Reds', vmin=0.0, vmax=1.0)\n", (10619, 10685), True, 'import matplotlib.pyplot as plt\n'), ((11175, 11228), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'update'], {'frames': 'n_frame', 'blit': '(True)'}), '(fig, update, frames=n_frame, blit=True)\n', (11188, 11228), False, 'from matplotlib.animation import FuncAnimation\n'), ((2739, 2775), 'numpy.array', 'np.array', (['[-0.25, 0.25, -0.25, 0.25]'], {}), '([-0.25, 0.25, -0.25, 0.25])\n', (2747, 2775), True, 'import numpy as np\n'), ((5104, 5140), 'numpy.array', 'np.array', (['[-0.25, 0.25, -0.25, 0.25]'], {}), '([-0.25, 0.25, -0.25, 0.25])\n', (5112, 5140), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import os
from .leafs import leafs
print("Reloaded preprocessing!")
def normalize(dataset):
    '''normalize the data so that, over all images, the pixels at position (x/y) have mean = 0 and are standardised'''
# calculate the mean
mean=np.zeros(dataset[0].image.shape)
for lea in dataset:
mean=mean+lea.image
mean/=len(dataset)
#calculating the variance
var=np.zeros(dataset[0].image.shape)
for lea in dataset:
var=var+(lea.image-mean)**2
var/=len(dataset)
f=0.1
    var=(var-f>=0)*(var-f)+f # floors the variance at a minimum of f
for lea in dataset:
lea.image=(lea.image-mean)/var
def createTrainingAndTestingList(directory, shuffle = True):
    '''Takes as input the matrices from collectData and creates a training and a testing list'''
l_train = []
l_test = []
for n in range (7):
matrices = np.load(os.path.join(directory, str(n)+'.npy'))
        for i in range(759): # 759 images per tree for training
l_train += [leafs.Leaf(i+n*1000, n, matrices[i]/255)]
        for i in range(760,839): # 79 images per tree for testing
l_test += [leafs.Leaf(i+n*1000, n, matrices[i]/255)]
if shuffle:
np.random.shuffle(l_train)
np.random.shuffle(l_test)
return([l_train,l_test])
def collectData(root_path, save_path, cfactor, overwrite = False):
    '''processes images from root_path one-by-one and saves them in the same directory,
    then collects them tree by tree into one .npy array per tree under save_path'''
sizeOfMatrixes = int(2000//cfactor)
#processing images to arrays one-by-one and save inplace
iid = 0
for (root, dirnames, filenames) in os.walk(root_path, topdown = True):
for f in filenames:
if f.endswith('.JPG'):
savepath = os.path.join(root, os.path.splitext(f)[0])
savepath += ('_' + str(sizeOfMatrixes) + 'x' + str(sizeOfMatrixes)) # for example + _50x50
if(not(os.path.isfile(savepath+'.npy')) or overwrite):
matriX = centr_cut_compress(os.path.join(root, f), cfactor)
np.save(savepath, matriX, allow_pickle=False)
iid += 1
# collecting all arrays from tree i into one big folder calld i.npy
for i in range (0,8):
tree_path = os.path.join(root_path, str(i))
tree_save_path = os.path.join(save_path, str(sizeOfMatrixes) + 'x' + str(sizeOfMatrixes) ,str(i))
leaf_list = []
for (root, dirnames, filenames) in os.walk(tree_path , topdown=True):
for f in filenames:
if f.endswith('_' + str(sizeOfMatrixes) + 'x' + str(sizeOfMatrixes) + '.npy'):
leaf_list.append(np.load(os.path.join(root, f)))
leaf_array = np.array(leaf_list)
np.save(tree_save_path, leaf_array, allow_pickle=False)
def desired_output(label):
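    # one-vs-rest target vector: +1 at the true label, -1 for the other six classes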
res = -1 * np.ones((7,1,1))
res[label, 0, 0] = +1
return res
def centr_cut_compress(path, cfactor = 50, square_side = 2000, debug=False):
'''centers, cuts and compresses a picture
    Input: path, compression factor = 50, square side of new image = 2000, debug=False
    Output: matrix that can be used as a CNN input
'''
im = center_leaf(path, square_side)
new_shape = im.size[0] // cfactor
new_im = im.resize((new_shape, new_shape)) # makes the resolution smaller
matriz = np.array(new_im) # convert image to numpy matrix
matriz ^= 0xFF # invert matrix
oneD_matriz = matriz[:, :, 1] # only looking at one dimension, 1 = green
if debug:
print('Image “',path,'“ opened with size:',im.size,'and mode:',im.mode)
print('compressed the square-image with lenght :',
oneD_matriz.shape[0], ' with factor:', cfactor)
print('output matrix has shape:', oneD_matriz.shape)
plt.imshow(oneD_matriz)
plt.tight_layout()
plt.show()
return oneD_matriz
def center_leaf(path, square_side=2000):
'''
    centers a square on the leaf; the region we look at excludes the image border
    (found by overlapping all pictures, see find_overlap)
    input: path of image, square_side of the matrix that is cut out
output: cut image
ATTENTION: the cutting borders are fixed
'''
up = 500
down = 2900
left = 400
right = 4000
s = square_side // 2
im = Image.open(path).convert('RGB')
matriz = np.array(im) # convert image to numpy matrix
matriz ^= 0xFF # invert matrix
oneD_matriz = matriz[up:down,left:right,1] #only look at the green canal 1
    indices = np.argwhere(oneD_matriz >= 180) # all pixel coordinates where the value is higher than 179
meanx = np.average(indices[:,0]) + up
meany = np.average(indices[:,1]) + left
# select new area of the matrix, that is the input for CNN
box = (meany - s, meanx - s, meany + s , meanx + s)
new_image = im.crop(box) # crop is Pill function
im.close()
return new_image
def find_overlap(root_path):
'''function to overlap all pictures
    creates an image of all overlaid pictures so the interesting area of the picture can be classified manually
    the size of the image has to be adjusted
'''
maximum = np.zeros((3456, 4608))
for root, dirs, files in os.walk(root_path, topdown=False):
for name in files:
im_path = (os.path.join(root, name))
if name[0] == 'I': #making sure its an image, because there are some other files in the directory
image = Image.open(im_path)
image.convert('RGB')
matriz = np.array(image)
maximum = np.maximum(maximum, matriz[:, :, 0])
maximum = np.maximum(maximum, matriz[:, :, 1])
maximum = np.maximum(maximum, matriz[:, :, 2])
image.close()
return maximum
|
[
"numpy.save",
"matplotlib.pyplot.show",
"numpy.average",
"os.path.join",
"numpy.maximum",
"matplotlib.pyplot.imshow",
"os.walk",
"numpy.zeros",
"numpy.ones",
"PIL.Image.open",
"os.path.isfile",
"numpy.array",
"os.path.splitext",
"numpy.argwhere",
"matplotlib.pyplot.tight_layout",
"numpy.random.shuffle"
] |
[((318, 350), 'numpy.zeros', 'np.zeros', (['dataset[0].image.shape'], {}), '(dataset[0].image.shape)\n', (326, 350), True, 'import numpy as np\n'), ((469, 501), 'numpy.zeros', 'np.zeros', (['dataset[0].image.shape'], {}), '(dataset[0].image.shape)\n', (477, 501), True, 'import numpy as np\n'), ((1774, 1806), 'os.walk', 'os.walk', (['root_path'], {'topdown': '(True)'}), '(root_path, topdown=True)\n', (1781, 1806), False, 'import os\n'), ((3510, 3526), 'numpy.array', 'np.array', (['new_im'], {}), '(new_im)\n', (3518, 3526), True, 'import numpy as np\n'), ((4492, 4504), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (4500, 4504), True, 'import numpy as np\n'), ((4676, 4707), 'numpy.argwhere', 'np.argwhere', (['(oneD_matriz >= 180)'], {}), '(oneD_matriz >= 180)\n', (4687, 4707), True, 'import numpy as np\n'), ((5321, 5343), 'numpy.zeros', 'np.zeros', (['(3456, 4608)'], {}), '((3456, 4608))\n', (5329, 5343), True, 'import numpy as np\n'), ((5374, 5407), 'os.walk', 'os.walk', (['root_path'], {'topdown': '(False)'}), '(root_path, topdown=False)\n', (5381, 5407), False, 'import os\n'), ((1263, 1289), 'numpy.random.shuffle', 'np.random.shuffle', (['l_train'], {}), '(l_train)\n', (1280, 1289), True, 'import numpy as np\n'), ((1298, 1323), 'numpy.random.shuffle', 'np.random.shuffle', (['l_test'], {}), '(l_test)\n', (1315, 1323), True, 'import numpy as np\n'), ((2631, 2663), 'os.walk', 'os.walk', (['tree_path'], {'topdown': '(True)'}), '(tree_path, topdown=True)\n', (2638, 2663), False, 'import os\n'), ((2885, 2904), 'numpy.array', 'np.array', (['leaf_list'], {}), '(leaf_list)\n', (2893, 2904), True, 'import numpy as np\n'), ((2913, 2968), 'numpy.save', 'np.save', (['tree_save_path', 'leaf_array'], {'allow_pickle': '(False)'}), '(tree_save_path, leaf_array, allow_pickle=False)\n', (2920, 2968), True, 'import numpy as np\n'), ((3012, 3030), 'numpy.ones', 'np.ones', (['(7, 1, 1)'], {}), '((7, 1, 1))\n', (3019, 3030), True, 'import numpy as np\n'), ((3959, 3982), 'matplotlib.pyplot.imshow', 'plt.imshow', (['oneD_matriz'], {}), '(oneD_matriz)\n', (3969, 3982), True, 'import matplotlib.pyplot as plt\n'), ((3991, 4009), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4007, 4009), True, 'import matplotlib.pyplot as plt\n'), ((4018, 4028), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4026, 4028), True, 'import matplotlib.pyplot as plt\n'), ((4782, 4807), 'numpy.average', 'np.average', (['indices[:, 0]'], {}), '(indices[:, 0])\n', (4792, 4807), True, 'import numpy as np\n'), ((4824, 4849), 'numpy.average', 'np.average', (['indices[:, 1]'], {}), '(indices[:, 1])\n', (4834, 4849), True, 'import numpy as np\n'), ((4447, 4463), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (4457, 4463), False, 'from PIL import Image\n'), ((5459, 5483), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (5471, 5483), False, 'import os\n'), ((5620, 5639), 'PIL.Image.open', 'Image.open', (['im_path'], {}), '(im_path)\n', (5630, 5639), False, 'from PIL import Image\n'), ((5702, 5717), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (5710, 5717), True, 'import numpy as np\n'), ((5761, 5797), 'numpy.maximum', 'np.maximum', (['maximum', 'matriz[:, :, 0]'], {}), '(maximum, matriz[:, :, 0])\n', (5771, 5797), True, 'import numpy as np\n'), ((5824, 5860), 'numpy.maximum', 'np.maximum', (['maximum', 'matriz[:, :, 1]'], {}), '(maximum, matriz[:, :, 1])\n', (5834, 5860), True, 'import numpy as np\n'), ((5887, 5923), 'numpy.maximum', 'np.maximum', (['maximum', 
'matriz[:, :, 2]'], {}), '(maximum, matriz[:, :, 2])\n', (5897, 5923), True, 'import numpy as np\n'), ((2221, 2266), 'numpy.save', 'np.save', (['savepath', 'matriX'], {'allow_pickle': '(False)'}), '(savepath, matriX, allow_pickle=False)\n', (2228, 2266), True, 'import numpy as np\n'), ((1919, 1938), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1935, 1938), False, 'import os\n'), ((2073, 2106), 'os.path.isfile', 'os.path.isfile', (["(savepath + '.npy')"], {}), "(savepath + '.npy')\n", (2087, 2106), False, 'import os\n'), ((2169, 2190), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (2181, 2190), False, 'import os\n'), ((2840, 2861), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (2852, 2861), False, 'import os\n')]
|
import random
import numpy as np
from utils import splitPoly
import matplotlib.patches as patches
import matplotlib.path as path
from matplotlib.transforms import Bbox
import cartopy.crs as ccrs
from spot import Spot
class Star:
    # Stellar radius in RSun, inclination in degrees
# Limb darkening grid resolution (pixel*pixel grid)
# Rotation period in days
def __init__(self, params):
self.radius = params.rad_star
self.inc = params.sinc
self.res = params.res
self.period = params.prot
self.u = params.u
self.spots = None
self.initial_band = params.high_band
self.low_band = params.low_band
self.cycle = params.stellar_cycle
self.active_region = list(self.initial_band)
self.active_region_vel = [-(params.high_band[0]-params.low_band[0])/self.cycle, -(params.high_band[1] - params.low_band[1])/self.cycle]
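        # drift rate that moves the active latitude band from high_band towards low_band over one stellar cycle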
self.params = params # Needed for new spot generation
# Create globe structure and set up initial projections
self.globe = ccrs.Globe(semimajor_axis=self.radius, semiminor_axis=self.radius, ellipse='sphere', flattening=1e-9)
self.rotated_proj = ccrs.RotatedPole(pole_longitude=180, pole_latitude=90-self.inc, central_rotated_longitude=0, globe=self.globe)
self.geodetic_proj = ccrs.Geodetic(globe=self.globe)
self.orth_proj = ccrs.Orthographic(globe=self.globe, central_latitude = self.inc, central_longitude=0)
# Visible surface
edge = 90
self.lon1, self.lat1, self.lon2, self.lat2 = -edge, -edge, edge, edge
# Circular grid for limb darkening formula scaled to unity
x = np.linspace(-1,1,self.res)
x, y = np.meshgrid(x,x)
self.grid = np.sqrt(x**2 + y**2)
self.greater_mask = np.ma.masked_greater(self.grid,1).mask
self.grid[self.greater_mask] = np.nan
self.totalGridSquares = self.res**2 - self.greater_mask.sum()
self.grid_x, self.grid_y = (x*self.radius, y*self.radius) # Re-scale grid back to given star radius
# Unspotted Flux
self.unspottedFlux = self.limbDarken()
self.totalUnspottedFlux = self.totalFlux(self.unspottedFlux)
# Spotted Flux
self.spottedFlux = None
self.totalSpottedFlux = None
# Apply quadratic limb darkening to model
def limbDarken(self):
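        # quadratic limb-darkening law: I(mu)/I(1) = 1 - u1*(1 - mu) - u2*(1 - mu)**2, with mu = sqrt(1 - r**2)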
mu = np.sqrt(1-self.grid**2)
mu_1 = 1-mu
u1 = self.u[0]
u2 = self.u[1]
unspottedFlux = 1-u1*mu_1-u2*(mu_1**2)
return unspottedFlux
# Add spots
def addSpots(self, spots):
self.spots = spots
self.spottedFlux = self.mapSpots()
self.totalSpottedFlux = self.totalFlux(self.spottedFlux)
# Life Cycle management
def update(self, cur_phase, t):
# Update projections
cur_long = 360*((cur_phase)%1)
self.updateProjections(cur_long)
# If spots, update them
        if self.spots is not None:
self.updateSpots(t)
self.spottedFlux = self.mapSpots()
self.totalSpottedFlux = self.totalFlux(self.spottedFlux)
def updateProjections(self, cur_long):
        # Calculate projections based on current rotation
self.rotated_proj = ccrs.RotatedPole(pole_longitude=cur_long-180, pole_latitude=90-self.inc, central_rotated_longitude=0, globe=self.globe)
self.orth_proj = ccrs.Orthographic(globe=self.globe, central_latitude = self.inc, central_longitude=cur_long)
def updateSpots(self, t, dt=0):
# If no spots then ignore
        if self.spots is not None:
# Update active latitudes first
if dt > 0: self.updateActiveRegion(dt)
# Update spots and remove if dead
doCull = []
for spot in self.spots:
if dt > 0: spot.update(self, t, dt)
if spot.dead: doCull.append(spot)
# Remove dead spots and replace
if len(doCull) > 0:
spotsToAddBack = len(doCull)
for obj in doCull:
self.spots.remove(obj)
for i in range(spotsToAddBack):
self.spots.append(Spot.gen_spot(self.params, self, t))
def updateActiveRegion(self, dt):
self.active_region[0] += dt*self.active_region_vel[0]
self.active_region[1] += dt*self.active_region_vel[1]
# Reset when lower than lower band limit
if self.active_region[0] < self.low_band[0] or self.active_region[1] < self.low_band[1]:
self.active_region = list(self.initial_band)
# Spot masking and mapping
def maskPixels(self, path):
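        # boolean mask of limb-darkening grid pixels that lie inside the given matplotlib Path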
XY = np.dstack((self.grid_x, self.grid_y))
XY_flat = XY.reshape((-1, 2))
mask_flat = path.contains_points(XY_flat)
mask = mask_flat.reshape(self.grid_x.shape)
return mask
def mapSpots(self):
# Create new flux array
spottedFlux = self.unspottedFlux*np.ones(self.unspottedFlux.shape)
# Map Spots
for i, spot in enumerate(self.spots):
# Get polygon
spotPoly = spot.poly
# Transform spot coords from Geodetic coord system to rotated projection
spot_vs = self.rotated_proj.transform_points(self.geodetic_proj, spotPoly.vertices[:,0], spotPoly.vertices[:,1])[:,0:2]
# Split poly to avoid issues at boundary
polys = splitPoly(spot_vs, 180)
for poly in polys:
# Get vertices of spot/tissot polygon
spot_vs = poly.get_xy()
# Mask in rotated projection (use mpl.Path.clip_to_bbox function)
spot_path = patches.Path(spot_vs).clip_to_bbox(Bbox([[self.lon1,self.lat1],[self.lon2,self.lat2]]))
# If spot in visible area calculate flux change
if len(spot_path.vertices):
# Transform masked path to orth projection as this is coordinate space LD grid is in
spot_vs = self.orth_proj.transform_points(self.rotated_proj, spot_path.vertices[:,0], spot_path.vertices[:,1])[:,0:2]
spot_path = patches.Path(spot_vs)
# Find pixels contained in mask and multiply by spot brightnesss
mask = self.maskPixels(spot_path)
spottedFlux[mask] = spottedFlux[mask]*spot.brightness
return spottedFlux
# Manage transit
def transit(self, planet, time, dt):
I = []
D = []
Time = []
planetPoly = patches.CirclePolygon((0,0),1,100)
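        # unit circle approximated by a 100-vertex polygon; rescaled to the planet's size at each timestep below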
while (planet.isTransiting(time)):
# Carry on now integrating planet across surface but don't rotate star
planetFlux = self.unspottedFlux*np.ones(self.unspottedFlux.shape) if self.spottedFlux is None else self.spottedFlux*np.ones(self.spottedFlux.shape)
# Find position of planet and scale to star's radius
X, Y = planet.skyPosAtTime(time)
planet_vx = self.radius*(planetPoly.get_path().vertices[:,0]*planet.rad + X)
planet_vy = self.radius*(planetPoly.get_path().vertices[:,1]*planet.rad + Y)
planet_path = path.Path(np.column_stack((planet_vx,planet_vy)))
            # Find pixels contained within the planet's disk and set them to 0
mask = self.maskPixels(planet_path)
planetFlux[mask] = 0
totalTransitFlux = self.totalFlux(planetFlux)
I.append(totalTransitFlux)
if self.spots is None:
D.append(self.totalUnspottedFlux - totalTransitFlux)
else:
D.append(self.totalSpottedFlux - totalTransitFlux)
Time.append(time)
time += dt
return I, D, Time, time
# Helper func to sum over grid of flux values
def totalFlux(self, flx):
totalFlux = flx[~self.greater_mask].sum()/self.totalGridSquares
return totalFlux
|
[
"numpy.dstack",
"utils.splitPoly",
"matplotlib.patches.Path",
"cartopy.crs.RotatedPole",
"cartopy.crs.Geodetic",
"numpy.meshgrid",
"matplotlib.path.contains_points",
"matplotlib.transforms.Bbox",
"numpy.ma.masked_greater",
"numpy.column_stack",
"spot.Spot.gen_spot",
"numpy.ones",
"matplotlib.patches.CirclePolygon",
"numpy.linspace",
"cartopy.crs.Globe",
"cartopy.crs.Orthographic",
"numpy.sqrt"
] |
[((1059, 1166), 'cartopy.crs.Globe', 'ccrs.Globe', ([], {'semimajor_axis': 'self.radius', 'semiminor_axis': 'self.radius', 'ellipse': '"""sphere"""', 'flattening': '(1e-09)'}), "(semimajor_axis=self.radius, semiminor_axis=self.radius, ellipse=\n 'sphere', flattening=1e-09)\n", (1069, 1166), True, 'import cartopy.crs as ccrs\n'), ((1189, 1305), 'cartopy.crs.RotatedPole', 'ccrs.RotatedPole', ([], {'pole_longitude': '(180)', 'pole_latitude': '(90 - self.inc)', 'central_rotated_longitude': '(0)', 'globe': 'self.globe'}), '(pole_longitude=180, pole_latitude=90 - self.inc,\n central_rotated_longitude=0, globe=self.globe)\n', (1205, 1305), True, 'import cartopy.crs as ccrs\n'), ((1329, 1360), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {'globe': 'self.globe'}), '(globe=self.globe)\n', (1342, 1360), True, 'import cartopy.crs as ccrs\n'), ((1386, 1473), 'cartopy.crs.Orthographic', 'ccrs.Orthographic', ([], {'globe': 'self.globe', 'central_latitude': 'self.inc', 'central_longitude': '(0)'}), '(globe=self.globe, central_latitude=self.inc,\n central_longitude=0)\n', (1403, 1473), True, 'import cartopy.crs as ccrs\n'), ((1683, 1711), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'self.res'], {}), '(-1, 1, self.res)\n', (1694, 1711), True, 'import numpy as np\n'), ((1725, 1742), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (1736, 1742), True, 'import numpy as np\n'), ((1762, 1786), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (1769, 1786), True, 'import numpy as np\n'), ((2411, 2438), 'numpy.sqrt', 'np.sqrt', (['(1 - self.grid ** 2)'], {}), '(1 - self.grid ** 2)\n', (2418, 2438), True, 'import numpy as np\n'), ((3300, 3427), 'cartopy.crs.RotatedPole', 'ccrs.RotatedPole', ([], {'pole_longitude': '(cur_long - 180)', 'pole_latitude': '(90 - self.inc)', 'central_rotated_longitude': '(0)', 'globe': 'self.globe'}), '(pole_longitude=cur_long - 180, pole_latitude=90 - self.inc,\n central_rotated_longitude=0, globe=self.globe)\n', (3316, 3427), True, 'import cartopy.crs as ccrs\n'), ((3445, 3539), 'cartopy.crs.Orthographic', 'ccrs.Orthographic', ([], {'globe': 'self.globe', 'central_latitude': 'self.inc', 'central_longitude': 'cur_long'}), '(globe=self.globe, central_latitude=self.inc,\n central_longitude=cur_long)\n', (3462, 3539), True, 'import cartopy.crs as ccrs\n'), ((4750, 4787), 'numpy.dstack', 'np.dstack', (['(self.grid_x, self.grid_y)'], {}), '((self.grid_x, self.grid_y))\n', (4759, 4787), True, 'import numpy as np\n'), ((4846, 4875), 'matplotlib.path.contains_points', 'path.contains_points', (['XY_flat'], {}), '(XY_flat)\n', (4866, 4875), True, 'import matplotlib.path as path\n'), ((6663, 6700), 'matplotlib.patches.CirclePolygon', 'patches.CirclePolygon', (['(0, 0)', '(1)', '(100)'], {}), '((0, 0), 1, 100)\n', (6684, 6700), True, 'import matplotlib.patches as patches\n'), ((1811, 1845), 'numpy.ma.masked_greater', 'np.ma.masked_greater', (['self.grid', '(1)'], {}), '(self.grid, 1)\n', (1831, 1845), True, 'import numpy as np\n'), ((5050, 5083), 'numpy.ones', 'np.ones', (['self.unspottedFlux.shape'], {}), '(self.unspottedFlux.shape)\n', (5057, 5083), True, 'import numpy as np\n'), ((5514, 5537), 'utils.splitPoly', 'splitPoly', (['spot_vs', '(180)'], {}), '(spot_vs, 180)\n', (5523, 5537), False, 'from utils import splitPoly\n'), ((7309, 7348), 'numpy.column_stack', 'np.column_stack', (['(planet_vx, planet_vy)'], {}), '((planet_vx, planet_vy))\n', (7324, 7348), True, 'import numpy as np\n'), ((5822, 5876), 'matplotlib.transforms.Bbox', 'Bbox', (['[[self.lon1, self.lat1], [self.lon2, self.lat2]]'], {}), '([[self.lon1, self.lat1], [self.lon2, self.lat2]])\n', (5826, 5876), False, 'from matplotlib.transforms import Bbox\n'), ((6259, 6280), 'matplotlib.patches.Path', 'patches.Path', (['spot_vs'], {}), '(spot_vs)\n', (6271, 6280), True, 'import matplotlib.patches as patches\n'), ((6868, 6901), 'numpy.ones', 'np.ones', (['self.unspottedFlux.shape'], {}), '(self.unspottedFlux.shape)\n', (6875, 6901), True, 'import numpy as np\n'), ((6952, 6983), 'numpy.ones', 'np.ones', (['self.spottedFlux.shape'], {}), '(self.spottedFlux.shape)\n', (6959, 6983), True, 'import numpy as np\n'), ((4262, 4297), 'spot.Spot.gen_spot', 'Spot.gen_spot', (['self.params', 'self', 't'], {}), '(self.params, self, t)\n', (4275, 4297), False, 'from spot import Spot\n'), ((5787, 5808), 'matplotlib.patches.Path', 'patches.Path', (['spot_vs'], {}), '(spot_vs)\n', (5799, 5808), True, 'import matplotlib.patches as patches\n')]
|
"""
This file contains source code from another GitHub project; the comments made there still apply. The source
code was licensed under the MIT License. The license text and a detailed reference can be found in the
license subfolder at models/east_open_cv/license. Many thanks to the author of the code.
For clarity, unneeded parts of the original code were omitted. The original project can be found at
https://github.com/ZER-0-NE/EAST-Detector-for-text-detection-using-OpenCV.
For better understanding, the documentation has been extended in places. Code taken completely or
predominantly from the source is marked with "External code".
"""
import time
import cv2
import numpy as np
from imutils.object_detection import non_max_suppression
import bridges_config as config
class EastOpenCvBridge:
"""A bridge class for connecting to a text detector
"""
def __init__(self):
"""The constructor
"""
self.load_model()
def load_model(self):
"""Loads the underlying model together with its pre-trained weights.
"""
try:
self.model = cv2.dnn.readNet(config.EAST_OPENCV_MODEL_PATH)
        except Exception:
            print('Error in method {0} in module {1}'.format('load_model', 'east_open_cv_bridge.py'))
def scann(self, image):
"""External code (add try...except and an extension)
Examines the passed image for text regions and returns them as a collection of boxes in the
form of a NumPy array. The passed image must be a raster image.
:param image:The image to be examined.
:return:A NumPy array of predicted text areas.
"""
try:
# load the input image and grab the image dimensions
self.orig = image.copy()
(H, W) = image.shape[:2]
# set the new width and height and then determine the ratio in change
# for both the width and height, should be multiple of 32
(newW, newH) = (320, 320)
rW = W / float(newW)
rH = H / float(newH)
# resize the image and grab the new image dimensions
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
# define the two output layer names for the EAST detector model that
# we are interested -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text
self.layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
# construct a blob from the image and then perform a forward pass of
# the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
(123.68, 116.78, 103.94), swapRB=True, crop=False)
start = time.time()
self.model.setInput(blob)
(scores, geometry) = self.model.forward(self.layerNames)
end = time.time()
# grab the number of rows and columns from the scores volume, then
# initialize our set of bounding box rectangles and corresponding
# confidence scores
(numRows, numCols) = scores.shape[2:4]
            rects = [] # stores the bounding box coordinates for text regions
confidences = [] # stores the probability associated with each bounding box region in rects
# loop over the number of rows
for y in range(0, numRows):
# extract the scores (probabilities), followed by the geometrical
# data used to derive potential bounding box coordinates that
# surround text
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
# loop over the number of columns
for x in range(0, numCols):
# if our score does not have sufficient probability, ignore it
if scoresData[x] < 0.5:
continue
# compute the offset factor as our resulting feature maps will
# be 4x smaller than the input image
(offsetX, offsetY) = (x * 4.0, y * 4.0)
# extract the rotation angle for the prediction and then
# compute the sin and cosine
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
# use the geometry volume to derive the width and height of
# the bounding box
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
# compute both the starting and ending (x, y)-coordinates for
# the text prediction bounding box
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
# add the bounding box coordinates and probability score to
# our respective lists
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
boxes = non_max_suppression(np.array(rects), probs=confidences)
"""
Extension to the original code to return a usable format.
"""
newboxes = []
# loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
# scale the bounding box coordinates based on the respective ratios
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
box = []
box.append([startX, startY])
box.append([endX, startY])
box.append([endX, endY])
box.append([startX, endY])
newboxes.append(box)
return np.asarray(newboxes)
        except Exception:
            print('Error in method {0} in module {1}'.format('scann', 'east_open_cv_bridge.py'))
return None
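# --- Hedged usage sketch (not part of the original file) ---
# Assumes config.EAST_OPENCV_MODEL_PATH points at a valid frozen EAST
# model and that 'sample.jpg' is an illustrative, readable raster image.
if __name__ == '__main__':
    bridge = EastOpenCvBridge()
    sample = cv2.imread('sample.jpg')
    if sample is not None:
        boxes = bridge.scann(sample)
        if boxes is not None:
            print('Detected {0} text regions'.format(len(boxes)))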
|
[
"numpy.asarray",
"cv2.dnn.blobFromImage",
"time.time",
"cv2.dnn.readNet",
"numpy.sin",
"numpy.array",
"numpy.cos",
"cv2.resize"
] |
[((1172, 1218), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['config.EAST_OPENCV_MODEL_PATH'], {}), '(config.EAST_OPENCV_MODEL_PATH)\n', (1187, 1218), False, 'import cv2\n'), ((2235, 2266), 'cv2.resize', 'cv2.resize', (['image', '(newW, newH)'], {}), '(image, (newW, newH))\n', (2245, 2266), False, 'import cv2\n'), ((2844, 2941), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1.0)', '(W, H)', '(123.68, 116.78, 103.94)'], {'swapRB': '(True)', 'crop': '(False)'}), '(image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=\n True, crop=False)\n', (2865, 2941), False, 'import cv2\n'), ((3000, 3011), 'time.time', 'time.time', ([], {}), '()\n', (3009, 3011), False, 'import time\n'), ((3140, 3151), 'time.time', 'time.time', ([], {}), '()\n', (3149, 3151), False, 'import time\n'), ((6630, 6650), 'numpy.asarray', 'np.asarray', (['newboxes'], {}), '(newboxes)\n', (6640, 6650), True, 'import numpy as np\n'), ((5843, 5858), 'numpy.array', 'np.array', (['rects'], {}), '(rects)\n', (5851, 5858), True, 'import numpy as np\n'), ((4802, 4815), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4808, 4815), True, 'import numpy as np\n'), ((4843, 4856), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4849, 4856), True, 'import numpy as np\n')]
|
# Copyright - Transporation, Bots, and Disability Lab - Carnegie Mellon University
# Released under MIT License
"""
Common operations/code that are re-implemented for Baxter
"""
import numpy as np
from pyquaternion import Quaternion
from alloy.math import *
__all__ = [
'convert_joint_angles_to_numpy','transform_pose_into_rotation_matrix',
'calculate_pose_difference'
]
def convert_joint_angles_to_numpy(joint_angles, joint_names):
"""Convert the dictionary based joint angles given by baxter interface to
a numpy array according to the given joint names
"""
arr = np.zeros(7)
for i, key in enumerate(joint_names):
arr[i] = joint_angles[key]
return arr
def transform_pose_into_rotation_matrix(pose_np):
#pose_np = pose_to_numpy(pose)
translation_comp = pose_np[0:3]
trans_mat = Quaternion(pose_np[3:]).transformation_matrix
trans_mat[0:3,3] = translation_comp
return trans_mat
def calculate_pose_difference(p1, p2):
"""Calculate the pose error from p1 to p2. Note the resulting
error is calculated in the frame of p1 and not the base frame
do p[0:3] = p[0:3] - np.cross(x[0:3],p[3:])
"""
error = np.zeros(6,)
#the position error is just the difference in position
error[0:3] = p2[0:3] - p1[0:3]
#orientation error is more tricky
desire_q = Quaternion(p2[3:])
error_q = desire_q * Quaternion(p1[3:]).inverse
error[3:] = error_q.axis * error_q.angle
return error
#transform_quaternion = Quaternion(pose_np[3:]). Quaternion(pose_np[3:])
# def calculate_pose_difference(p1, p2):
# """Calculate the error from p1 to p2. Note the resulting
# error is calculated in the frame of p1 and not the base frame
# do p[0:3] = p[0:3] - np.cross(x[0:3],p[3:])
# """
# mat1 = transform_pose_into_rotation_matrix(p1)
# mat2 = transform_pose_into_rotation_matrix(p2)
# error = calculate_error_between_two_transformation_matrix(mat1, mat2)
# return calculate_error_between_two_transformation_matrix(mat1, mat2)
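# --- Hedged usage sketch (not part of the original module) ---
# The joint names below are illustrative placeholders, not real Baxter
# joint identifiers; the poses are (x, y, z, qw, qx, qy, qz) with
# identity rotations, so only the translational error is non-zero.
if __name__ == '__main__':
    names = ['s0', 's1', 'e0', 'e1', 'w0', 'w1', 'w2']
    angles = {n: 0.1 * i for i, n in enumerate(names)}
    print(convert_joint_angles_to_numpy(angles, names))
    p1 = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
    p2 = np.array([0.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
    print(calculate_pose_difference(p1, p2))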
|
[
"pyquaternion.Quaternion",
"numpy.zeros"
] |
[((595, 606), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (603, 606), True, 'import numpy as np\n'), ((1186, 1197), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (1194, 1197), True, 'import numpy as np\n'), ((1346, 1364), 'pyquaternion.Quaternion', 'Quaternion', (['p2[3:]'], {}), '(p2[3:])\n', (1356, 1364), False, 'from pyquaternion import Quaternion\n'), ((837, 860), 'pyquaternion.Quaternion', 'Quaternion', (['pose_np[3:]'], {}), '(pose_np[3:])\n', (847, 860), False, 'from pyquaternion import Quaternion\n'), ((1390, 1408), 'pyquaternion.Quaternion', 'Quaternion', (['p1[3:]'], {}), '(p1[3:])\n', (1400, 1408), False, 'from pyquaternion import Quaternion\n')]
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import scipy.io
from scipy import optimize
from feature_func import *
from preprocess import *
from utils import *
def fit_data(gt_count, feature_data, function):
return optimize.curve_fit(function, feature_data, gt_count)
def plot_data(gt_count, feature_data, test_func=None):
plt.scatter(feature_data, gt_count, label='raw data')
    if test_func is not None:
params, params_var = fit_data(gt_count, feature_data, test_func)
x_linspace = np.linspace(min(feature_data), max(feature_data), num=len(feature_data))
plt.plot(x_linspace, test_func(x_linspace, *params), label='Fitted quadratic polynomial')
def test_func(x, a2, a1, a0):
return a2 * np.power(x, 2) + a1 * np.power(x, 1) + a0
def retrieve_data(image_root_path, mod=10):
# processing ucsd pedestrian dataset
sub_folder_index = 0
image_count = 0
images = []
gt_count_in_images = []
for sub_folder in image_root_path.glob('**/'):
print(sub_folder.name.split('.')[0].split('_')[-1])
if sub_folder_index == 0 or sub_folder.name.split('_')[0] != 'vidf1' or int(sub_folder.name.split('.')[0].split('_')[-1]) > 9:
sub_folder_index += 1
continue
print(sub_folder.name)
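        # NOTE (added): relies on annotation_root_path, a module-level
        # variable that is defined in the __main__ block below.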
mat_path = annotation_root_path / (sub_folder.name.split('.')[0] + '_frame_full.mat')
mat = read_mat(mat_path)
for f in sub_folder.iterdir():
if not f.is_file():
continue
frame_index = int(f.name[-7:-4]) - 1
if image_count % mod == 0:
img = cv2.imread(str(f), 0)
images.append(img)
gt_count_in_images.append(mat['frame'][0][frame_index][0][0][0].shape[0])
image_count += 1
sub_folder_index += 1
return images, gt_count_in_images
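# --- Hedged usage sketch (not part of the original script) ---
# Fits the quadratic test_func to synthetic data; only numpy and scipy
# are needed, no dataset paths.
def _demo_fit():
    rng = np.random.default_rng(0)
    x = np.linspace(0, 10, 50)
    y = 0.5 * x ** 2 + 2 * x + 1 + rng.normal(0, 1, x.size)
    params, _ = fit_data(y, x, test_func)
    print('fitted coefficients (a2, a1, a0):', params)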
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
background_image_path = '/home/osense-office/Documents/dataset/surveillance/ucsdpeds/background.png'
background_image = cv2.imread(background_image_path, 0)
image_root_dir = '/home/osense-office/Documents/dataset/surveillance/ucsdpeds/vidf'
image_root_path = Path(image_root_dir)
annotation_root_dir = '/home/osense-office/Documents/dataset/surveillance/ucsdpeds/vidf-cvpr'
annotation_root_path = Path(annotation_root_dir)
pmap = get_pmapxy('/home/osense-office/Documents/dataset/surveillance/ucsdpeds/vidf-cvpr/vidf1_33_dmap3.mat')
images, gt_count_in_images = retrieve_data(image_root_path, mod=30)
print(len(images))
edited = get_abs_diff(images, background_image)
blurred = get_foreground_mask(edited, threshold=25)
seg_peri = get_seg_perimeter(blurred)
# perspective_seg_size = get_seg_size(edited, pmapxy=pmap)
plot_data(gt_count_in_images, seg_peri, test_func)
plt.legend(loc='best')
plt.title(label='segmentation perimeter against people count')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.power",
"scipy.optimize.curve_fit",
"cv2.imread",
"pathlib.Path"
] |
[((264, 316), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['function', 'feature_data', 'gt_count'], {}), '(function, feature_data, gt_count)\n', (282, 316), False, 'from scipy import optimize\n'), ((378, 431), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_data', 'gt_count'], {'label': '"""raw data"""'}), "(feature_data, gt_count, label='raw data')\n", (389, 431), True, 'import matplotlib.pyplot as plt\n'), ((2120, 2156), 'cv2.imread', 'cv2.imread', (['background_image_path', '(0)'], {}), '(background_image_path, 0)\n', (2130, 2156), False, 'import cv2\n'), ((2267, 2287), 'pathlib.Path', 'Path', (['image_root_dir'], {}), '(image_root_dir)\n', (2271, 2287), False, 'from pathlib import Path\n'), ((2413, 2438), 'pathlib.Path', 'Path', (['annotation_root_dir'], {}), '(annotation_root_dir)\n', (2417, 2438), False, 'from pathlib import Path\n'), ((2923, 2945), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2933, 2945), True, 'import matplotlib.pyplot as plt\n'), ((2950, 3012), 'matplotlib.pyplot.title', 'plt.title', ([], {'label': '"""segmentation perimeter against people count"""'}), "(label='segmentation perimeter against people count')\n", (2959, 3012), True, 'import matplotlib.pyplot as plt\n'), ((3017, 3027), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3025, 3027), True, 'import matplotlib.pyplot as plt\n'), ((771, 785), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (779, 785), True, 'import numpy as np\n'), ((793, 807), 'numpy.power', 'np.power', (['x', '(1)'], {}), '(x, 1)\n', (801, 807), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from keras.models import Sequential, Model
from keras.models import model_from_yaml
import keras.backend as K
from keras.callbacks import Callback
from ..utility.utils import path
def custom_uniform(shape, range=(-1, 1), name=None):
"""
Example of custom function for keras.
"""
min_, max_ = range
return K.variable(
np.random.uniform(low=min_, high=max_, size=shape), name=name)
# Example usage:
# net.add(Dense(10, input_dim=5, init=lambda shape,
# name: custom_uniform(shape, (-10, 5), name)))
class TestCallback(Callback):
"""
Example callback class for keras.
"""
def __init__(self, generator):
self.data_generator = generator
def on_epoch_end(self, epoch, logs={}):
x, y = next(self.data_generator)
loss, acc = self.model.evaluate(x, y, verbose=0)
print('\nTesting loss: {}, acc: {}\n'.format(loss, acc))
class Network(object):
"""
Base class for the various neural networks.
"""
def __init__(self):
self.metrics = ()
self.model = Sequential()
def first_layer_output(self, x):
weights = self.get_layer_weights(1)
W = weights[0]
b = weights[1]
return np.dot(x, W) + b
def predict_on_batch(self, x):
return self.model.predict_on_batch(x)
def get_weights(self, layer=None):
if layer is None:
return self.model.get_weights()
return self.model.layers[layer].get_weights()
def weight_shapes(self):
return self.get_weights()[0].shape, self.get_weights()[1].shape
def set_layer_weights(self, layer, weights):
self.model.layers[layer].set_weights(
[weights, self.get_weights(layer)[1]])
def set_layer_bias(self, layer, bias):
self.model.layers[layer].set_weights(
[self.get_weights(layer)[0], bias])
def set_layer_parameters(self, layer, weights, bias):
self.model.layers[layer].set_weights([weights, bias])
def get_layer_weights(self, layer):
return self.model.get_layer(index=layer).get_weights()
def train_once(self, data, batch_size):
self.model.fit(data[0], data[1], epochs=1, batch_size=batch_size)
def train_on_generator(self, training_set_generator, batches_per_epoch,
epochs, verbose):
h = self.model.fit_generator(
training_set_generator, batches_per_epoch, epochs, verbose=verbose)
loss = h.history['loss'][epochs - 1]
acc = h.history['categorical_accuracy'][epochs - 1]
self.metrics = '{0:.3g}'.format(loss), '{0:.3g}'.format(acc)
def save(self, relative_path, filename=None):
if filename is None:
filename = 'model'
absolute_path = ''.join([path(), relative_path, filename])
network_out = ''.join([absolute_path, '.yaml'])
weight_out = ''.join([absolute_path, '.h5'])
model_yaml = self.model.to_yaml()
with open(network_out, 'w') as yaml_file:
yaml_file.write(model_yaml)
self.model.save_weights(weight_out)
def load(self, relative_path, filename):
absolute_path = ''.join([path(), relative_path, filename])
network = ''.join([absolute_path, '.yaml'])
weights = ''.join([absolute_path, '.h5'])
with open(network, 'r') as yaml_file:
loaded_model_yaml = yaml_file.read()
self.model = model_from_yaml(loaded_model_yaml)
self.model.load_weights(weights)
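# --- Hedged usage sketch (not part of the original module) ---
# Builds a tiny two-layer model on top of the base class; the layer
# sizes are arbitrary illustrative choices.
if __name__ == '__main__':
    from keras.layers import Dense
    net = Network()
    net.model.add(Dense(4, input_dim=3, activation='relu'))
    net.model.add(Dense(2, activation='softmax'))
    x = np.random.rand(5, 3).astype(np.float32)
    print(net.predict_on_batch(x).shape)  # expected: (5, 2)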
|
[
"numpy.dot",
"numpy.random.uniform",
"keras.models.model_from_yaml",
"keras.models.Sequential"
] |
[((415, 465), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'min_', 'high': 'max_', 'size': 'shape'}), '(low=min_, high=max_, size=shape)\n', (432, 465), True, 'import numpy as np\n'), ((1139, 1151), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1149, 1151), False, 'from keras.models import Sequential, Model\n'), ((3502, 3536), 'keras.models.model_from_yaml', 'model_from_yaml', (['loaded_model_yaml'], {}), '(loaded_model_yaml)\n', (3517, 3536), False, 'from keras.models import model_from_yaml\n'), ((1296, 1308), 'numpy.dot', 'np.dot', (['x', 'W'], {}), '(x, W)\n', (1302, 1308), True, 'import numpy as np\n')]
|
import numpy as np
import sys
sys.path.insert(0, '..')
from src.utils import *
class LinearRegression:
def __init__(self):
self.params = None
def train(self, X, y, iterations=5000, learning_rate=0.01, display=False):
'''
Input parameters:
X: (mxn) array where m is the number of training examples and n is the number of features
y: (mx1) array with target values
'''
# We initialize parameters as a (1xn) array of zeros
self.params = np.zeros((X.shape[1], 1))
loss_hist = np.zeros((1,0))
for i in range(iterations):
y_hat = X.dot(self.params)
loss = MeanSquaredError.loss(y, y_hat)
loss_hist = np.append(loss_hist, loss)
self.params = BatchGradientDescent.optimize(
X, y, y_hat, self.params, learning_rate, MeanSquaredError)
if display:
show_progress(i, iterations, loss)
if display:
print('\n')
return loss_hist, loss
def predict(self, X, y):
y_hat = X.dot(self.params)
loss = MeanSquaredError.loss(y, y_hat)
return y_hat, loss
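# --- Hedged usage sketch (not part of the original module) ---
# Assumes src.utils provides MeanSquaredError, BatchGradientDescent and
# show_progress as imported above; the data below is synthetic, with a
# leading bias column so the model can learn an intercept.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    X = np.hstack([np.ones((100, 1)), rng.random((100, 1))])
    y = X @ np.array([[2.0], [3.0]]) + rng.normal(0, 0.01, (100, 1))
    model = LinearRegression()
    _, final_loss = model.train(X, y, iterations=1000, learning_rate=0.1)
    print('final loss:', final_loss)
    print('learned params:', model.params.ravel())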
|
[
"numpy.append",
"numpy.zeros",
"sys.path.insert"
] |
[((31, 55), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (46, 55), False, 'import sys\n'), ((511, 536), 'numpy.zeros', 'np.zeros', (['(X.shape[1], 1)'], {}), '((X.shape[1], 1))\n', (519, 536), True, 'import numpy as np\n'), ((558, 574), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {}), '((1, 0))\n', (566, 574), True, 'import numpy as np\n'), ((725, 751), 'numpy.append', 'np.append', (['loss_hist', 'loss'], {}), '(loss_hist, loss)\n', (734, 751), True, 'import numpy as np\n')]
|
from collections import deque
import numpy as np
class Logger:
"""Print recorded values."""
def __init__(self, name):
"""
:param name str: identifier for printed value
"""
self.name = name
def __call__(self, value):
print("{}: {}".format(self.name, value))
class WindowFilterLogger:
"""Filter and print recorded values."""
def __init__(self, name, filter_size):
"""
:param name str: identifier for printed value
:param filter_size: number of historic samples which are averaged.
No output until filter_size number of values have been recorded.
"""
self.name = name
self.values = deque(maxlen=filter_size)
def __call__(self, value):
self.values.append(value)
if len(self.values) == self.values.maxlen:
print("{}: {}".format(self.name, np.mean(self.values)))
|
[
"numpy.mean",
"collections.deque"
] |
[((724, 749), 'collections.deque', 'deque', ([], {'maxlen': 'filter_size'}), '(maxlen=filter_size)\n', (729, 749), False, 'from collections import deque\n'), ((913, 933), 'numpy.mean', 'np.mean', (['self.values'], {}), '(self.values)\n', (920, 933), True, 'import numpy as np\n')]
|
import numpy as np
import time
import pandas as pd
import matplotlib.pyplot as plt
import random
def karatsuba(x, y):
""" Recursive implementation of Karatsuba's Fast Mulciplication Algoritihm
:param x: The first integer
:param y: The second integer
:return: The product of x * y
"""
if x < 10 or y < 10:
return x*y
m = max(len(str(x)), len(str(y))) // 2
x_high = x // 10**m
x_low = x % 10**m
y_high = y // 10**m
y_low = y % 10**m
z0 = karatsuba(x_low, y_low)
z1 = karatsuba(x_low + x_high, y_low + y_high)
z2 = karatsuba(x_high, y_high)
return z2 * 10 ** (2 * m) + (z1 - z2 - z0) * 10 ** m + z0
def karat_compare(max_size, tests):
samples = []
test_sizes = np.linspace(1,max_size, tests).astype(int)
standard_results = []
karatsuba_results = []
for test_size in test_sizes:
x_str = ''
y_str = ''
for x in range(test_size):
x_str += str(random.randint(0,9))
y_str += str(random.randint(0,9))
samples.append((int(x_str), int(y_str)))
print(f"Samples Generated: {len(samples)}, with max size: {max_size}")
for sample, test_size in zip(samples, test_sizes):
print(f"Attempting numbers of 10^{test_size}")
x = sample[0]
y = sample[1]
t_start = time.perf_counter()
r = x * y
standard_results.append(time.perf_counter() - t_start)
t_start = time.perf_counter()
r = karatsuba(x, y)
karatsuba_results.append(time.perf_counter() - t_start)
    plt.plot(test_sizes, standard_results, label="python native")
    plt.plot(test_sizes, karatsuba_results, label="karatsuba")
    plt.xlabel("10^x")
    plt.ylabel("Seconds")
    plt.legend()
    plt.show()
def naive_matrix_multiplication_lists(a, b):
"""
Uses nested loops to calculate AB
:param a: An MxN matrix of numbers.
:param b: An NxP matrix of numbers.
:return: An MxP matrix of numbers which is the product: AB.
"""
M = len(a)
N = len(a[0])
if len(b) != N:
raise ValueError("The Matrices Provide are not the proper shape.")
P = len(b[0])
c = [[0 for i in range(P)] for j in range(M)]
for i in range(0,M):
for j in range(0,P):
for k in range(0,N):
c[i][j] += a[i][k] * b[k][j]
return c
def naive_matrix_multiplication_np(a,b):
M, N = a.shape
n, P = b.shape
if N != n:
raise ValueError("The Matrices Provide are not the proper shape.")
c = np.zeros((M,P))
for i in range(0,M):
for j in range(0,P):
for k in range(0,N):
c[i][j] += a[i][k] * b[k][j]
return c
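def check_karatsuba(trials=5):
    """Added sanity check (not in the original script): verifies
    karatsuba() against Python's builtin multiplication on random
    20-digit integers."""
    for _ in range(trials):
        x = random.randint(10 ** 19, 10 ** 20 - 1)
        y = random.randint(10 ** 19, 10 ** 20 - 1)
        assert karatsuba(x, y) == x * y
    print(f"karatsuba matched builtin multiplication on {trials} trials")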
if __name__ == "__main__":
a = [[1, 2, 5],
[3, 4, 6]]
b = [[5, 6],
[7, 8],
[1, 1]]
c = naive_matrix_multiplication_lists(a, b)
print("List Results:\n", c)
A = np.array(a)
B = np.array(b)
C = naive_matrix_multiplication_np(A, B)
print("NP Array Results:\n", C)
expected_results = np.array([[24, 27], [49, 56]])
print("Expected Results:\n", expected_results)
|
[
"matplotlib.pyplot.show",
"random.randint",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"time.perf_counter",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((1572, 1632), 'matplotlib.pyplot.plot', 'plt.plot', (['test_size', 'standard_results'], {'label': '"""python native"""'}), "(test_size, standard_results, label='python native')\n", (1580, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1694), 'matplotlib.pyplot.plot', 'plt.plot', (['test_size', 'karatsuba_results'], {'label': '"""karatsuba"""'}), "(test_size, karatsuba_results, label='karatsuba')\n", (1645, 1694), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1717), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""10^x"""'], {}), "('10^x')\n", (1709, 1717), True, 'import matplotlib.pyplot as plt\n'), ((1722, 1743), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Seconds"""'], {}), "('Seconds')\n", (1732, 1743), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1760), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1758, 1760), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1775), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1773, 1775), True, 'import matplotlib.pyplot as plt\n'), ((2545, 2561), 'numpy.zeros', 'np.zeros', (['(M, P)'], {}), '((M, P))\n', (2553, 2561), True, 'import numpy as np\n'), ((2920, 2931), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (2928, 2931), True, 'import numpy as np\n'), ((2940, 2951), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (2948, 2951), True, 'import numpy as np\n'), ((3057, 3087), 'numpy.array', 'np.array', (['[[24, 27], [49, 56]]'], {}), '([[24, 27], [49, 56]])\n', (3065, 3087), True, 'import numpy as np\n'), ((1335, 1354), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1352, 1354), False, 'import time\n'), ((1455, 1474), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1472, 1474), False, 'import time\n'), ((743, 774), 'numpy.linspace', 'np.linspace', (['(1)', 'max_size', 'tests'], {}), '(1, max_size, tests)\n', (754, 774), True, 'import numpy as np\n'), ((970, 990), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (984, 990), False, 'import random\n'), ((1016, 1036), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (1030, 1036), False, 'import random\n'), ((1405, 1424), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1422, 1424), False, 'import time\n'), ((1536, 1555), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1553, 1555), False, 'import time\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import subprocess
import sys
# Creation of the population
datos = open("Datos/Poblacion.txt","w")
datos.close()
datos = open("Datos/Estados.txt","w")
datos.close()
# generate coordinates
contador = 0
for x in range(1,2000):
longitud = np.random.uniform(-108,-85,1)
latitud = np.random.uniform(14.5,25,1)
lon = longitud[0]
lat = latitud[0]
    # applying limits
if lat < 16.3 and lon < -92.38:
pass
elif lat < 25 and lat > 18.119 and lon < -90.4 and lon > -97 :
pass
elif lon > -88 and lat > 16:
pass
elif lat > 24 and lon > -91:
pass
elif lat < 23.7 and lon < -105.5:
pass
elif lat < 18.27 and lon < -101:
pass
elif lat > 20.6 and lon > -98:
pass
elif lat < 24.39 and lon < -106.7:
pass
elif lat < 20.4 and lon < -105.3:
pass
elif lat < 18 and lon > -91:
pass
elif lat < 17.399 and lon < -98:
pass
elif lat < 19.7 and lon < -103.6:
pass
else:
contador = contador + 1
datos = open("Datos/Poblacion.txt","a")
datos.write(str(lat)+","
+str(lon)+"\n")
datos.close()
porcentajes = open("Datos/Datos.txt","r").read()
unidad = 0.7
inf = (float(porcentajes) * 0.7)/float(100) # red 2
sano = unidad - inf # yellow 0
# generate states
s = 0.3 # green 1
r = 0.0 # blue 3
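# Sanity check (added, not in the original script): np.random.choice
# requires the probability vector to sum to 1; here sano + s + inf + r
# = (unidad - inf) + 0.3 + inf + 0.0 = 1.0 by construction.
assert abs((sano + s + inf + r) - 1.0) < 1e-9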
v = np.random.choice(4, contador, p=[sano, s, inf, r])
for i in v:
data = open("Datos/Estados.txt","a")
data.write(str(i)+"\n")
data.close()
mapa = subprocess.Popen([sys.executable, 'src/mapa.py'])
|
[
"numpy.random.uniform",
"subprocess.Popen",
"numpy.random.choice"
] |
[((1379, 1429), 'numpy.random.choice', 'np.random.choice', (['(4)', 'contador'], {'p': '[sano, s, inf, r]'}), '(4, contador, p=[sano, s, inf, r])\n', (1395, 1429), True, 'import numpy as np\n'), ((1529, 1578), 'subprocess.Popen', 'subprocess.Popen', (["[sys.executable, 'src/mapa.py']"], {}), "([sys.executable, 'src/mapa.py'])\n", (1545, 1578), False, 'import subprocess\n'), ((390, 421), 'numpy.random.uniform', 'np.random.uniform', (['(-108)', '(-85)', '(1)'], {}), '(-108, -85, 1)\n', (407, 421), True, 'import numpy as np\n'), ((431, 461), 'numpy.random.uniform', 'np.random.uniform', (['(14.5)', '(25)', '(1)'], {}), '(14.5, 25, 1)\n', (448, 461), True, 'import numpy as np\n')]
|
# This file is compatible with both Python 2 and 3
import base64
import cv2
import json
import numpy as np
from flask import Response
import time
import functools
from collections import deque
class Stream(deque):
"""
A stream stores an output sequence stream of data. It inherits from deque.
Stream contains oldest-newest data from left to right.
Stream has a "capacity" -- if the stream is full, it will drop the oldest data.
vizstream will monitor the stream and will send data to the browser whenever the
stream is updated and its timestamp changes.
"""
def __init__(self, capacity=1, fps=10):
"""
Args:
capacity: (int) maximum capacity of stream.
"""
self.capacity = capacity
self.fps = fps
self.timestamp = 0
super(Stream, self).__init__(maxlen=self.capacity)
def publish(self, **kwargs):
item = dict(data=kwargs, timestamp=self.timestamp)
self.timestamp += 1
self.append(item)
def reset(self):
self.clear()
self.timestamp = 0
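# --- Hedged usage sketch (not part of the original file) ---
# Publishes two frames into a Stream and reads the newest item back;
# the Flask wiring in vizstream() is not exercised here.
def _demo_stream():
    s = Stream(capacity=2, fps=5)
    s.publish(scene_points=np.zeros((4, 6), dtype=np.float32))
    s.publish(scene_points=np.ones((4, 6), dtype=np.float32))
    latest = s[-1]
    print("timestamp:", latest["timestamp"], "keys:", list(latest["data"].keys()))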
def vizstream(app, stream, astype):
if astype == 'scene_cloud':
url = '/api/stream_scene_cloud'
data2msg = data2msg_scene_cloud
elif astype == 'lc_curtain':
url = '/api/stream_lc_curtain'
data2msg = data2msg_lc_curtain
# elif astype == 'camera_image':
# url = '/api/stream_camera_image'
# data2msg = data2msg_camera_image
# elif astype == 'lidar_cloud':
# url = '/api/stream_lidar_cloud'
# data2msg = data2msg_lidar_cloud
# elif astype == 'dt_boxes':
# url = '/api/stream_dt_boxes'
# data2msg = data2msg_dt_boxes
# elif astype == 'entropy_map':
# url = '/api/stream_entropy_map'
# data2msg = data2msg_entropy_map
# elif astype == 'arrows':
# url = '/api/stream_arrows'
# data2msg = data2msg_arrows
else:
raise Exception("astype={} not valid".format(astype))
def generator():
sent_timestamp = None
while True:
if len(stream) == 0:
sent_timestamp = None
elif sent_timestamp != stream[-1]["timestamp"]:
sent_timestamp = stream[-1]["timestamp"]
data = stream[-1]["data"]
msg = data2msg(**data)
yield "data:{}\n\n".format(msg)
time.sleep(1.0 / stream.fps)
@app.route(url, methods=['GET', 'POST'])
@functools.wraps(data2msg)
def route_fn():
return Response(generator(), mimetype="text/event-stream")
########################################################################################################################
# region data2msg functions
########################################################################################################################
def data2msg_scene_cloud(scene_points, se_design_pts=None, downsample=False, int16_factor=100):
"""
Args:
scene_points (np.ndarray, dtype=float32, shape=(N, 6)): scene points
        se_design_pts (Optional(np.ndarray, dtype=float32, shape=(C, 2))): design points of the safety envelope
"""
    # The next line downsamples the scene points: it selects one of every three points.
if downsample:
scene_points = scene_points[::3, :]
# convert to int16
scene_points = scene_points * int16_factor
scene_points = scene_points.astype(np.int16)
scene_pc_str = base64.b64encode(scene_points.tobytes()).decode("utf-8")
send_dict = dict(scene_pc_str=scene_pc_str)
if se_design_pts is not None:
# convert to int16
se_design_pts = se_design_pts * int16_factor
se_design_pts = se_design_pts.astype(np.int16)
se_pc_str = base64.b64encode(se_design_pts.tobytes()).decode("utf-8")
send_dict["se_pc_str"] = se_pc_str
json_str = json.dumps(send_dict)
return json_str
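# --- Hedged round-trip sketch (not part of the original file) ---
# Shows how a client could decode scene_pc_str back into float points;
# assumes the same int16_factor that the encoder above used.
def _decode_scene_cloud(json_str, int16_factor=100):
    payload = json.loads(json_str)
    raw = base64.b64decode(payload["scene_pc_str"])
    pts = np.frombuffer(raw, dtype=np.int16).reshape(-1, 6)
    return pts.astype(np.float32) / int16_factor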
# def data2msg_camera_image(data: Frame):
# image_str = data.cam["image_str"]
# image_dtype = data.cam["datatype"]
# image_b64 = base64.b64encode(image_str).decode("utf-8")
# image_b64 = f"data:image/{image_dtype};base64,{image_b64}"
# return image_b64
# def data2msg_lidar_cloud(data, int16_factor=100):
# points = data # (N, 3)
# # convert to int16
# points = points * int16_factor
# points = points.astype(np.int16)
# pc_str = base64.b64encode(points.tobytes()).decode("utf-8")
# return pc_str
def data2msg_lc_curtain(lc_image, lc_cloud, score=None, int16_factor=100):
"""
Args:
lc_image: light curtain image.
- (np.ndarray, dtype=float32, shape=(H, C, 4)).
- Axis 2 corresponds to (x, y, z, i):
- x : x in cam frame.
- y : y in cam frame.
- z : z in cam frame.
- i : intensity of LC cloud, lying in [0, 255].
lc_cloud: light curtain point cloud.
- (np.ndarray, dtype=float32, shape=(N, 4)).
- Axis 2 corresponds to (x, y, z, i):
- x : x in cam frame.
- y : y in cam frame.
- z : z in cam frame.
- i : intensity of LC cloud, lying in [0, 1].
score (Optional(float)): score to be displayed in kittiviewer
"""
# boundary
lc_image = lc_image[:, :, :3] # (H, C, 3)
ys = lc_image[:, :, 1] # (H, C)
ys[np.isnan(ys)] = 0 # replacing NaNs with zeros shouldn't affect the columnwise min or max of y
top_inds = np.argmin(ys, axis=0) # (C,)
bot_inds = np.argmax(ys, axis=0) # (C,)
top_xyz = lc_image[top_inds, np.arange(len(top_inds)), :] # (C, 3)
bot_xyz = lc_image[bot_inds, np.arange(len(bot_inds)), :] # (C, 3)
boundary = np.stack([top_xyz, bot_xyz], axis=1) # (C, 2, 3)
mask = np.isfinite(boundary).all(axis=(1, 2)) # (C,)
boundary = boundary[mask] # (C', 2, 3)
# intersection points
isect_pts = lc_cloud[lc_cloud[:, 3] > 0.05] # (N', 4)
# convert to int16
boundary = (boundary * int16_factor).astype(np.int16)
isect_pts = (isect_pts * int16_factor).astype(np.int16)
boundary_str = base64.b64encode(boundary.tobytes()).decode("utf-8")
isect_pts_str = base64.b64encode(isect_pts.tobytes()).decode("utf-8")
send_dict = dict(boundary=boundary_str, isect_pts=isect_pts_str, score=score)
json_str = json.dumps(send_dict)
return json_str
# def data2msg_dt_boxes(data):
# dt_boxes = data["detections"]
# json_str = json.dumps(dt_boxes)
# return json_str
# def data2msg_entropy_map(data):
# confidence_map = data["confidence_map"]
# entropy_heatmap = _create_entropy_heatmap(confidence_map)
# image_str = cv2.imencode('.png', entropy_heatmap)[1].tostring()
# image_b64 = base64.b64encode(image_str).decode("utf-8")
# image_b64 = f"data:image/png;base64,{image_b64}"
# return image_b64
# def data2msg_arrows(data):
# tails = list([float(e) for e in data["tails"].ravel()])
# heads = list([float(e) for e in data["heads"].ravel()])
# arrows = dict(tails=tails, heads=heads)
# json_str = json.dumps(arrows)
# return json_str
# endregion
########################################################################################################################
# region Helper functions
########################################################################################################################
# def _create_confidence_heatmap(confidence_map):
# # Take the mean of confidences for the 0-degrees and 90-degrees anchors
# conf_scores = confidence_map[:, :, 2:] # (Y, X, K)
# conf_scores = conf_scores.mean(axis=2) # (Y, X)
# # Rescale between 0 and 1.
# # conf_scores = conf_scores - conf_scores.min()
# # conf_scores = conf_scores / conf_scores.max()
# heatmap = cv2.applyColorMap((conf_scores * 255).astype(np.uint8), cv2.COLORMAP_HOT)
# return heatmap
# def _create_entropy_heatmap(confidence_map):
# p = confidence_map[:, :, 2:] # (Y, X, K)
# p = p.clip(1e-5, 1-1e-5) # (Y, X, K)
# entropy = -p * np.log2(p) - (1-p) * np.log2(1-p) # (Y, X, K)
# entropy = entropy.mean(axis=2) # (Y, X)
# heatmap = cv2.applyColorMap((entropy * 255).astype(np.uint8), cv2.COLORMAP_HOT)
# return heatmap
# endregion
########################################################################################################################
|
[
"numpy.stack",
"numpy.argmax",
"numpy.isfinite",
"numpy.argmin",
"json.dumps",
"numpy.isnan",
"time.sleep",
"functools.wraps"
] |
[((2501, 2526), 'functools.wraps', 'functools.wraps', (['data2msg'], {}), '(data2msg)\n', (2516, 2526), False, 'import functools\n'), ((3907, 3928), 'json.dumps', 'json.dumps', (['send_dict'], {}), '(send_dict)\n', (3917, 3928), False, 'import json\n'), ((5582, 5603), 'numpy.argmin', 'np.argmin', (['ys'], {'axis': '(0)'}), '(ys, axis=0)\n', (5591, 5603), True, 'import numpy as np\n'), ((5627, 5648), 'numpy.argmax', 'np.argmax', (['ys'], {'axis': '(0)'}), '(ys, axis=0)\n', (5636, 5648), True, 'import numpy as np\n'), ((5816, 5852), 'numpy.stack', 'np.stack', (['[top_xyz, bot_xyz]'], {'axis': '(1)'}), '([top_xyz, bot_xyz], axis=1)\n', (5824, 5852), True, 'import numpy as np\n'), ((6440, 6461), 'json.dumps', 'json.dumps', (['send_dict'], {}), '(send_dict)\n', (6450, 6461), False, 'import json\n'), ((5472, 5484), 'numpy.isnan', 'np.isnan', (['ys'], {}), '(ys)\n', (5480, 5484), True, 'import numpy as np\n'), ((2421, 2449), 'time.sleep', 'time.sleep', (['(1.0 / stream.fps)'], {}), '(1.0 / stream.fps)\n', (2431, 2449), False, 'import time\n'), ((5877, 5898), 'numpy.isfinite', 'np.isfinite', (['boundary'], {}), '(boundary)\n', (5888, 5898), True, 'import numpy as np\n')]
|
import numpy as np
from mpi4py import MPI
from SIMP import TO_SIMP, make_Conn_matrix
def get_void(nely,nelx):
v=np.zeros((nely,nelx))
R=min(nely,nelx)/15
loc=np.array([[1/3, 1/4], [2/3, 1/4],[ 1/3, 1/2], [2/3, 1/2], [1/3 , 3/4], [2/3, 3/4]])
loc=loc*np.array([[nely,nelx]])
for i in range(nely):
for j in range(nelx):
v[i,j]=R-np.min(np.sqrt(np.sum((loc-np.array([[i+1,j+1]]))**2,1)));
v=v>0
return v
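# --- Hedged sanity sketch (not part of the original script) ---
# get_void() depends only on numpy, so its output can be inspected
# without MPI or the SIMP module.
def inspect_void(nely=9, nelx=18):
    v_small = get_void(nely, nelx)
    print(v_small.sum(), "void elements out of", v_small.size)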
def evaluate(x0,volfrac,void,Iar,cMat):
beta=0.05
epsilon_2=0.25
nelx=90
nely=45
penal=3
E0=1
nu=0.3
max_move=0.25
if np.mean(x0)>volfrac:
x0=x0*volfrac/np.mean(x0)
_,c1 = TO_SIMP(x0,nelx,nely,volfrac,penal,beta,epsilon_2,max_move,E0,nu,Iar,cMat,True,void,np.zeros((1,nely,nelx)),0,10)
_,c2 = TO_SIMP(x0,nelx,nely,volfrac,penal,beta,epsilon_2,max_move,E0,nu,Iar,cMat,True,void,np.zeros((1,nely,nelx)),0,0)
return c1,c2
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
nelx=90
nely=45
volfrac=0.4+0.6*0.0001
void=get_void(nely,nelx)
Iar,cMat=make_Conn_matrix(nelx,nely)
num_samples=100000
perrank=int(np.ceil(num_samples/size))
num_samples=perrank*size
C_rand_rank=np.zeros(perrank)
C_rand_opt_rank=np.zeros(perrank)
C_rand_inner_opt_rank=np.zeros(perrank)
X_rand_rank=np.zeros((perrank,nely,nelx))
for i in range(perrank):
X_rand_rank[i]=np.random.rand(nely,nelx)**1.5
X_rand_rank_inner_i=X_rand_rank[i]*0.5+0.2
C_rand_opt_rank[i],C_rand_rank[i]=evaluate(X_rand_rank[i],volfrac,void,Iar,cMat)
C_rand_inner_opt_rank[i],_=evaluate(X_rand_rank_inner_i,volfrac,void,Iar,cMat)
if rank==0:
X_rand=np.zeros((perrank*size,nely,nelx))
C_rand=np.zeros(perrank*size)
C_rand_opt=np.zeros(perrank*size)
C_rand_inner_opt=np.zeros(perrank*size)
else:
X_rand=None
C_rand=None
C_rand_opt=None
C_rand_inner_opt=None
comm.Gather(C_rand_rank,C_rand,root=0)
comm.Gather(C_rand_opt_rank,C_rand_opt,root=0)
comm.Gather(C_rand_inner_opt_rank,C_rand_inner_opt,root=0)
comm.Gather(X_rand_rank,X_rand,root=0)
if rank==0:
np.save('Sample_data/X_rand.npy',X_rand)
np.save('Sample_data/C_rand_opt.npy',C_rand_opt)
np.save('Sample_data/C_rand_inner_opt.npy',C_rand_inner_opt)
np.save('Sample_data/C_rand.npy',C_rand)
|
[
"numpy.save",
"numpy.ceil",
"SIMP.make_Conn_matrix",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"numpy.random.rand"
] |
[((1080, 1108), 'SIMP.make_Conn_matrix', 'make_Conn_matrix', (['nelx', 'nely'], {}), '(nelx, nely)\n', (1096, 1108), False, 'from SIMP import TO_SIMP, make_Conn_matrix\n'), ((1206, 1223), 'numpy.zeros', 'np.zeros', (['perrank'], {}), '(perrank)\n', (1214, 1223), True, 'import numpy as np\n'), ((1240, 1257), 'numpy.zeros', 'np.zeros', (['perrank'], {}), '(perrank)\n', (1248, 1257), True, 'import numpy as np\n'), ((1280, 1297), 'numpy.zeros', 'np.zeros', (['perrank'], {}), '(perrank)\n', (1288, 1297), True, 'import numpy as np\n'), ((1310, 1341), 'numpy.zeros', 'np.zeros', (['(perrank, nely, nelx)'], {}), '((perrank, nely, nelx))\n', (1318, 1341), True, 'import numpy as np\n'), ((117, 139), 'numpy.zeros', 'np.zeros', (['(nely, nelx)'], {}), '((nely, nelx))\n', (125, 139), True, 'import numpy as np\n'), ((171, 282), 'numpy.array', 'np.array', (['[[1 / 3, 1 / 4], [2 / 3, 1 / 4], [1 / 3, 1 / 2], [2 / 3, 1 / 2], [1 / 3, 3 /\n 4], [2 / 3, 3 / 4]]'], {}), '([[1 / 3, 1 / 4], [2 / 3, 1 / 4], [1 / 3, 1 / 2], [2 / 3, 1 / 2], [\n 1 / 3, 3 / 4], [2 / 3, 3 / 4]])\n', (179, 282), True, 'import numpy as np\n'), ((1141, 1168), 'numpy.ceil', 'np.ceil', (['(num_samples / size)'], {}), '(num_samples / size)\n', (1148, 1168), True, 'import numpy as np\n'), ((1668, 1706), 'numpy.zeros', 'np.zeros', (['(perrank * size, nely, nelx)'], {}), '((perrank * size, nely, nelx))\n', (1676, 1706), True, 'import numpy as np\n'), ((1714, 1738), 'numpy.zeros', 'np.zeros', (['(perrank * size)'], {}), '(perrank * size)\n', (1722, 1738), True, 'import numpy as np\n'), ((1752, 1776), 'numpy.zeros', 'np.zeros', (['(perrank * size)'], {}), '(perrank * size)\n', (1760, 1776), True, 'import numpy as np\n'), ((1796, 1820), 'numpy.zeros', 'np.zeros', (['(perrank * size)'], {}), '(perrank * size)\n', (1804, 1820), True, 'import numpy as np\n'), ((2109, 2150), 'numpy.save', 'np.save', (['"""Sample_data/X_rand.npy"""', 'X_rand'], {}), "('Sample_data/X_rand.npy', X_rand)\n", (2116, 2150), True, 'import numpy as np\n'), ((2154, 2203), 'numpy.save', 'np.save', (['"""Sample_data/C_rand_opt.npy"""', 'C_rand_opt'], {}), "('Sample_data/C_rand_opt.npy', C_rand_opt)\n", (2161, 2203), True, 'import numpy as np\n'), ((2207, 2268), 'numpy.save', 'np.save', (['"""Sample_data/C_rand_inner_opt.npy"""', 'C_rand_inner_opt'], {}), "('Sample_data/C_rand_inner_opt.npy', C_rand_inner_opt)\n", (2214, 2268), True, 'import numpy as np\n'), ((2272, 2313), 'numpy.save', 'np.save', (['"""Sample_data/C_rand.npy"""', 'C_rand'], {}), "('Sample_data/C_rand.npy', C_rand)\n", (2279, 2313), True, 'import numpy as np\n'), ((267, 291), 'numpy.array', 'np.array', (['[[nely, nelx]]'], {}), '([[nely, nelx]])\n', (275, 291), True, 'import numpy as np\n'), ((605, 616), 'numpy.mean', 'np.mean', (['x0'], {}), '(x0)\n', (612, 616), True, 'import numpy as np\n'), ((764, 789), 'numpy.zeros', 'np.zeros', (['(1, nely, nelx)'], {}), '((1, nely, nelx))\n', (772, 789), True, 'import numpy as np\n'), ((889, 914), 'numpy.zeros', 'np.zeros', (['(1, nely, nelx)'], {}), '((1, nely, nelx))\n', (897, 914), True, 'import numpy as np\n'), ((1384, 1410), 'numpy.random.rand', 'np.random.rand', (['nely', 'nelx'], {}), '(nely, nelx)\n', (1398, 1410), True, 'import numpy as np\n'), ((648, 659), 'numpy.mean', 'np.mean', (['x0'], {}), '(x0)\n', (655, 659), True, 'import numpy as np\n'), ((395, 421), 'numpy.array', 'np.array', (['[[i + 1, j + 1]]'], {}), '([[i + 1, j + 1]])\n', (403, 421), True, 'import numpy as np\n')]
|
from typing import Tuple
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
class MNIST:
def __init__(self, with_normalization: bool = True) -> None:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
self.x_train_: np.ndarray = None
self.y_train_: np.ndarray = None
self.x_val_: np.ndarray = None
self.y_val_: np.ndarray = None
self.val_size = 0
self.train_splitted_size = 0
# Preprocess x data
self.x_train = x_train.astype(np.float32)
        self.x_train = np.expand_dims(self.x_train, axis=-1)
if with_normalization:
self.x_train = self.x_train / 255.0
self.x_test = x_test.astype(np.float32)
        self.x_test = np.expand_dims(self.x_test, axis=-1)
if with_normalization:
self.x_test = self.x_test / 255.0
# Dataset attributes
self.train_size = self.x_train.shape[0]
self.test_size = self.x_test.shape[0]
self.width = self.x_train.shape[1]
self.height = self.x_train.shape[2]
self.depth = self.x_train.shape[3]
self.img_shape = (self.width, self.height, self.depth)
self.num_classes = 10
# Preprocess y data
self.y_train = to_categorical(y_train, num_classes=self.num_classes)
self.y_test = to_categorical(y_test, num_classes=self.num_classes)
def get_train_set(self) -> Tuple[np.ndarray, np.ndarray]:
return self.x_train, self.y_train
def get_test_set(self) -> Tuple[np.ndarray, np.ndarray]:
return self.x_test, self.y_test
def get_splitted_train_validation_set(self, validation_size: float = 0.33) -> tuple:
self.x_train_, self.x_val_, self.y_train_, self.y_val_ = train_test_split(
self.x_train,
self.y_train,
test_size=validation_size
)
self.val_size = self.x_val_.shape[0]
self.train_splitted_size = self.x_train_.shape[0]
return self.x_train_, self.x_val_, self.y_train_, self.y_val_
def data_augmentation(self, augment_size: int = 5_000) -> None:
image_generator = ImageDataGenerator(
rotation_range=5,
zoom_range=0.08,
width_shift_range=0.08,
height_shift_range=0.08
)
# Fit the data generator
image_generator.fit(self.x_train, augment=True)
# Get random train images for the data augmentation
rand_idxs = np.random.randint(self.train_size, size=augment_size)
x_augmented = self.x_train[rand_idxs].copy()
y_augmented = self.y_train[rand_idxs].copy()
x_augmented = image_generator.flow(
x_augmented,
np.zeros(augment_size),
batch_size=augment_size,
shuffle=False
).next()[0]
# Append the augmented images to the train set
self.x_train = np.concatenate((self.x_train, x_augmented))
self.y_train = np.concatenate((self.y_train, y_augmented))
self.train_size = self.x_train.shape[0]
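# --- Hedged usage sketch (not part of the original module) ---
# Triggers the Keras MNIST download on first use; the augmentation size
# is kept small so the demo stays cheap.
if __name__ == '__main__':
    data = MNIST()
    x_train, y_train = data.get_train_set()
    print('train:', x_train.shape, y_train.shape)
    data.data_augmentation(augment_size=100)
    print('after augmentation:', data.x_train.shape)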
|
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.utils.to_categorical",
"sklearn.model_selection.train_test_split",
"numpy.zeros",
"numpy.expand_dims",
"tensorflow.keras.datasets.mnist.load_data",
"numpy.random.randint",
"numpy.concatenate"
] |
[((398, 415), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (413, 415), False, 'from tensorflow.keras.datasets import mnist\n'), ((749, 781), 'numpy.expand_dims', 'np.expand_dims', (['x_train'], {'axis': '(-1)'}), '(x_train, axis=-1)\n', (763, 781), True, 'import numpy as np\n'), ((935, 966), 'numpy.expand_dims', 'np.expand_dims', (['x_test'], {'axis': '(-1)'}), '(x_test, axis=-1)\n', (949, 966), True, 'import numpy as np\n'), ((1453, 1506), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_train'], {'num_classes': 'self.num_classes'}), '(y_train, num_classes=self.num_classes)\n', (1467, 1506), False, 'from tensorflow.keras.utils import to_categorical\n'), ((1530, 1582), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_test'], {'num_classes': 'self.num_classes'}), '(y_test, num_classes=self.num_classes)\n', (1544, 1582), False, 'from tensorflow.keras.utils import to_categorical\n'), ((1954, 2025), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.x_train', 'self.y_train'], {'test_size': 'validation_size'}), '(self.x_train, self.y_train, test_size=validation_size)\n', (1970, 2025), False, 'from sklearn.model_selection import train_test_split\n'), ((2350, 2457), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(5)', 'zoom_range': '(0.08)', 'width_shift_range': '(0.08)', 'height_shift_range': '(0.08)'}), '(rotation_range=5, zoom_range=0.08, width_shift_range=\n 0.08, height_shift_range=0.08)\n', (2368, 2457), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2689, 2742), 'numpy.random.randint', 'np.random.randint', (['self.train_size'], {'size': 'augment_size'}), '(self.train_size, size=augment_size)\n', (2706, 2742), True, 'import numpy as np\n'), ((3125, 3168), 'numpy.concatenate', 'np.concatenate', (['(self.x_train, x_augmented)'], {}), '((self.x_train, x_augmented))\n', (3139, 3168), True, 'import numpy as np\n'), ((3193, 3236), 'numpy.concatenate', 'np.concatenate', (['(self.y_train, y_augmented)'], {}), '((self.y_train, y_augmented))\n', (3207, 3236), True, 'import numpy as np\n'), ((2935, 2957), 'numpy.zeros', 'np.zeros', (['augment_size'], {}), '(augment_size)\n', (2943, 2957), True, 'import numpy as np\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/datasets/datasets.retailrocket.ipynb (unless otherwise specified).
__all__ = ['RetailRocketDataset', 'RetailRocketDatasetv2']
# Cell
from typing import List, Optional, Callable, Union, Any, Tuple
import os
import os.path as osp
from collections.abc import Sequence
import sys
import numpy as np
import pandas as pd
from datetime import timezone, datetime, timedelta
import time
from ..utils.common_utils import download_url, extract_zip, makedirs
from .bases.common import Dataset
from .bases.session_graph import SessionGraphDataset
# Cell
class RetailRocketDataset(SessionGraphDataset):
train_url = "https://github.com/RecoHut-Datasets/retail_rocket/raw/v1/train.txt"
test_url = "https://github.com/RecoHut-Datasets/retail_rocket/raw/v1/test.txt"
all_train_seq_url = "https://github.com/RecoHut-Datasets/retail_rocket/raw/v1/all_train_seq.txt"
def __init__(self, root, shuffle=False, n_node=40727, is_train=True):
self.n_node = n_node
self.shuffle = shuffle
self.is_train = is_train
super().__init__(root, shuffle, n_node)
@property
def raw_file_names(self) -> str:
if self.is_train:
return ['train.txt', 'all_train_seq.txt']
return ['test.txt', 'all_train_seq.txt']
def download(self):
download_url(self.all_train_seq_url, self.raw_dir)
if self.is_train:
download_url(self.train_url, self.raw_dir)
else:
download_url(self.test_url, self.raw_dir)
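# --- Hedged usage sketch (not part of the original notebook) ---
# Instantiating the dataset downloads the session files into <root>/raw
# on first use; the root path below is illustrative.
def _demo_retailrocket(root='./data/retail_rocket'):
    return RetailRocketDataset(root, is_train=True)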
# Internal Cell
def to_list(value: Any) -> Sequence:
if isinstance(value, Sequence) and not isinstance(value, str):
return value
else:
return [value]
def files_exist(files: List[str]) -> bool:
# NOTE: We return `False` in case `files` is empty, leading to a
# re-processing of files on every instantiation.
return len(files) != 0 and all([osp.exists(f) for f in files])
# Cell
class RetailRocketDatasetv2(Dataset):
r"""Load and process RetailRocket dataset.
Args:
root (string): Root directory where the dataset should be saved.
process_method (string):
last: last day => test set
last_min_date: last day => test set, but from a minimal date onwards
days_test: last N days => test set
slice: create multiple train-test-combinations with a sliding window approach
min_date (string, optional): Minimum date
        session_length (int, optional): Session time length in seconds (default: 30 * 60, i.e. 30 minutes)
        min_session_length (int, optional): Minimum number of items for a session to be valid
        min_item_support (int, optional): Minimum number of interactions for an item to be valid
        num_slices (int, optional): Number of train-test slices to create
        days_offset (int, optional): Offset in days from the first date in the data set
        days_shift (int, optional): Number of days the training start date is shifted after creating one slice
days_train (int, optional): Days in train set in each slice
days_test (int, optional): Days in test set in each slice
"""
url = 'https://github.com/RecoHut-Datasets/retail_rocket/raw/v2/retailrocket.zip'
def __init__(self, root, process_method, min_date='2015-09-02',
session_length=30*60, min_session_length=2, min_item_support=5,
num_slices=5, days_offset=0, days_shift=27, days_train=25, days_test=2):
super().__init__(root)
self.process_method = process_method
self.min_date = min_date
self.session_length = session_length
self.min_session_length = min_session_length
self.min_item_support = min_item_support
self.num_slices = num_slices
self.days_offset = days_offset
self.days_shift = days_shift
self.days_train = days_train
self.days_test = days_test
self.data = None
self.cart = None
self._process()
@property
def raw_file_names(self) -> str:
return 'events.csv'
@property
def processed_file_names(self) -> str:
return 'data.pkl'
def download(self):
path = download_url(self.url, self.raw_dir)
extract_zip(path, self.raw_dir)
from shutil import move, rmtree
move(osp.join(self.raw_dir, 'retailrocket', 'events.csv'),
osp.join(self.raw_dir, 'events.csv'))
rmtree(osp.join(self.raw_dir, 'retailrocket'))
os.unlink(path)
def load(self):
#load csv
data = pd.read_csv(osp.join(self.raw_dir,self.raw_file_names), sep=',',
header=0, usecols=[0,1,2,3],
dtype={0:np.int64, 1:np.int32, 2:str, 3:np.int32})
#specify header names
data.columns = ['Time','UserId','Type','ItemId']
data['Time'] = (data.Time / 1000).astype(int)
data.sort_values(['UserId','Time'], ascending=True, inplace=True)
#sessionize
data['TimeTmp'] = pd.to_datetime(data.Time, unit='s')
data.sort_values(['UserId','TimeTmp'], ascending=True, inplace=True)
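        # NOTE (added): the shift below runs over the whole frame, so the first
        # event of each user is compared against the previous user's last event;
        # a new SessionId starts only when that time gap exceeds session_length.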
data['TimeShift'] = data['TimeTmp'].shift(1)
data['TimeDiff'] = (data['TimeTmp'] - data['TimeShift']).dt.total_seconds().abs()
data['SessionIdTmp'] = (data['TimeDiff'] > self.session_length).astype(int)
data['SessionId'] = data['SessionIdTmp'].cumsum( skipna=False)
del data['SessionIdTmp'], data['TimeShift'], data['TimeDiff']
data.sort_values(['SessionId','Time'], ascending=True, inplace=True)
cart = data[data.Type == 'addtocart']
data = data[data.Type == 'view']
del data['Type']
# output
print(data.Time.min())
print(data.Time.max())
data_start = datetime.fromtimestamp( data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc)
del data['TimeTmp']
print('Loaded data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format(len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat()))
self.data = data
self.cart = cart
def filter_data(self):
data = self.data
#filter session length
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>1].index)]
#filter item support
item_supports = data.groupby('ItemId').size()
data = data[np.in1d(data.ItemId, item_supports[item_supports>= self.min_item_support].index)]
#filter session length
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>= self.min_session_length].index)]
#output
data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
print('Filtered data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format(len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat()))
self.data = data
def filter_min_date(self):
data = self.data
min_datetime = datetime.strptime(self.min_date + ' 00:00:00', '%Y-%m-%d %H:%M:%S')
#filter
session_max_times = data.groupby('SessionId').Time.max()
session_keep = session_max_times[session_max_times > min_datetime.timestamp()].index
data = data[np.in1d(data.SessionId, session_keep)]
#output
data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
print('Filtered data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format(len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat()))
self.data = data
def split_data_org(self):
data = self.data
tmax = data.Time.max()
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_test = session_max_times[session_max_times >= tmax-86400].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(osp.join(self.processed_dir,'events_train_full.txt'), sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(osp.join(self.processed_dir,'events_test.txt'), sep='\t', index=False)
tmax = train.Time.max()
session_max_times = train.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_valid = session_max_times[session_max_times >= tmax-86400].index
train_tr = train[np.in1d(train.SessionId, session_train)]
valid = train[np.in1d(train.SessionId, session_valid)]
valid = valid[np.in1d(valid.ItemId, train_tr.ItemId)]
tslength = valid.groupby('SessionId').size()
valid = valid[np.in1d(valid.SessionId, tslength[tslength>=2].index)]
print('Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique()))
train_tr.to_csv(osp.join(self.processed_dir,'events_train_tr.txt'), sep='\t', index=False)
print('Validation set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique()))
valid.to_csv(osp.join(self.processed_dir,'events_train_valid.txt'), sep='\t', index=False)
def split_data(self):
data = self.data
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
test_from = data_end - timedelta(self.days_test)
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < test_from.timestamp()].index
session_test = session_max_times[session_max_times >= test_from.timestamp()].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(osp.join(self.processed_dir,'events_train_full.txt'), sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(osp.join(self.processed_dir,'events_test.txt'), sep='\t', index=False)
def slice_data(self):
for slice_id in range(0, self.num_slices):
self.split_data_slice(slice_id, self.days_offset+(slice_id*self.days_shift))
def split_data_slice(self, slice_id, days_offset):
data = self.data
data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
print('Full data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'.
format(slice_id, len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.isoformat(), data_end.isoformat()))
start = datetime.fromtimestamp(data.Time.min(), timezone.utc ) + timedelta(days_offset)
middle = start + timedelta(self.days_train)
end = middle + timedelta(self.days_test)
#prefilter the timespan
session_max_times = data.groupby('SessionId').Time.max()
greater_start = session_max_times[session_max_times >= start.timestamp()].index
lower_end = session_max_times[session_max_times <= end.timestamp()].index
data_filtered = data[np.in1d(data.SessionId, greater_start.intersection(lower_end))]
print('Slice data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} / {}'.
format( slice_id, len(data_filtered), data_filtered.SessionId.nunique(), data_filtered.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat(), end.date().isoformat() ) )
#split to train and test
session_max_times = data_filtered.groupby('SessionId').Time.max()
sessions_train = session_max_times[session_max_times < middle.timestamp()].index
sessions_test = session_max_times[session_max_times >= middle.timestamp()].index
train = data[np.in1d(data.SessionId, sessions_train)]
print('Train set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'.
format( slice_id, len(train), train.SessionId.nunique(), train.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat() ) )
train.to_csv(osp.join(self.processed_dir,'events_train_full.'+str(slice_id)+'.txt'), sep='\t', index=False)
test = data[np.in1d(data.SessionId, sessions_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Test set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} \n\n'.
format( slice_id, len(test), test.SessionId.nunique(), test.ItemId.nunique(), middle.date().isoformat(), end.date().isoformat() ) )
test.to_csv(osp.join(self.processed_dir,'events_test.'+str(slice_id)+'.txt'), sep='\t', index=False)
def store_buys(self):
self.cart.to_csv(osp.join(self.processed_dir,'events_buys.txt'), sep='\t', index=False)
def process(self):
self.load()
self.filter_data()
if self.process_method == 'last':
self.split_data_org()
elif self.process_method == 'last_min_date':
self.filter_min_date()
self.split_data_org()
elif self.process_method == 'days_test':
self.split_data()
elif self.process_method == 'slice':
self.slice_data()
self.store_buys()
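# Illustrative sketch (toy data; not part of the original loader): split_data_org
# above keeps sessions whose last event falls inside the final 86400 s (one day)
# of the log as the test set. The same rule on a tiny frame:
if __name__ == '__main__':
    import pandas as pd
    toy = pd.DataFrame({'SessionId': [1, 1, 2, 2], 'Time': [0.0, 10.0, 90000.0, 100000.0]})
    last_event = toy.groupby('SessionId').Time.max()
    test_sessions = last_event[last_event >= toy.Time.max() - 86400].index
    print(test_sessions.tolist())  # -> [2]; session 1 would go to the training split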
|
[
"os.unlink",
"os.path.exists",
"datetime.datetime.strptime",
"pandas.to_datetime",
"datetime.timedelta",
"os.path.join",
"numpy.in1d"
] |
[((4482, 4497), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (4491, 4497), False, 'import os\n'), ((5013, 5048), 'pandas.to_datetime', 'pd.to_datetime', (['data.Time'], {'unit': '"""s"""'}), "(data.Time, unit='s')\n", (5027, 5048), True, 'import pandas as pd\n'), ((7368, 7435), 'datetime.datetime.strptime', 'datetime.strptime', (["(self.min_date + ' 00:00:00')", '"""%Y-%m-%d %H:%M:%S"""'], {}), "(self.min_date + ' 00:00:00', '%Y-%m-%d %H:%M:%S')\n", (7385, 7435), False, 'from datetime import timezone, datetime, timedelta\n'), ((4314, 4366), 'os.path.join', 'osp.join', (['self.raw_dir', '"""retailrocket"""', '"""events.csv"""'], {}), "(self.raw_dir, 'retailrocket', 'events.csv')\n", (4322, 4366), True, 'import os.path as osp\n'), ((4381, 4417), 'os.path.join', 'osp.join', (['self.raw_dir', '"""events.csv"""'], {}), "(self.raw_dir, 'events.csv')\n", (4389, 4417), True, 'import os.path as osp\n'), ((4434, 4472), 'os.path.join', 'osp.join', (['self.raw_dir', '"""retailrocket"""'], {}), "(self.raw_dir, 'retailrocket')\n", (4442, 4472), True, 'import os.path as osp\n'), ((4564, 4607), 'os.path.join', 'osp.join', (['self.raw_dir', 'self.raw_file_names'], {}), '(self.raw_dir, self.raw_file_names)\n', (4572, 4607), True, 'import os.path as osp\n'), ((6399, 6466), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_lengths[session_lengths > 1].index'], {}), '(data.SessionId, session_lengths[session_lengths > 1].index)\n', (6406, 6466), True, 'import numpy as np\n'), ((6570, 6656), 'numpy.in1d', 'np.in1d', (['data.ItemId', 'item_supports[item_supports >= self.min_item_support].index'], {}), '(data.ItemId, item_supports[item_supports >= self.min_item_support].\n index)\n', (6577, 6656), True, 'import numpy as np\n'), ((6763, 6858), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_lengths[session_lengths >= self.min_session_length].index'], {}), '(data.SessionId, session_lengths[session_lengths >= self.\n min_session_length].index)\n', (6770, 6858), True, 'import numpy as np\n'), ((7632, 7669), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_keep'], {}), '(data.SessionId, session_keep)\n', (7639, 7669), True, 'import numpy as np\n'), ((8437, 8475), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_train'], {}), '(data.SessionId, session_train)\n', (8444, 8475), True, 'import numpy as np\n'), ((8497, 8534), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_test'], {}), '(data.SessionId, session_test)\n', (8504, 8534), True, 'import numpy as np\n'), ((8556, 8590), 'numpy.in1d', 'np.in1d', (['test.ItemId', 'train.ItemId'], {}), '(test.ItemId, train.ItemId)\n', (8563, 8590), True, 'import numpy as np\n'), ((8664, 8718), 'numpy.in1d', 'np.in1d', (['test.SessionId', 'tslength[tslength >= 2].index'], {}), '(test.SessionId, tslength[tslength >= 2].index)\n', (8671, 8718), True, 'import numpy as np\n'), ((8884, 8937), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_train_full.txt"""'], {}), "(self.processed_dir, 'events_train_full.txt')\n", (8892, 8937), True, 'import os.path as osp\n'), ((9117, 9164), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_test.txt"""'], {}), "(self.processed_dir, 'events_test.txt')\n", (9125, 9164), True, 'import os.path as osp\n'), ((9473, 9512), 'numpy.in1d', 'np.in1d', (['train.SessionId', 'session_train'], {}), '(train.SessionId, session_train)\n', (9480, 9512), True, 'import numpy as np\n'), ((9536, 9575), 'numpy.in1d', 'np.in1d', (['train.SessionId', 'session_valid'], {}), '(train.SessionId, session_valid)\n', (9543, 9575), True, 'import numpy as np\n'), ((9599, 9637), 'numpy.in1d', 'np.in1d', (['valid.ItemId', 'train_tr.ItemId'], {}), '(valid.ItemId, train_tr.ItemId)\n', (9606, 9637), True, 'import numpy as np\n'), ((9714, 9769), 'numpy.in1d', 'np.in1d', (['valid.SessionId', 'tslength[tslength >= 2].index'], {}), '(valid.SessionId, tslength[tslength >= 2].index)\n', (9721, 9769), True, 'import numpy as np\n'), ((9942, 9993), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_train_tr.txt"""'], {}), "(self.processed_dir, 'events_train_tr.txt')\n", (9950, 9993), True, 'import os.path as osp\n'), ((10183, 10237), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_train_valid.txt"""'], {}), "(self.processed_dir, 'events_train_valid.txt')\n", (10191, 10237), True, 'import os.path as osp\n'), ((10417, 10442), 'datetime.timedelta', 'timedelta', (['self.days_test'], {}), '(self.days_test)\n', (10426, 10442), False, 'from datetime import timezone, datetime, timedelta\n'), ((10712, 10750), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_train'], {}), '(data.SessionId, session_train)\n', (10719, 10750), True, 'import numpy as np\n'), ((10772, 10809), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'session_test'], {}), '(data.SessionId, session_test)\n', (10779, 10809), True, 'import numpy as np\n'), ((10831, 10865), 'numpy.in1d', 'np.in1d', (['test.ItemId', 'train.ItemId'], {}), '(test.ItemId, train.ItemId)\n', (10838, 10865), True, 'import numpy as np\n'), ((10939, 10993), 'numpy.in1d', 'np.in1d', (['test.SessionId', 'tslength[tslength >= 2].index'], {}), '(test.SessionId, tslength[tslength >= 2].index)\n', (10946, 10993), True, 'import numpy as np\n'), ((11159, 11212), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_train_full.txt"""'], {}), "(self.processed_dir, 'events_train_full.txt')\n", (11167, 11212), True, 'import os.path as osp\n'), ((11392, 11439), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_test.txt"""'], {}), "(self.processed_dir, 'events_test.txt')\n", (11400, 11439), True, 'import os.path as osp\n'), ((12164, 12186), 'datetime.timedelta', 'timedelta', (['days_offset'], {}), '(days_offset)\n', (12173, 12186), False, 'from datetime import timezone, datetime, timedelta\n'), ((12213, 12239), 'datetime.timedelta', 'timedelta', (['self.days_train'], {}), '(self.days_train)\n', (12222, 12239), False, 'from datetime import timezone, datetime, timedelta\n'), ((12264, 12289), 'datetime.timedelta', 'timedelta', (['self.days_test'], {}), '(self.days_test)\n', (12273, 12289), False, 'from datetime import timezone, datetime, timedelta\n'), ((13257, 13296), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'sessions_train'], {}), '(data.SessionId, sessions_train)\n', (13264, 13296), True, 'import numpy as np\n'), ((13676, 13714), 'numpy.in1d', 'np.in1d', (['data.SessionId', 'sessions_test'], {}), '(data.SessionId, sessions_test)\n', (13683, 13714), True, 'import numpy as np\n'), ((13736, 13770), 'numpy.in1d', 'np.in1d', (['test.ItemId', 'train.ItemId'], {}), '(test.ItemId, train.ItemId)\n', (13743, 13770), True, 'import numpy as np\n'), ((13845, 13899), 'numpy.in1d', 'np.in1d', (['test.SessionId', 'tslength[tslength >= 2].index'], {}), '(test.SessionId, tslength[tslength >= 2].index)\n', (13852, 13899), True, 'import numpy as np\n'), ((14300, 14347), 'os.path.join', 'osp.join', (['self.processed_dir', '"""events_buys.txt"""'], {}), "(self.processed_dir, 'events_buys.txt')\n", (14308, 14347), True, 'import os.path as osp\n'), ((1919, 1932), 'os.path.exists', 'osp.exists', (['f'], {}), '(f)\n', (1929, 1932), True, 'import os.path as osp\n')]
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 17:40, 06/11/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from mealpy.bio_based import SMA
import numpy as np
def obj_function(solution):
def booth(x, y):
return (x + 2*y - 7)**2 + (2*x + y - 5)**2
def bukin(x, y):
return 100 * np.sqrt(np.abs(y - 0.01 * x**2)) + 0.01 * np.abs(x + 10)
def matyas(x, y):
return 0.26 * (x**2 + y**2) - 0.48 * x * y
return [booth(solution[0], solution[1]), bukin(solution[0], solution[1]), matyas(solution[0], solution[1])]
problem_dict1 = {
"obj_func": obj_function,
"lb": [-10, -10],
"ub": [10, 10],
"minmax": "min",
"verbose": True,
"obj_weight": [0.4, 0.1, 0.5] # Define it or default value will be [1, 1, 1]
}
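# Illustrative sketch (an assumption about mealpy's multi-objective handling, not
# taken from its source): with "obj_weight" given, the three objective values are
# reduced to a single fitness by a weighted sum, roughly sum(w_i * f_i).
# Reproducing that reduction by hand for a sample point:
example_weights = [0.4, 0.1, 0.5]
example_fitness = sum(w * f for w, f in zip(example_weights, obj_function([1.0, 3.0])))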
## Run the algorithm
model1 = SMA.BaseSMA(problem_dict1, epoch=100, pop_size=50, pr=0.03)
model1.solve()
## You can access them all via object "history" like this:
model1.history.save_global_objectives_chart(filename="hello/goc")
model1.history.save_local_objectives_chart(filename="hello/loc")
model1.history.save_global_best_fitness_chart(filename="hello/gbfc")
model1.history.save_local_best_fitness_chart(filename="hello/lbfc")
model1.history.save_runtime_chart(filename="hello/rtc")
model1.history.save_exploration_exploitation_chart(filename="hello/eec")
model1.history.save_diversity_chart(filename="hello/dc")
model1.history.save_trajectory_chart(list_agent_idx=[3, 5], list_dimensions=[2], filename="hello/tc")
|
[
"numpy.abs",
"mealpy.bio_based.SMA.BaseSMA"
] |
[((1451, 1510), 'mealpy.bio_based.SMA.BaseSMA', 'SMA.BaseSMA', (['problem_dict1'], {'epoch': '(100)', 'pop_size': '(50)', 'pr': '(0.03)'}), '(problem_dict1, epoch=100, pop_size=50, pr=0.03)\n', (1462, 1510), False, 'from mealpy.bio_based import SMA\n'), ((987, 1001), 'numpy.abs', 'np.abs', (['(x + 10)'], {}), '(x + 10)\n', (993, 1001), True, 'import numpy as np\n'), ((953, 978), 'numpy.abs', 'np.abs', (['(y - 0.01 * x ** 2)'], {}), '(y - 0.01 * x ** 2)\n', (959, 978), True, 'import numpy as np\n')]
|
from keras.utils import multi_gpu_model
import numpy as np
import tensorflow as tf
import pickle
from keras.models import Model, Input
from keras.optimizers import Adam, RMSprop
from keras.layers import Dense
from keras.layers import Conv2D, Conv2DTranspose
from keras.layers import Flatten, Add
from keras.layers import Concatenate, Activation
from keras.layers import LeakyReLU, BatchNormalization, Lambda
from keras import backend as K
import os
def accw(y_true, y_pred):
y_pred=K.clip(y_pred, -1, 1)
return K.mean(K.equal(y_true, K.round(y_pred)))
def mssim(y_true, y_pred):
costs = 1.0 - tf.reduce_mean(tf.image.ssim(y_true, y_pred, 2.0))
return costs
def wloss(y_true,y_predict):
return -K.mean(y_true*y_predict)
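# Illustrative check (not part of the original script): with critic targets +1 for
# real and -1 for fake samples (as used in train() below), wloss is the usual WGAN
# critic objective up to a constant factor, which is why the critic weights are
# clipped to [-clip_val, clip_val] to roughly enforce a Lipschitz constraint.
def _wloss_sanity_check():
    rng = np.random.RandomState(0)
    d_real, d_fake = rng.randn(8), rng.randn(8)      # hypothetical critic outputs
    y = np.concatenate([np.ones(8), -np.ones(8)])  # +1 for real, -1 for fake
    d = np.concatenate([d_real, d_fake])
    lhs = -np.mean(y * d)                            # what wloss computes
    rhs = -(np.mean(d_real) - np.mean(d_fake)) / 2     # WGAN critic objective, halved
    assert np.isclose(lhs, rhs)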
def discriminator(inp_shape = (256,256,1), trainable = True):
gamma_init = tf.random_normal_initializer(1., 0.02)
    inp = Input(shape = inp_shape) # use the inp_shape argument instead of a hard-coded (256,256,1)
    l0 = Conv2D(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(inp) # the reference sets b_init to None (no bias); bias is kept enabled here
l0 = LeakyReLU(alpha=0.2)(l0)
l1 = Conv2D(64*2, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l0)
l1 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l1)
l1 = LeakyReLU(alpha=0.2)(l1)
l2 = Conv2D(64*4, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l1)
l2 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l2)
l2 = LeakyReLU(alpha=0.2)(l2)
l3 = Conv2D(64*8, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l2)
l3 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l3)
l3 = LeakyReLU(alpha=0.2)(l3)
l4 = Conv2D(64*16, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l3)
l4 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l4)
l4 = LeakyReLU(alpha=0.2)(l4)
l5 = Conv2D(64*32, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l4)
l5 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l5)
l5 = LeakyReLU(alpha=0.2)(l5)
l6 = Conv2D(64*16, (1,1), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l5)
l6 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l6)
l6 = LeakyReLU(alpha=0.2)(l6)
l7 = Conv2D(64*8, (1,1), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l6)
l7 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l7)
l7 = LeakyReLU(alpha=0.2)(l7)
#x
l8 = Conv2D(64*2, (1,1), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l7)
l8 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l8)
l8 = LeakyReLU(alpha=0.2)(l8)
l9 = Conv2D(64*2, (3,3), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l8)
l9 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l9)
l9 = LeakyReLU(alpha=0.2)(l9)
l10 = Conv2D(64*8, (3,3), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l9)
l10 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l10)
l10 = LeakyReLU(alpha=0.2)(l10)
#y
l11 = Add()([l7,l10])
l11 = LeakyReLU(alpha = 0.2)(l11)
out=Conv2D(filters=1,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l11)
model = Model(inputs = inp, outputs = out)
return model
def resden(x,fil,gr,beta,gamma_init,trainable):
x1=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x)
x1=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x1)
x1=LeakyReLU(alpha=0.2)(x1)
x1=Concatenate(axis=-1)([x,x1])
x2=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x1)
x2=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x2)
x2=LeakyReLU(alpha=0.2)(x2)
x2=Concatenate(axis=-1)([x1,x2])
x3=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x2)
x3=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x3)
x3=LeakyReLU(alpha=0.2)(x3)
x3=Concatenate(axis=-1)([x2,x3])
x4=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x3)
x4=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x4)
x4=LeakyReLU(alpha=0.2)(x4)
x4=Concatenate(axis=-1)([x3,x4])
x5=Conv2D(filters=fil,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x4)
x5=Lambda(lambda x:x*beta)(x5)
xout=Add()([x5,x])
return xout
def resresden(x,fil,gr,betad,betar,gamma_init,trainable):
x1=resden(x,fil,gr,betad,gamma_init,trainable)
x2=resden(x1,fil,gr,betad,gamma_init,trainable)
x3=resden(x2,fil,gr,betad,gamma_init,trainable)
x3=Lambda(lambda x:x*betar)(x3)
xout=Add()([x3,x])
return xout
def generator(inp_shape, trainable = True):
gamma_init = tf.random_normal_initializer(1., 0.02)
fd=512
gr=32
nb=12
betad=0.2
betar=0.2
inp_real_imag = Input(inp_shape)
lay_128dn = Conv2D(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(inp_real_imag)
lay_128dn = LeakyReLU(alpha = 0.2)(lay_128dn)
lay_64dn = Conv2D(128, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_128dn)
lay_64dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_64dn)
lay_64dn = LeakyReLU(alpha = 0.2)(lay_64dn)
lay_32dn = Conv2D(256, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_64dn)
lay_32dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_32dn)
lay_32dn = LeakyReLU(alpha=0.2)(lay_32dn)
lay_16dn = Conv2D(512, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_32dn)
lay_16dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_16dn)
lay_16dn = LeakyReLU(alpha=0.2)(lay_16dn) #16x16
lay_8dn = Conv2D(512, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_16dn)
lay_8dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_8dn)
lay_8dn = LeakyReLU(alpha=0.2)(lay_8dn) #8x8
xc1=Conv2D(filters=fd,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_8dn) #8x8
xrrd=xc1
for m in range(nb):
xrrd=resresden(xrrd,fd,gr,betad,betar,gamma_init,trainable)
xc2=Conv2D(filters=fd,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(xrrd)
lay_8upc=Add()([xc1,xc2])
lay_16up = Conv2DTranspose(1024, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_8upc)
lay_16up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_16up)
lay_16up = Activation('relu')(lay_16up) #16x16
lay_16upc = Concatenate(axis = -1)([lay_16up,lay_16dn])
lay_32up = Conv2DTranspose(256, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_16upc)
lay_32up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_32up)
lay_32up = Activation('relu')(lay_32up) #32x32
lay_32upc = Concatenate(axis = -1)([lay_32up,lay_32dn])
lay_64up = Conv2DTranspose(128, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_32upc)
lay_64up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_64up)
lay_64up = Activation('relu')(lay_64up) #64x64
lay_64upc = Concatenate(axis = -1)([lay_64up,lay_64dn])
lay_128up = Conv2DTranspose(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_64upc)
lay_128up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_128up)
lay_128up = Activation('relu')(lay_128up) #128x128
lay_128upc = Concatenate(axis = -1)([lay_128up,lay_128dn])
lay_256up = Conv2DTranspose(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_128upc)
lay_256up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_256up)
lay_256up = Activation('relu')(lay_256up) #256x256
out = Conv2D(1, (1,1), strides = (1,1), activation = 'tanh', padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_256up)
model = Model(inputs = inp_real_imag, outputs = out)
return model
def define_gan_model(gen_model, dis_model, inp_shape):
dis_model.trainable = False
inp = Input(shape = inp_shape)
out_g = gen_model(inp)
out_dis = dis_model(out_g)
out_g1 = out_g
model = Model(inputs = inp, outputs = [out_dis, out_g, out_g1])
model.summary()
return model
def train(g_par, d_par, gan_model, dataset_real, u_sampled_data, n_epochs, n_batch, n_critic, clip_val, n_patch, f):
bat_per_epo = int(dataset_real.shape[0]/n_batch)
half_batch = int(n_batch/2)
for i in range(n_epochs):
for j in range(bat_per_epo):
# training the discriminator
for k in range(n_critic):
ix = np.random.randint(0, dataset_real.shape[0], half_batch)
X_real = dataset_real[ix]
y_real = np.ones((half_batch,n_patch,n_patch,1))
ix_1 = np.random.randint(0, u_sampled_data.shape[0], half_batch)
X_fake = g_par.predict(u_sampled_data[ix_1])
y_fake = -np.ones((half_batch,n_patch,n_patch,1))
X, y = np.vstack((X_real, X_fake)), np.vstack((y_real,y_fake))
d_loss, accuracy = d_par.train_on_batch(X,y)
for l in d_par.layers:
weights=l.get_weights()
weights=[np.clip(w, -clip_val,clip_val) for w in weights]
l.set_weights(weights)
# training the generator
ix = np.random.randint(0, dataset_real.shape[0], n_batch)
X_r = dataset_real[ix]
X_gen_inp = u_sampled_data[ix]
y_gan = np.ones((n_batch,n_patch,n_patch,1))
g_loss = gan_model.train_on_batch ([X_gen_inp], [y_gan, X_r, X_r])
f.write('>%d, %d/%d, d=%.3f, acc = %.3f, w=%.3f, mae=%.3f, mssim=%.3f, g=%.3f' %(i+1, j+1, bat_per_epo, d_loss, accuracy, g_loss[1], g_loss[2], g_loss[3], g_loss[0]))
f.write('\n')
print ('>%d, %d/%d, d=%.3f, acc = %.3f, g=%.3f' %(i+1, j+1, bat_per_epo, d_loss, accuracy, g_loss[0]))
filename = '/home/cs-mri-gan/gen_weights_a5_%04d.h5' % (i+1)
g_save = g_par.get_layer('model_3')
g_save.save_weights(filename)
f.close()
#hyperparameters
n_epochs = 300
n_batch = 32
n_critic = 3
clip_val = 0.05
in_shape_gen = (256,256,2)
in_shape_dis = (256,256,1)
accel = 3
d_model = discriminator (inp_shape = in_shape_dis, trainable = True)
d_model.summary()
d_par = multi_gpu_model(d_model, gpus=4, cpu_relocation = True) #for multi-gpu training
opt = Adam(lr = 0.0002, beta_1 = 0.5)
d_par.compile(loss = wloss, optimizer = opt, metrics = [accw])
g_model = generator(inp_shape = in_shape_gen , trainable = True)
g_par = multi_gpu_model(g_model, gpus=4, cpu_relocation = True) #for multi-gpu training
g_par.summary()
gan_model = define_gan_model(g_par, d_par, in_shape_gen)
opt1 = Adam(lr = 0.0001, beta_1 = 0.5)
gan_model.compile(loss = [wloss, 'mae', mssim], optimizer = opt1, loss_weights = [0.01, 20.0, 1.0]) #loss weights for generator training
n_patch=d_model.output_shape[1]
data_path='/home/cs-mri-gan/training_gt_aug.pickle' #Ground truth
usam_path='/home/cs-mri-gan/training_usamp_1dg_a5_aug.pickle' #Zero-filled reconstructions
df = open(data_path,'rb')
uf = open(usam_path,'rb')
dataset_real = pickle.load(df)
u_sampled_data = pickle.load(uf)
dataset_real = np.expand_dims(dataset_real, axis = -1)
u_sampled_data = np.expand_dims(u_sampled_data, axis = -1)
u_sampled_data_real = u_sampled_data.real
u_sampled_data_imag = u_sampled_data.imag
u_sampled_data_2c = np.concatenate((u_sampled_data_real, u_sampled_data_imag), axis = -1)
f = open('/home/cs-mri-gan/log_a5.txt', 'x') # create the log file; raises if it already exists
f.close() # close the creation handle instead of leaking it
f = open('/home/cs-mri-gan/log_a5.txt', 'a')
train(g_par, d_par, gan_model, dataset_real, u_sampled_data_2c, n_epochs, n_batch, n_critic, clip_val, n_patch, f)
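# Illustrative usage sketch (hypothetical helper; not part of the original script):
# rebuilding the generator and loading one of the per-epoch snapshots saved by
# train() for inference on new zero-filled reconstructions.
def load_generator_snapshot(weights_path):
    g_infer = generator(inp_shape = in_shape_gen, trainable = False)
    g_infer.load_weights(weights_path)  # snapshots were saved from the inner 'model_3' generator
    return g_infer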
|
[
"tensorflow.image.ssim",
"numpy.ones",
"keras.models.Model",
"numpy.clip",
"pickle.load",
"numpy.random.randint",
"keras.layers.LeakyReLU",
"keras.utils.multi_gpu_model",
"keras.optimizers.Adam",
"keras.layers.Conv2DTranspose",
"keras.layers.Concatenate",
"tensorflow.random_normal_initializer",
"keras.layers.Conv2D",
"keras.backend.clip",
"keras.backend.round",
"keras.layers.BatchNormalization",
"numpy.concatenate",
"numpy.vstack",
"keras.layers.Activation",
"numpy.expand_dims",
"keras.models.Input",
"keras.layers.Add",
"keras.backend.mean",
"keras.layers.Lambda"
] |
[((12779, 12832), 'keras.utils.multi_gpu_model', 'multi_gpu_model', (['d_model'], {'gpus': '(4)', 'cpu_relocation': '(True)'}), '(d_model, gpus=4, cpu_relocation=True)\n', (12794, 12832), False, 'from keras.utils import multi_gpu_model\n'), ((12866, 12893), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (12870, 12893), False, 'from keras.optimizers import Adam, RMSprop\n'), ((13036, 13089), 'keras.utils.multi_gpu_model', 'multi_gpu_model', (['g_model'], {'gpus': '(4)', 'cpu_relocation': '(True)'}), '(g_model, gpus=4, cpu_relocation=True)\n', (13051, 13089), False, 'from keras.utils import multi_gpu_model\n'), ((13198, 13225), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)', 'beta_1': '(0.5)'}), '(lr=0.0001, beta_1=0.5)\n', (13202, 13225), False, 'from keras.optimizers import Adam, RMSprop\n'), ((13626, 13641), 'pickle.load', 'pickle.load', (['df'], {}), '(df)\n', (13637, 13641), False, 'import pickle\n'), ((13659, 13674), 'pickle.load', 'pickle.load', (['uf'], {}), '(uf)\n', (13670, 13674), False, 'import pickle\n'), ((13691, 13728), 'numpy.expand_dims', 'np.expand_dims', (['dataset_real'], {'axis': '(-1)'}), '(dataset_real, axis=-1)\n', (13705, 13728), True, 'import numpy as np\n'), ((13748, 13787), 'numpy.expand_dims', 'np.expand_dims', (['u_sampled_data'], {'axis': '(-1)'}), '(u_sampled_data, axis=-1)\n', (13762, 13787), True, 'import numpy as np\n'), ((13897, 13964), 'numpy.concatenate', 'np.concatenate', (['(u_sampled_data_real, u_sampled_data_imag)'], {'axis': '(-1)'}), '((u_sampled_data_real, u_sampled_data_imag), axis=-1)\n', (13911, 13964), True, 'import numpy as np\n'), ((487, 508), 'keras.backend.clip', 'K.clip', (['y_pred', '(-1)', '(1)'], {}), '(y_pred, -1, 1)\n', (493, 508), True, 'from keras import backend as K\n'), ((822, 861), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(1.0)', '(0.02)'], {}), '(1.0, 0.02)\n', (850, 861), True, 'import tensorflow as tf\n'), ((876, 902), 'keras.models.Input', 'Input', ([], {'shape': '(256, 256, 1)'}), '(shape=(256, 256, 1))\n', (881, 902), False, 'from keras.models import Model, Input\n'), ((4140, 4170), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': 'out'}), '(inputs=inp, outputs=out)\n', (4145, 4170), False, 'from keras.models import Model, Input\n'), ((6067, 6106), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(1.0)', '(0.02)'], {}), '(1.0, 0.02)\n', (6095, 6106), True, 'import tensorflow as tf\n'), ((6184, 6200), 'keras.models.Input', 'Input', (['inp_shape'], {}), '(inp_shape)\n', (6189, 6200), False, 'from keras.models import Model, Input\n'), ((10158, 10198), 'keras.models.Model', 'Model', ([], {'inputs': 'inp_real_imag', 'outputs': 'out'}), '(inputs=inp_real_imag, outputs=out)\n', (10163, 10198), False, 'from keras.models import Model, Input\n'), ((10327, 10349), 'keras.models.Input', 'Input', ([], {'shape': 'inp_shape'}), '(shape=inp_shape)\n', (10332, 10349), False, 'from keras.models import Model, Input\n'), ((10441, 10492), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': '[out_dis, out_g, out_g1]'}), '(inputs=inp, outputs=[out_dis, out_g, out_g1])\n', (10446, 10492), False, 'from keras.models import Model, Input\n'), ((711, 737), 'keras.backend.mean', 'K.mean', (['(y_true * y_predict)'], {}), '(y_true * y_predict)\n', (717, 737), True, 'from keras import backend as K\n'), ((917, 1044), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (923, 1044), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((1133, 1153), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1142, 1153), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1172, 1303), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 2)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 2, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (1178, 1303), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((1319, 1388), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (1337, 1388), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1406, 1426), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1415, 1426), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1445, 1576), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 4)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 4, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (1451, 1576), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((1592, 1661), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (1610, 1661), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1679, 1699), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1688, 1699), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1718, 1849), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 8)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 8, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (1724, 1849), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((1865, 1934), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (1883, 1934), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1952, 1972), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1961, 1972), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((1991, 2123), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 16)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', bias_initializer='zeros')\n", (1997, 2123), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((2139, 2208), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (2157, 2208), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2226, 2246), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2235, 2246), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2265, 2397), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 32)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 32, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (2271, 2397), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((2413, 2482), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (2431, 2482), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2500, 2520), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2509, 2520), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2539, 2671), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 16)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 16, (1, 1), strides=(1, 1), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (2545, 2671), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((2687, 2756), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (2705, 2756), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2774, 2794), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2783, 2794), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((2813, 2944), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 8)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 8, (1, 1), strides=(1, 1), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (2819, 2944), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((2960, 3029), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (2978, 3029), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3047, 3067), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3056, 3067), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3093, 3224), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 2)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(True)',\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (3099, 3224), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((3240, 3309), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (3258, 3309), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3327, 3347), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3336, 3347), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3366, 3497), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 2)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 2, (3, 3), strides=(1, 1), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (3372, 3497), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((3513, 3582), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (3531, 3582), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3600, 3620), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3609, 3620), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3640, 3771), 'keras.layers.Conv2D', 'Conv2D', (['(64 * 8)', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64 * 8, (3, 3), strides=(1, 1), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (3646, 3771), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((3788, 3857), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (3806, 3857), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3877, 3897), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3886, 3897), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3920, 3925), 'keras.layers.Add', 'Add', ([], {}), '()\n', (3923, 3925), False, 'from keras.layers import Flatten, Add\n'), ((3946, 3966), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3955, 3966), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((3987, 4123), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(1)', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=1, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (3993, 4123), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((4252, 4389), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'gr', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=gr, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (4258, 4389), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((4399, 4468), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (4417, 4468), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((4484, 4504), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (4493, 4504), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((4521, 4541), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4532, 4541), False, 'from keras.layers import Concatenate, Activation\n'), ((4562, 4699), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'gr', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=gr, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (4568, 4699), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((4710, 4779), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (4728, 4779), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((4795, 4815), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (4804, 4815), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((4828, 4848), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4839, 4848), False, 'from keras.layers import Concatenate, Activation\n'), ((4874, 5011), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'gr', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=gr, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (4880, 5011), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((5022, 5091), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (5040, 5091), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5107, 5127), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (5116, 5127), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5140, 5160), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (5151, 5160), False, 'from keras.layers import Concatenate, Activation\n'), ((5182, 5319), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'gr', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=gr, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (5188, 5319), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((5330, 5399), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (5348, 5399), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5415, 5435), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (5424, 5435), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5448, 5468), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (5459, 5468), False, 'from keras.layers import Concatenate, Activation\n'), ((5490, 5628), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'fil', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=fil, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (5496, 5628), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((5639, 5665), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x * beta)'], {}), '(lambda x: x * beta)\n', (5645, 5665), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5676, 5681), 'keras.layers.Add', 'Add', ([], {}), '()\n', (5679, 5681), False, 'from keras.layers import Flatten, Add\n'), ((5932, 5959), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x * betar)'], {}), '(lambda x: x * betar)\n', (5938, 5959), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((5970, 5975), 'keras.layers.Add', 'Add', ([], {}), '()\n', (5973, 5975), False, 'from keras.layers import Flatten, Add\n'), ((6216, 6343), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (6222, 6343), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((6382, 6402), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (6391, 6402), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((6435, 6563), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(128, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (6441, 6563), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((6593, 6662), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (6611, 6662), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((6691, 6711), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (6700, 6711), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((6743, 6871), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(256, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (6749, 6871), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((6900, 6969), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (6918, 6969), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((6998, 7018), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7007, 7018), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((7048, 7176), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(512, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (7054, 7176), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((7205, 7274), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (7223, 7274), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((7303, 7323), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7312, 7323), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((7361, 7489), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(512, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (7367, 7489), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((7517, 7586), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (7535, 7586), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((7613, 7633), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7622, 7633), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((7657, 7794), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'fd', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=fd, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (7663, 7794), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((7924, 8061), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'fd', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(filters=fd, kernel_size=3, strides=1, padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (7930, 8061), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((8079, 8084), 'keras.layers.Add', 'Add', ([], {}), '()\n', (8082, 8084), False, 'from keras.layers import Flatten, Add\n'), ((8111, 8249), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(1024)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (8126, 8249), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((8278, 8347), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (8296, 8347), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((8376, 8394), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8386, 8394), False, 'from keras.layers import Concatenate, Activation\n'), ((8432, 8452), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (8443, 8452), False, 'from keras.layers import Concatenate, Activation\n'), ((8495, 8632), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(256)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(256, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (8510, 8632), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((8663, 8732), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (8681, 8732), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((8761, 8779), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8771, 8779), False, 'from keras.layers import Concatenate, Activation\n'), ((8817, 8837), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (8828, 8837), False, 'from keras.layers import Concatenate, Activation\n'), ((8881, 9018), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(128)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(128, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (8896, 9018), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((9048, 9117), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (9066, 9117), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((9146, 9164), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9156, 9164), False, 'from keras.layers import Concatenate, Activation\n'), ((9202, 9222), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (9213, 9222), False, 'from keras.layers import Concatenate, Activation\n'), ((9267, 9403), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (9282, 9403), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((9434, 9503), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (9452, 9503), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((9534, 9552), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9544, 9552), False, 'from keras.layers import Concatenate, Activation\n'), ((9594, 9614), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (9605, 9614), False, 'from keras.layers import Concatenate, Activation\n'), ((9661, 9797), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(64, (4, 4), strides=(2, 2), padding='same', use_bias=True,\n kernel_initializer='he_normal', bias_initializer='zeros')\n", (9676, 9797), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((9829, 9898), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'gamma_initializer': 'gamma_init', 'trainable': 'trainable'}), '(gamma_initializer=gamma_init, trainable=trainable)\n', (9847, 9898), False, 'from keras.layers import LeakyReLU, BatchNormalization, Lambda\n'), ((9929, 9947), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9939, 9947), False, 'from keras.layers import Concatenate, Activation\n'), ((9983, 10128), 'keras.layers.Conv2D', 'Conv2D', (['(1)', '(1, 1)'], {'strides': '(1, 1)', 'activation': '"""tanh"""', 'padding': '"""same"""', 'use_bias': '(True)', 'kernel_initializer': '"""he_normal"""', 'bias_initializer': '"""zeros"""'}), "(1, (1, 1), strides=(1, 1), activation='tanh', padding='same',\n use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros')\n", (9989, 10128), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((541, 556), 'keras.backend.round', 'K.round', (['y_pred'], {}), '(y_pred)\n', (548, 556), True, 'from keras import backend as K\n'), ((618, 652), 'tensorflow.image.ssim', 'tf.image.ssim', (['y_true', 'y_pred', '(2.0)'], {}), '(y_true, y_pred, 2.0)\n', (631, 652), True, 'import tensorflow as tf\n'), ((11768, 11820), 'numpy.random.randint', 'np.random.randint', (['(0)', 'dataset_real.shape[0]', 'n_batch'], {}), '(0, dataset_real.shape[0], n_batch)\n', (11785, 11820), True, 'import numpy as np\n'), ((11919, 11958), 'numpy.ones', 'np.ones', (['(n_batch, n_patch, n_patch, 1)'], {}), '((n_batch, n_patch, n_patch, 1))\n', (11926, 11958), True, 'import numpy as np\n'), ((10932, 10987), 'numpy.random.randint', 'np.random.randint', (['(0)', 'dataset_real.shape[0]', 'half_batch'], {}), '(0, dataset_real.shape[0], half_batch)\n', (10949, 10987), True, 'import numpy as np\n'), ((11068, 11110), 'numpy.ones', 'np.ones', (['(half_batch, n_patch, n_patch, 1)'], {}), '((half_batch, n_patch, n_patch, 1))\n', (11075, 11110), True, 'import numpy as np\n'), ((11145, 11202), 'numpy.random.randint', 'np.random.randint', (['(0)', 'u_sampled_data.shape[0]', 'half_batch'], {}), '(0, u_sampled_data.shape[0], half_batch)\n', (11162, 11202), True, 'import numpy as np\n'), ((11291, 11333), 'numpy.ones', 'np.ones', (['(half_batch, n_patch, n_patch, 1)'], {}), '((half_batch, n_patch, n_patch, 1))\n', (11298, 11333), True, 'import numpy as np\n'), ((11367, 11394), 'numpy.vstack', 'np.vstack', (['(X_real, X_fake)'], {}), '((X_real, X_fake))\n', (11376, 11394), True, 'import numpy as np\n'), ((11396, 11423), 'numpy.vstack', 'np.vstack', (['(y_real, y_fake)'], {}), '((y_real, y_fake))\n', (11405, 11423), True, 'import numpy as np\n'), ((11609, 11640), 'numpy.clip', 'np.clip', (['w', '(-clip_val)', 'clip_val'], {}), '(w, -clip_val, clip_val)\n', (11616, 11640), True, 'import numpy as np\n')]
"""Identity matrix."""
from scipy import sparse
import numpy as np
def iden(dim: int, is_sparse: bool = False) -> np.ndarray:
r"""
Calculate the :code:`dim`-by-:code:`dim` identity matrix [WIKID]_.
Returns the :code:`dim`-by-:code:`dim` identity matrix. If :code:`is_sparse
= False` then the matrix will be full. If :code:`is_sparse = True` then the
matrix will be sparse.
.. math::
\mathbb{I} = \begin{pmatrix}
1 & 0 & 0 & \ldots & 0 \\
0 & 1 & 0 & \ldots & 0 \\
0 & 0 & 1 & \ldots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \ldots & 1
\end{pmatrix}
Only use this function within other functions to easily get the correct
identity matrix. If you always want either the full or the sparse
identity matrix, just use numpy's built-in np.identity function.
Examples
==========
The identity matrix generated from :math:`d = 3` yields the following
matrix:
.. math::
\mathbb{I}_3 = \begin{pmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{pmatrix}
>>> from toqito.matrices import iden
>>> iden(3)
array([[1., 0., 0.],
       [0., 1., 0.],
       [0., 0., 1.]])
It is also possible to create sparse identity matrices. The sparse identity
matrix generated from :math:`d = 10` yields the following matrix:
>>> from toqito.matrices import iden
>>> iden(10, True)
<10x10 sparse matrix of type '<class 'numpy.float64'>' with 10 stored
elements (1 diagonals) in DIAgonal format>
References
==========
.. [WIKID] Wikipedia: Identity matrix
https://en.wikipedia.org/wiki/Identity_matrix
:param dim: Integer representing dimension of identity matrix.
:param is_sparse: Whether or not the matrix is sparse.
:return: Sparse identity matrix of dimension :code:`dim`.
"""
if is_sparse:
id_mat = sparse.eye(dim)
else:
id_mat = np.identity(dim)
return id_mat
|
[
"numpy.identity",
"scipy.sparse.eye"
] |
[((2041, 2056), 'scipy.sparse.eye', 'sparse.eye', (['dim'], {}), '(dim)\n', (2051, 2056), False, 'from scipy import sparse\n'), ((2084, 2100), 'numpy.identity', 'np.identity', (['dim'], {}), '(dim)\n', (2095, 2100), True, 'import numpy as np\n')]
|
import faiss
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import normalized_mutual_info_score
from argparse import ArgumentParser
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="PyTorch metric learning nmi script")
# Optional arguments for the launch helper
parser.add_argument("--num_workers", type=int, default=4,
help="The number of workers for eval")
parser.add_argument("--snap", type=str,
help="The snapshot to compute nmi")
parser.add_argument("--output", type=str, default="/data1/output/",
help="The output file")
parser.add_argument("--dataset", type=str, default="StanfordOnlineProducts",
help="The dataset for training")
parser.add_argument('--binarize', action='store_true')
return parser.parse_args()
def test_nmi(embeddings, labels, output_file):
unique_labels = np.unique(labels)
kmeans = KMeans(n_clusters=unique_labels.size, random_state=0, n_jobs=-1).fit(embeddings)
nmi = normalized_mutual_info_score(kmeans.labels_, labels)
print("NMI: {}".format(nmi))
return nmi
def test_nmi_faiss(embeddings, labels):
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
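    # Cluster the embeddings into as many groups as there are distinct labels,
    # then score the cluster assignment against the true labels with NMI.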
unique_labels = np.unique(labels)
d = embeddings.shape[1]
kmeans = faiss.Clustering(d, unique_labels.size)
kmeans.verbose = True
kmeans.niter = 300
kmeans.nredo = 10
kmeans.seed = 0
index = faiss.GpuIndexFlatL2(res, d, flat_config)
kmeans.train(embeddings, index)
dists, pred_labels = index.search(embeddings, 1)
pred_labels = pred_labels.squeeze()
nmi = normalized_mutual_info_score(labels, pred_labels)
print("NMI: {}".format(nmi))
return nmi
if __name__ == '__main__':
args = parse_args()
embedding_file = args.snap.replace('.pth', '_embed.npy')
all_embeddings = np.load(embedding_file)
    label_file = args.snap.replace('.pth', '_label.npy')
    all_labels = np.load(label_file)
nmi = test_nmi_faiss(all_embeddings, all_labels)
|
[
"numpy.load",
"faiss.GpuIndexFlatL2",
"argparse.ArgumentParser",
"sklearn.cluster.KMeans",
"faiss.Clustering",
"faiss.GpuIndexFlatConfig",
"faiss.StandardGpuResources",
"numpy.unique",
"sklearn.metrics.cluster.normalized_mutual_info_score"
] |
[((298, 362), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""PyTorch metric learning nmi script"""'}), "(description='PyTorch metric learning nmi script')\n", (312, 362), False, 'from argparse import ArgumentParser\n'), ((1057, 1074), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1066, 1074), True, 'import numpy as np\n'), ((1180, 1232), 'sklearn.metrics.cluster.normalized_mutual_info_score', 'normalized_mutual_info_score', (['kmeans.labels_', 'labels'], {}), '(kmeans.labels_, labels)\n', (1208, 1232), False, 'from sklearn.metrics.cluster import normalized_mutual_info_score\n'), ((1334, 1362), 'faiss.StandardGpuResources', 'faiss.StandardGpuResources', ([], {}), '()\n', (1360, 1362), False, 'import faiss\n'), ((1381, 1407), 'faiss.GpuIndexFlatConfig', 'faiss.GpuIndexFlatConfig', ([], {}), '()\n', (1405, 1407), False, 'import faiss\n'), ((1456, 1473), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1465, 1473), True, 'import numpy as np\n'), ((1515, 1554), 'faiss.Clustering', 'faiss.Clustering', (['d', 'unique_labels.size'], {}), '(d, unique_labels.size)\n', (1531, 1554), False, 'import faiss\n'), ((1659, 1700), 'faiss.GpuIndexFlatL2', 'faiss.GpuIndexFlatL2', (['res', 'd', 'flat_config'], {}), '(res, d, flat_config)\n', (1679, 1700), False, 'import faiss\n'), ((1844, 1893), 'sklearn.metrics.cluster.normalized_mutual_info_score', 'normalized_mutual_info_score', (['labels', 'pred_labels'], {}), '(labels, pred_labels)\n', (1872, 1893), False, 'from sklearn.metrics.cluster import normalized_mutual_info_score\n'), ((2078, 2101), 'numpy.load', 'np.load', (['embedding_file'], {}), '(embedding_file)\n', (2085, 2101), True, 'import numpy as np\n'), ((2176, 2195), 'numpy.load', 'np.load', (['lable_file'], {}), '(lable_file)\n', (2183, 2195), True, 'import numpy as np\n'), ((1088, 1152), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'unique_labels.size', 'random_state': '(0)', 'n_jobs': '(-1)'}), '(n_clusters=unique_labels.size, random_state=0, n_jobs=-1)\n', (1094, 1152), False, 'from sklearn.cluster import KMeans\n')]
|
import os
import numpy as np
import os.path as op
import matplotlib.pyplot as plt
from matplotlib.colors import BASE_COLORS, SymLogNorm
from scipy.stats import ttest_ind
from swann.preprocessing import get_info
from swann.utils import get_config, derivative_fname
from swann.analyses import decompose_tfr, find_bursts, get_bursts
from mne.viz import iter_topography
from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR
from mne import Epochs, EvokedArray
def plot_spectrogram(rawf, raw, event, events, bl_events,
method='raw', baseline='z-score',
freqs=np.logspace(np.log(4), np.log(250), 50, base=np.e),
n_cycles=7, use_fft=True, ncols=3, plot_erp=True,
plot_bursts=False, picks=None, verbose=True,
overwrite=False):
    ''' Plots time-frequency spectrograms for the given event.
Parameters
----------
rawf : pybids.BIDSlayout file
The object containing the raw data.
raw : mne.io.Raw
The raw data object.
event : str
The name of the event (e.g. `Response`).
events : np.array(n_events, 3)
The events from mne.events_from_annotations or mne.find_events
corresponding to the event and trials that are described by the name.
bl_events: np.array(n_events, 3)
The events from mne.events_from_annotations or mne.find_events
corresponding to the baseline for the event and trials
that are described by the name.
    method : `raw` | `phase-locked` | `non-phase-locked` | `total`
How to plot the spectrograms:
raw -- plot without averaging power (default)
phase-locked -- just average the event-related potential (ERP)
non-phase-locked -- subtract the ERP from each epoch, do time
frequency decomposition (TFR) then average
total -- do TFR on each epoch and then average
baseline : `z-score` | `gain`
        How to baseline spectrogram data:
z-score -- for each frequency, subtract the median and divide
by the standard deviation (default)
gain -- divide by median
freqs : np.array
The frequencies over which to compute the spectral data.
n_cycles : int, np.array
The number of cycles to use in the Morlet transform
use_fft : bool
Use Fast Fourier Transform see `mne.time_frequency.tfr.cwt`.
ncols : int
The number of ncols to use in the plot (for `method=raw`).
plot_erp : bool
Whether to plot the event-related potential on top.
plot_bursts : bool
Whether to include vertical bars for when the bursts are detected
(for `method=raw`).
    picks : None | list of str
        The names of the channels to plot.
    verbose : bool
        Whether to print progress while computing the decomposition.
    overwrite : bool
        Whether to overwrite an existing saved plot.
    '''
config = get_config()
raw = raw.copy()
raw.load_data()
if method not in ('raw', 'phase-locked', 'non-phase-locked', 'total'):
raise ValueError('Unrecognized method {}'.format(method))
if picks is None:
picks = raw.ch_names
else:
if isinstance(picks, str):
picks = [picks]
raw = raw.pick_channels(picks)
if method == 'raw' and len(picks) > 1:
raise ValueError('Only one channel can be plotted at a time '
'for raw spectrograms')
plotf = derivative_fname(rawf, 'plots/spectrograms',
'event-{}_spectrogram_{}_{}_power'.format(
event, method, baseline),
config['fig'])
if op.isfile(plotf) and not overwrite:
print('Spectrogram plot for {} already exists, '
'use `overwrite=True` to replot'.format(event))
return
if method == 'raw' and plot_bursts:
bursts = find_bursts(rawf, return_saved=True)
if isinstance(n_cycles, np.ndarray) and len(freqs) != len(n_cycles):
raise ValueError('Mismatch lengths n_cycles {} to freqs {}'.format(
n_cycles, freqs))
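    # Epoch with 1 s of padding on each side; the padding absorbs edge
    # artifacts from the wavelet transform and is cropped off after the TFR.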
epochs = Epochs(raw, events, tmin=config['tmin'] - 1, baseline=None,
tmax=config['tmax'] + 1, preload=True)
# align baseline events with epochs with enough events
bl_events = np.delete(bl_events, [i for i, e in enumerate(bl_events[:, 2])
if e not in epochs.events[:, 2]], axis=0)
bl_epochs = Epochs(raw, bl_events, tmin=config['baseline_tmin'] - 1,
baseline=None, tmax=config['baseline_tmax'] + 1,
preload=True)
cropped_epochs = epochs.copy().crop(tmin=config['tmin'],
tmax=config['tmax'])
cropped_bl_epochs = bl_epochs.copy().crop(
tmin=config['baseline_tmin'], tmax=config['baseline_tmax'])
if method == 'phase-locked':
bl_evoked = EvokedArray(np.median(bl_epochs._data, axis=0),
info=bl_epochs.info, tmin=bl_epochs.tmin,
nave=len(bl_epochs))
bl_evoked_tfr = tfr_morlet(bl_evoked, freqs, n_cycles=n_cycles,
use_fft=use_fft, return_itc=False)
bl_evoked_tfr.crop(tmin=config['baseline_tmin'],
tmax=config['baseline_tmax'])
evoked = EvokedArray(np.median(epochs._data, axis=0),
info=epochs.info, tmin=epochs.tmin,
nave=len(epochs))
evoked_tfr = tfr_morlet(evoked, freqs, n_cycles=n_cycles,
use_fft=use_fft, return_itc=False)
evoked_tfr.crop(tmin=config['tmin'], tmax=config['tmax'])
evoked_tfr.data = \
evoked_tfr.data - np.median(bl_evoked_tfr.data,
axis=2)[:, :, np.newaxis]
evoked_tfr.data /= np.std(bl_evoked_tfr.data, axis=2)[:, :, np.newaxis]
else:
if method == 'non-phase-locked':
epochs._data -= np.median(epochs._data, axis=0)
epochs_data = np.zeros((len(epochs), len(epochs.ch_names), len(freqs),
len(cropped_epochs.times)))
bl_epochs_data = np.zeros((len(bl_epochs), len(bl_epochs.ch_names),
len(freqs), len(cropped_bl_epochs.times)))
epochs_tfr = EpochsTFR(epochs.info, epochs_data, cropped_epochs.times,
freqs, verbose=False)
bl_epochs_tfr = EpochsTFR(bl_epochs.info, bl_epochs_data,
cropped_bl_epochs.times, freqs,
verbose=False)
if method != 'raw':
evoked_tfr_data = np.zeros((len(epochs.ch_names), len(freqs),
len(cropped_epochs.times)))
evoked_tfr = AverageTFR(epochs.info, evoked_tfr_data,
cropped_epochs.times, freqs,
nave=len(epochs))
for i, ch in enumerate(epochs.ch_names):
if verbose:
print('\nComputing TFR ({}/{}) for {}... '
'Computing frequency'.format(i, len(epochs.ch_names),
ch), end=' ', flush=True) # noqa
this_epochs = epochs.copy().pick_channels([ch])
this_bl_epochs = bl_epochs.copy().pick_channels([ch])
for j, freq in enumerate(freqs):
if verbose:
print('{:.2f}'.format(freq), end=' ', flush=True)
            this_n_cycles = (n_cycles if isinstance(n_cycles, int) else
                             n_cycles[j])  # n_cycles is aligned with freqs, so index by frequency
this_bl_epochs_tfr = \
tfr_morlet(this_bl_epochs, [freq], n_cycles=this_n_cycles,
use_fft=use_fft, average=False,
return_itc=False, verbose=False)
this_bl_epochs_tfr = this_bl_epochs_tfr.crop(
tmin=config['baseline_tmin'], tmax=config['baseline_tmax'])
this_epochs_tfr = \
tfr_morlet(this_epochs, [freq], n_cycles=this_n_cycles,
use_fft=use_fft, average=False,
return_itc=False, verbose=False)
this_epochs_tfr = this_epochs_tfr.crop(
tmin=config['tmin'], tmax=config['tmax'])
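                # z-score against the combined baseline + event window: for
                # each frequency, subtract the median and divide by the std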
full_data = np.concatenate([this_bl_epochs_tfr.data,
this_epochs_tfr.data], axis=3)
epochs_tfr.data[:, i:i + 1, j:j + 1, :] = this_epochs_tfr.data
epochs_tfr.data[:, i:i + 1, j:j + 1, :] -= \
np.median(full_data, axis=3)[:, :, :, np.newaxis]
epochs_tfr.data[:, i:i + 1, j:j + 1, :] /= \
np.std(full_data, axis=3)[:, :, :, np.newaxis]
bl_epochs_tfr.data[:, i:i + 1, j:j + 1, :] = \
this_bl_epochs_tfr.data
bl_epochs_tfr.data[:, i:i + 1, j:j + 1, :] -= \
np.median(full_data, axis=3)[:, :, :, np.newaxis]
bl_epochs_tfr.data[:, i:i + 1, j:j + 1, :] /= \
np.std(full_data, axis=3)[:, :, :, np.newaxis]
if method != 'raw':
this_evoked_tfr = np.median(epochs_tfr.data[:, i, j],
axis=0)
this_bl_evoked_tfr = np.median(bl_epochs_tfr.data[:, i, j],
axis=0)
evoked_tfr.data[i, j] = \
this_evoked_tfr - np.median(this_bl_evoked_tfr)
evoked_tfr.data[i, j] /= np.std(this_bl_evoked_tfr)
if method == 'raw':
ch_name = epochs_tfr.ch_names[0]
vmin, vmax = np.min(epochs_tfr.data), np.max(epochs_tfr.data)
emin, emax = np.min(cropped_epochs._data), np.max(cropped_epochs._data)
if verbose:
print('Plotting spectrogram for channel {}'.format(ch_name))
if plot_bursts:
n_bursts = len(bursts[bursts['channel'] == ch_name])
print('{} bursts for this channel total'.format(n_bursts))
nrows = int(np.ceil(len(events) / ncols))
fig, axes = plt.subplots(nrows, ncols)
fig.set_size_inches(ncols, nrows)
axes = axes.flatten()
for j, this_tfr in enumerate(epochs_tfr):
evoked_data = (cropped_epochs._data[j, 0], emin, emax)
cmap = _plot_spectrogram(
                axes[j], this_tfr[0], epochs_tfr.times,
vmin, vmax, freqs, evoked_data,
show_xticks=j >= len(events) - ncols,
show_yticks=j % ncols == 0,
show_ylabel=j == int(nrows / 2) * ncols)
if plot_bursts:
_plot_bursts(config, events, raw, bursts, j, axes, ch_name)
for ax in axes[len(epochs_tfr):]:
ax.axis('off')
else:
if plot_erp:
evoked_data = np.median(cropped_epochs._data, axis=0)
evoked_data -= np.median(evoked_data, axis=1)[:, np.newaxis]
evoked = EvokedArray(evoked_data, info=epochs.info,
tmin=epochs.tmin, nave=len(epochs))
emin, emax = np.min(evoked.data), np.max(evoked.data)
vmin, vmax = np.min(evoked_tfr.data), np.max(evoked_tfr.data)
if raw.info['dig'] is None:
nrows = int(len(raw.ch_names) ** 0.5)
ncols = int(len(raw.ch_names) / nrows) + 1
fig, axes = plt.subplots(nrows, ncols)
fig.set_size_inches(12, 8)
axes = axes.flatten()
for idx, ax in enumerate(axes):
if idx < len(picks):
cmap = _plot_spectrogram(
ax, evoked_tfr.data[idx], evoked_tfr.times,
vmin, vmax, freqs, ((evoked.data[idx], emin, emax) if
plot_erp else None),
show_xticks=idx >= len(picks) - ncols,
show_yticks=idx % ncols == 0,
show_ylabel=idx == int(nrows / 2) * ncols)
ax.set_title(raw.ch_names[idx])
else:
ax.axis('off')
else:
for ax, idx in iter_topography(raw.info, fig_facecolor='white',
axis_facecolor='white',
axis_spinecolor='white'):
cmap = _plot_spectrogram(
                    ax, evoked_tfr.data[idx], evoked_tfr.times, vmin, vmax, freqs,
((evoked.data[idx], emin, emax) if plot_erp else None))
fig.subplots_adjust(right=0.85, hspace=0.3)
cax = fig.add_subplot(position=[0.87, 0.1, 0.05, 0.8])
cax = fig.colorbar(cmap, cax=cax, format='{:.2f}',
ticks=[vmin, vmin / 10, vmin / 100,
vmax / 100, vmax / 10, vmax])
cax.set_label(('Log {} Power {} Normalized'.format(method, baseline)
).title())
fig.suptitle('Time Frequency Decomposition for the {} '
'Event, {} Power'.format(event, baseline.capitalize()))
fig.savefig(plotf, dpi=300)
plt.close(fig)
def _plot_spectrogram(ax, this_tfr, times, vmin, vmax,
freqs, evoked_data, show_yticks=True,
show_ylabel=True, show_xticks=True):
'''Plot a single spectrogram'''
cmap = ax.imshow(this_tfr, cmap='RdYlBu_r', aspect='auto',
extent=[0, this_tfr.shape[1], 0, this_tfr.shape[0]],
norm=SymLogNorm(linthresh=(vmax - vmin) / 100,
vmin=vmin, vmax=vmax))
if evoked_data is not None:
evoked, emin, emax = evoked_data
ax2 = ax.twinx()
ax2.set_yticks([])
ax2.plot(range(this_tfr.shape[1]), evoked, alpha=0.25, color='k')
ax2.set_ylim([emin, emax])
ax.invert_yaxis()
if show_yticks:
ax.set_yticks(np.linspace(0, len(freqs), 5))
ax.set_yticklabels(['{:.2f}'.format(f) for f in
freqs[::-int(len(freqs) / 5)]])
else:
ax.set_yticklabels([])
if show_ylabel:
ax.set_ylabel('Frequency (Hz)')
ax.axvline(np.where(times == 0)[0][0], color='k')
if show_xticks:
ax.set_xlabel('Time (s)')
ax.set_xticks(np.linspace(0, len(times), 3))
ax.set_xticklabels(['{:.1f}'.format(t) for t in
np.linspace(times[0], times[-1], 3)])
else:
ax.set_xticks([])
return cmap
def _plot_bursts(config, events, raw, bursts, j, axes, ch_name):
'''Plot bursts on a single spectrogram'''
min_idx = events[j, 0] + raw.info['sfreq'] * config['tmin']
max_idx = events[j, 0] + raw.info['sfreq'] * config['tmax']
these_bursts = bursts[(bursts['channel'] == ch_name) &
(bursts['burst_end'] > min_idx) &
(bursts['burst_start'] < max_idx)]
if these_bursts.size > 0:
for burst_idx in these_bursts.index:
for start_stop in ['burst_start', 'burst_end']:
if (max_idx > these_bursts.loc[burst_idx,
start_stop] >
min_idx):
axes[j].axvline(
x=these_bursts.loc[burst_idx,
start_stop] - min_idx,
color='green')
|
[
"numpy.log",
"swann.utils.get_config",
"mne.viz.iter_topography",
"mne.time_frequency.EpochsTFR",
"matplotlib.pyplot.close",
"numpy.median",
"numpy.std",
"numpy.concatenate",
"mne.Epochs",
"os.path.isfile",
"numpy.min",
"numpy.max",
"mne.time_frequency.tfr_morlet",
"swann.analyses.find_bursts",
"numpy.where",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.colors.SymLogNorm"
] |
[((2836, 2848), 'swann.utils.get_config', 'get_config', ([], {}), '()\n', (2846, 2848), False, 'from swann.utils import get_config, derivative_fname\n'), ((4064, 4167), 'mne.Epochs', 'Epochs', (['raw', 'events'], {'tmin': "(config['tmin'] - 1)", 'baseline': 'None', 'tmax': "(config['tmax'] + 1)", 'preload': '(True)'}), "(raw, events, tmin=config['tmin'] - 1, baseline=None, tmax=config[\n 'tmax'] + 1, preload=True)\n", (4070, 4167), False, 'from mne import Epochs, EvokedArray\n'), ((4405, 4528), 'mne.Epochs', 'Epochs', (['raw', 'bl_events'], {'tmin': "(config['baseline_tmin'] - 1)", 'baseline': 'None', 'tmax': "(config['baseline_tmax'] + 1)", 'preload': '(True)'}), "(raw, bl_events, tmin=config['baseline_tmin'] - 1, baseline=None,\n tmax=config['baseline_tmax'] + 1, preload=True)\n", (4411, 4528), False, 'from mne import Epochs, EvokedArray\n'), ((13287, 13301), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (13296, 13301), True, 'import matplotlib.pyplot as plt\n'), ((624, 633), 'numpy.log', 'np.log', (['(4)'], {}), '(4)\n', (630, 633), True, 'import numpy as np\n'), ((635, 646), 'numpy.log', 'np.log', (['(250)'], {}), '(250)\n', (641, 646), True, 'import numpy as np\n'), ((3595, 3611), 'os.path.isfile', 'op.isfile', (['plotf'], {}), '(plotf)\n', (3604, 3611), True, 'import os.path as op\n'), ((3822, 3858), 'swann.analyses.find_bursts', 'find_bursts', (['rawf'], {'return_saved': '(True)'}), '(rawf, return_saved=True)\n', (3833, 3858), False, 'from swann.analyses import decompose_tfr, find_bursts, get_bursts\n'), ((5060, 5147), 'mne.time_frequency.tfr_morlet', 'tfr_morlet', (['bl_evoked', 'freqs'], {'n_cycles': 'n_cycles', 'use_fft': 'use_fft', 'return_itc': '(False)'}), '(bl_evoked, freqs, n_cycles=n_cycles, use_fft=use_fft, return_itc\n =False)\n', (5070, 5147), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((5487, 5566), 'mne.time_frequency.tfr_morlet', 'tfr_morlet', (['evoked', 'freqs'], {'n_cycles': 'n_cycles', 'use_fft': 'use_fft', 'return_itc': '(False)'}), '(evoked, freqs, n_cycles=n_cycles, use_fft=use_fft, return_itc=False)\n', (5497, 5566), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((6324, 6403), 'mne.time_frequency.EpochsTFR', 'EpochsTFR', (['epochs.info', 'epochs_data', 'cropped_epochs.times', 'freqs'], {'verbose': '(False)'}), '(epochs.info, epochs_data, cropped_epochs.times, freqs, verbose=False)\n', (6333, 6403), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((6459, 6551), 'mne.time_frequency.EpochsTFR', 'EpochsTFR', (['bl_epochs.info', 'bl_epochs_data', 'cropped_bl_epochs.times', 'freqs'], {'verbose': '(False)'}), '(bl_epochs.info, bl_epochs_data, cropped_bl_epochs.times, freqs,\n verbose=False)\n', (6468, 6551), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((10298, 10324), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (10310, 10324), True, 'import matplotlib.pyplot as plt\n'), ((4873, 4907), 'numpy.median', 'np.median', (['bl_epochs._data'], {'axis': '(0)'}), '(bl_epochs._data, axis=0)\n', (4882, 4907), True, 'import numpy as np\n'), ((5321, 5352), 'numpy.median', 'np.median', (['epochs._data'], {'axis': '(0)'}), '(epochs._data, axis=0)\n', (5330, 5352), True, 'import numpy as np\n'), ((5846, 5880), 'numpy.std', 'np.std', (['bl_evoked_tfr.data'], {'axis': '(2)'}), '(bl_evoked_tfr.data, axis=2)\n', (5852, 5880), True, 'import numpy as np\n'), ((5978, 6009), 'numpy.median', 
'np.median', (['epochs._data'], {'axis': '(0)'}), '(epochs._data, axis=0)\n', (5987, 6009), True, 'import numpy as np\n'), ((9834, 9857), 'numpy.min', 'np.min', (['epochs_tfr.data'], {}), '(epochs_tfr.data)\n', (9840, 9857), True, 'import numpy as np\n'), ((9859, 9882), 'numpy.max', 'np.max', (['epochs_tfr.data'], {}), '(epochs_tfr.data)\n', (9865, 9882), True, 'import numpy as np\n'), ((9904, 9932), 'numpy.min', 'np.min', (['cropped_epochs._data'], {}), '(cropped_epochs._data)\n', (9910, 9932), True, 'import numpy as np\n'), ((9934, 9962), 'numpy.max', 'np.max', (['cropped_epochs._data'], {}), '(cropped_epochs._data)\n', (9940, 9962), True, 'import numpy as np\n'), ((11041, 11080), 'numpy.median', 'np.median', (['cropped_epochs._data'], {'axis': '(0)'}), '(cropped_epochs._data, axis=0)\n', (11050, 11080), True, 'import numpy as np\n'), ((11374, 11397), 'numpy.min', 'np.min', (['evoked_tfr.data'], {}), '(evoked_tfr.data)\n', (11380, 11397), True, 'import numpy as np\n'), ((11399, 11422), 'numpy.max', 'np.max', (['evoked_tfr.data'], {}), '(evoked_tfr.data)\n', (11405, 11422), True, 'import numpy as np\n'), ((11588, 11614), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {}), '(nrows, ncols)\n', (11600, 11614), True, 'import matplotlib.pyplot as plt\n'), ((12360, 12461), 'mne.viz.iter_topography', 'iter_topography', (['raw.info'], {'fig_facecolor': '"""white"""', 'axis_facecolor': '"""white"""', 'axis_spinecolor': '"""white"""'}), "(raw.info, fig_facecolor='white', axis_facecolor='white',\n axis_spinecolor='white')\n", (12375, 12461), False, 'from mne.viz import iter_topography\n'), ((13677, 13740), 'matplotlib.colors.SymLogNorm', 'SymLogNorm', ([], {'linthresh': '((vmax - vmin) / 100)', 'vmin': 'vmin', 'vmax': 'vmax'}), '(linthresh=(vmax - vmin) / 100, vmin=vmin, vmax=vmax)\n', (13687, 13740), False, 'from matplotlib.colors import BASE_COLORS, SymLogNorm\n'), ((5723, 5760), 'numpy.median', 'np.median', (['bl_evoked_tfr.data'], {'axis': '(2)'}), '(bl_evoked_tfr.data, axis=2)\n', (5732, 5760), True, 'import numpy as np\n'), ((7714, 7841), 'mne.time_frequency.tfr_morlet', 'tfr_morlet', (['this_bl_epochs', '[freq]'], {'n_cycles': 'this_n_cycles', 'use_fft': 'use_fft', 'average': '(False)', 'return_itc': '(False)', 'verbose': '(False)'}), '(this_bl_epochs, [freq], n_cycles=this_n_cycles, use_fft=use_fft,\n average=False, return_itc=False, verbose=False)\n', (7724, 7841), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((8098, 8222), 'mne.time_frequency.tfr_morlet', 'tfr_morlet', (['this_epochs', '[freq]'], {'n_cycles': 'this_n_cycles', 'use_fft': 'use_fft', 'average': '(False)', 'return_itc': '(False)', 'verbose': '(False)'}), '(this_epochs, [freq], n_cycles=this_n_cycles, use_fft=use_fft,\n average=False, return_itc=False, verbose=False)\n', (8108, 8222), False, 'from mne.time_frequency import tfr_morlet, EpochsTFR, AverageTFR\n'), ((8427, 8498), 'numpy.concatenate', 'np.concatenate', (['[this_bl_epochs_tfr.data, this_epochs_tfr.data]'], {'axis': '(3)'}), '([this_bl_epochs_tfr.data, this_epochs_tfr.data], axis=3)\n', (8441, 8498), True, 'import numpy as np\n'), ((11108, 11138), 'numpy.median', 'np.median', (['evoked_data'], {'axis': '(1)'}), '(evoked_data, axis=1)\n', (11117, 11138), True, 'import numpy as np\n'), ((11312, 11331), 'numpy.min', 'np.min', (['evoked.data'], {}), '(evoked.data)\n', (11318, 11331), True, 'import numpy as np\n'), ((11333, 11352), 'numpy.max', 'np.max', (['evoked.data'], {}), '(evoked.data)\n', (11339, 11352), True, 
'import numpy as np\n'), ((14339, 14359), 'numpy.where', 'np.where', (['(times == 0)'], {}), '(times == 0)\n', (14347, 14359), True, 'import numpy as np\n'), ((8703, 8731), 'numpy.median', 'np.median', (['full_data'], {'axis': '(3)'}), '(full_data, axis=3)\n', (8712, 8731), True, 'import numpy as np\n'), ((8834, 8859), 'numpy.std', 'np.std', (['full_data'], {'axis': '(3)'}), '(full_data, axis=3)\n', (8840, 8859), True, 'import numpy as np\n'), ((9072, 9100), 'numpy.median', 'np.median', (['full_data'], {'axis': '(3)'}), '(full_data, axis=3)\n', (9081, 9100), True, 'import numpy as np\n'), ((9206, 9231), 'numpy.std', 'np.std', (['full_data'], {'axis': '(3)'}), '(full_data, axis=3)\n', (9212, 9231), True, 'import numpy as np\n'), ((9327, 9370), 'numpy.median', 'np.median', (['epochs_tfr.data[:, i, j]'], {'axis': '(0)'}), '(epochs_tfr.data[:, i, j], axis=0)\n', (9336, 9370), True, 'import numpy as np\n'), ((9460, 9506), 'numpy.median', 'np.median', (['bl_epochs_tfr.data[:, i, j]'], {'axis': '(0)'}), '(bl_epochs_tfr.data[:, i, j], axis=0)\n', (9469, 9506), True, 'import numpy as np\n'), ((9721, 9747), 'numpy.std', 'np.std', (['this_bl_evoked_tfr'], {}), '(this_bl_evoked_tfr)\n', (9727, 9747), True, 'import numpy as np\n'), ((14569, 14604), 'numpy.linspace', 'np.linspace', (['times[0]', 'times[-1]', '(3)'], {}), '(times[0], times[-1], 3)\n', (14580, 14604), True, 'import numpy as np\n'), ((9646, 9675), 'numpy.median', 'np.median', (['this_bl_evoked_tfr'], {}), '(this_bl_evoked_tfr)\n', (9655, 9675), True, 'import numpy as np\n')]
|
# -------------------------------------------------------------------------------------------------------------------- #
# Import packages
# -------------------------------------------------------------------------------------------------------------------- #
import numpy as np
from .nurbs_surface import NurbsSurface
# -------------------------------------------------------------------------------------------------------------------- #
# Define the bilinear NURBS surface class
# -------------------------------------------------------------------------------------------------------------------- #
class NurbsSurfaceBilinear:
""" Create a NURBS representation of the bilinear patch defined by corners P00, P01, P10, and P11
Create a NURBS representation of the bilinear patch
S(u,v) = (1-v)*[(1-u)*P00 + u*P01] + v*[(1-u)*P10 + u*P11]
Note that a bilinear patch is a ruled surface with segments (P00, P01) and (P10, P11) as generating curves
S(u,v) = (1-v)*C1(u) + v*C2(u)
C1(u) = (1-u)*P00 + u*P01
C2(u) = (1-u)*P10 + u*P11
Parameters
----------
P00, P01, P10, P11 : ndarrays with shape (ndim,)
Coordinates of the corner points defining the bilinear surface (ndim=3)
References
----------
The NURBS book. Chapter 8.2
<NAME> and <NAME>
Springer, second edition
"""
def __init__(self, P00, P01, P10, P11):
# Declare input variables as instance variables
self.P00 = P00
self.P01 = P01
self.P10 = P10
self.P11 = P11
self.ndim = 3
# Check the number of dimensions of the problem
ndims = [np.shape(P00)[0], np.shape(P01)[0], np.shape(P10)[0], np.shape(P11)[0]]
if any([ndim != 3 for ndim in ndims]):
raise Exception("The input points must be three-dimensional")
# Make the bilinear patch NURBS representation
self.NurbsSurface = None
self.make_nurbs_surface()
def make_nurbs_surface(self):
""" Make a NURBS surface representation of the bilinear surface """
# Define the array of control points
n_dim, n, m = self.ndim, 2, 2
P = np.zeros((n_dim, n, m))
P[:, 0, 0] = self.P00
P[:, 1, 0] = self.P01
P[:, 0, 1] = self.P10
P[:, 1, 1] = self.P11
# Create the NURBS surface
self.NurbsSurface = NurbsSurface(control_points=P)
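
# Quick sanity check of the bilinear formula (illustrative corners, not part of
# the class): with P00=[0,0,0], P01=[1,0,0], P10=[0,1,0], P11=[1,1,1],
# S(0.5, 0.5) = 0.25*(P00 + P01 + P10 + P11) = [0.5, 0.5, 0.25], i.e. the
# parametric midpoint is the average of the four corners.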
|
[
"numpy.shape",
"numpy.zeros"
] |
[((2206, 2229), 'numpy.zeros', 'np.zeros', (['(n_dim, n, m)'], {}), '((n_dim, n, m))\n', (2214, 2229), True, 'import numpy as np\n'), ((1681, 1694), 'numpy.shape', 'np.shape', (['P00'], {}), '(P00)\n', (1689, 1694), True, 'import numpy as np\n'), ((1699, 1712), 'numpy.shape', 'np.shape', (['P01'], {}), '(P01)\n', (1707, 1712), True, 'import numpy as np\n'), ((1717, 1730), 'numpy.shape', 'np.shape', (['P10'], {}), '(P10)\n', (1725, 1730), True, 'import numpy as np\n'), ((1735, 1748), 'numpy.shape', 'np.shape', (['P11'], {}), '(P11)\n', (1743, 1748), True, 'import numpy as np\n')]
|
from hypothesis import HealthCheck
from hypothesis import given, settings
from hypothesis.extra import numpy as hnp
from pytiff import *
import hypothesis.strategies as st
import numpy as np
import pytest
import subprocess
import tifffile
from skimage.data import coffee
def test_write_rgb(tmpdir_factory):
img = coffee()
filename = str(tmpdir_factory.mktemp("write").join("rgb_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(img, method="tile")
with Tiff(filename) as handle:
data = handle[:]
assert np.all(img == data[:, :, :3])
with Tiff(filename, "w") as handle:
handle.write(img, method="scanline")
with Tiff(filename) as handle:
data = handle[:]
assert np.all(img == data[:, :, :3])
# scanline integer tests
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_scanline(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="scanline")
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_scanline_set_rows_per_strip(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_img.tif"))
rows_per_strip = 1
with Tiff(filename, "w") as handle:
handle.write(data, method="scanline", rows_per_strip=rows_per_strip)
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
assert rows_per_strip == handle[0].tags["rows_per_strip"].value
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=20, max_side=20)))
def test_write_int_slices_scanline(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_img_scanline.tif"))
with Tiff(filename, "w") as handle:
handle.write(data[:, :], method="scanline")
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data[:,:], img)
# tile integer tests
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_tile(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("int_tile_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="tile", tile_width=16, tile_length=16)
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=hnp.floating_dtypes(endianness="="),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50), elements=st.floats(0, 1)))
def test_write_float_scanline(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("float_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="scanline")
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=hnp.floating_dtypes(endianness="="),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50), elements=st.floats(0, 1)))
def test_write_float_tile(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("float_tile_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="tile", tile_length=16, tile_width=16)
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img)
@settings(buffer_size=11000000)
@given(data=hnp.arrays(dtype=st.one_of(hnp.integer_dtypes(endianness="="), hnp.unsigned_integer_dtypes(endianness="=")),
shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_append_int_tile(data, tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("append_img.tif"))
with Tiff(filename, "w") as handle:
handle.write(data, method="tile", tile_width=16, tile_length=16)
with Tiff(filename, "a") as handle:
handle.write(data, method="tile", tile_width=16, tile_length=16)
with Tiff(filename, "r") as handle:
assert handle.number_of_pages == 2
with tifffile.TiffFile(filename) as handle:
img = handle.asarray()
np.testing.assert_array_equal(data, img[0])
np.testing.assert_array_equal(data, img[1])
def test_write_chunk(tmpdir_factory):
    filename = str(tmpdir_factory.mktemp("write").join("chunk_img.tif"))
data1 = np.ones((64,64), dtype=np.uint8) * 1
data2 = np.ones((64,64), dtype=np.uint8) * 2
data3 = np.ones((64,64), dtype=np.uint8) * 3
data4 = np.ones((64,64), dtype=np.uint8) * 4
with Tiff(filename, "w") as handle:
chunks = [data1, data2, data3, data4]
handle.new_page((300, 300), dtype=np.uint8, tile_length=16, tile_width=16)
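        # Lay the chunks out left to right across the 300x300 page, wrapping
        # to the next block-row once a chunk would pass the right edge.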
row = 0
col = 0
max_row_end = 0
positions = []
for c in chunks:
shape = c.shape
row_end, col_end = row + shape[0], col + shape[1]
max_row_end = max(max_row_end, row_end)
handle[row:row_end, col:col_end] = c
# save for reading chunks
positions.append([row, row_end, col, col_end])
if col_end >= handle.shape[1]:
col = 0
row = max_row_end
else:
col = col_end
handle.save_page()
with Tiff(filename) as handle:
for pos, chunk in zip(positions, chunks):
row, row_end, col, col_end = pos
data = handle[row:row_end, col:col_end]
assert np.all(data == chunk)
with Tiff(filename) as handle:
with pytest.raises(ValueError):
handle.new_page((50, 50), np.dtype("uint8"))
handle[:, :] = np.random.rand(50, 50)
handle.save_page()
def test_write_chunk_multiple_pages(tmpdir_factory):
filename = str(tmpdir_factory.mktemp("write").join("multi_page_chunk_img.tif"))
data1 = np.ones((64,64), dtype=np.uint8) * 1
data2 = np.ones((64,64), dtype=np.uint8) * 2
data3 = np.ones((64,64), dtype=np.uint8) * 3
data4 = np.ones((64,64), dtype=np.uint8) * 4
    with Tiff(filename, "w") as handle:
chunks = [data1, data2, data3, data4]
for c in chunks:
shape = c.shape
handle.new_page(shape, dtype=np.uint8, tile_length=16, tile_width=16)
handle[:] = c
with Tiff(filename) as handle:
for page, chunk in enumerate(chunks):
handle.set_page(page)
data = handle[:]
assert data.shape == chunk.shape
assert np.all(data == chunk)
|
[
"numpy.testing.assert_array_equal",
"hypothesis.extra.numpy.integer_dtypes",
"tifffile.TiffFile",
"numpy.dtype",
"numpy.ones",
"hypothesis.extra.numpy.unsigned_integer_dtypes",
"hypothesis.strategies.floats",
"hypothesis.extra.numpy.floating_dtypes",
"hypothesis.settings",
"pytest.raises",
"numpy.random.rand",
"hypothesis.extra.numpy.array_shapes",
"skimage.data.coffee",
"numpy.all"
] |
[((803, 833), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (811, 833), False, 'from hypothesis import given, settings\n'), ((1373, 1403), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (1381, 1403), False, 'from hypothesis import given, settings\n'), ((2088, 2118), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (2096, 2118), False, 'from hypothesis import given, settings\n'), ((2707, 2737), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (2715, 2737), False, 'from hypothesis import given, settings\n'), ((3305, 3335), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (3313, 3335), False, 'from hypothesis import given, settings\n'), ((3850, 3880), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (3858, 3880), False, 'from hypothesis import given, settings\n'), ((4423, 4453), 'hypothesis.settings', 'settings', ([], {'buffer_size': '(11000000)'}), '(buffer_size=11000000)\n', (4431, 4453), False, 'from hypothesis import given, settings\n'), ((318, 326), 'skimage.data.coffee', 'coffee', ([], {}), '()\n', (324, 326), False, 'from skimage.data import coffee\n'), ((554, 583), 'numpy.all', 'np.all', (['(img == data[:, :, :3])'], {}), '(img == data[:, :, :3])\n', (560, 583), True, 'import numpy as np\n'), ((745, 774), 'numpy.all', 'np.all', (['(img == data[:, :, :3])'], {}), '(img == data[:, :, :3])\n', (751, 774), True, 'import numpy as np\n'), ((1252, 1279), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (1269, 1279), False, 'import tifffile\n'), ((1330, 1370), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img'], {}), '(data, img)\n', (1359, 1370), True, 'import numpy as np\n'), ((1895, 1922), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (1912, 1922), False, 'import tifffile\n'), ((1973, 2013), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img'], {}), '(data, img)\n', (2002, 2013), True, 'import numpy as np\n'), ((2559, 2586), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (2576, 2586), False, 'import tifffile\n'), ((2637, 2683), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data[:, :]', 'img'], {}), '(data[:, :], img)\n', (2666, 2683), True, 'import numpy as np\n'), ((3184, 3211), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (3201, 3211), False, 'import tifffile\n'), ((3262, 3302), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img'], {}), '(data, img)\n', (3291, 3302), True, 'import numpy as np\n'), ((3729, 3756), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (3746, 3756), False, 'import tifffile\n'), ((3807, 3847), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img'], {}), '(data, img)\n', (3836, 3847), True, 'import numpy as np\n'), ((4302, 4329), 'tifffile.TiffFile', 'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (4319, 4329), False, 'import tifffile\n'), ((4380, 4420), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img'], {}), '(data, img)\n', (4409, 4420), True, 'import numpy as np\n'), ((5097, 5124), 'tifffile.TiffFile', 
'tifffile.TiffFile', (['filename'], {}), '(filename)\n', (5114, 5124), False, 'import tifffile\n'), ((5175, 5218), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img[0]'], {}), '(data, img[0])\n', (5204, 5218), True, 'import numpy as np\n'), ((5227, 5270), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['data', 'img[1]'], {}), '(data, img[1])\n', (5256, 5270), True, 'import numpy as np\n'), ((5427, 5460), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (5434, 5460), True, 'import numpy as np\n'), ((5476, 5509), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (5483, 5509), True, 'import numpy as np\n'), ((5525, 5558), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (5532, 5558), True, 'import numpy as np\n'), ((5574, 5607), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (5581, 5607), True, 'import numpy as np\n'), ((6937, 6970), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (6944, 6970), True, 'import numpy as np\n'), ((6986, 7019), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (6993, 7019), True, 'import numpy as np\n'), ((7035, 7068), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (7042, 7068), True, 'import numpy as np\n'), ((7084, 7117), 'numpy.ones', 'np.ones', (['(64, 64)'], {'dtype': 'np.uint8'}), '((64, 64), dtype=np.uint8)\n', (7091, 7117), True, 'import numpy as np\n'), ((6551, 6572), 'numpy.all', 'np.all', (['(data == chunk)'], {}), '(data == chunk)\n', (6557, 6572), True, 'import numpy as np\n'), ((6622, 6647), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6635, 6647), False, 'import pytest\n'), ((6733, 6755), 'numpy.random.rand', 'np.random.rand', (['(50)', '(50)'], {}), '(50, 50)\n', (6747, 6755), True, 'import numpy as np\n'), ((7577, 7598), 'numpy.all', 'np.all', (['(data == chunk)'], {}), '(data == chunk)\n', (7583, 7598), True, 'import numpy as np\n'), ((965, 1031), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (981, 1031), True, 'from hypothesis.extra import numpy as hnp\n'), ((1535, 1601), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (1551, 1601), True, 'from hypothesis.extra import numpy as hnp\n'), ((2250, 2316), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(20)', 'max_side': '(20)'}), '(min_dims=2, max_dims=2, min_side=20, max_side=20)\n', (2266, 2316), True, 'from hypothesis.extra import numpy as hnp\n'), ((2869, 2935), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (2885, 2935), True, 'from hypothesis.extra import numpy as hnp\n'), ((3365, 3400), 'hypothesis.extra.numpy.floating_dtypes', 'hnp.floating_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (3384, 3400), True, 'from hypothesis.extra import numpy as 
hnp\n'), ((3412, 3478), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (3428, 3478), True, 'from hypothesis.extra import numpy as hnp\n'), ((3489, 3504), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(1)'], {}), '(0, 1)\n', (3498, 3504), True, 'import hypothesis.strategies as st\n'), ((3910, 3945), 'hypothesis.extra.numpy.floating_dtypes', 'hnp.floating_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (3929, 3945), True, 'from hypothesis.extra import numpy as hnp\n'), ((3957, 4023), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (3973, 4023), True, 'from hypothesis.extra import numpy as hnp\n'), ((4034, 4049), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(1)'], {}), '(0, 1)\n', (4043, 4049), True, 'import hypothesis.strategies as st\n'), ((4585, 4651), 'hypothesis.extra.numpy.array_shapes', 'hnp.array_shapes', ([], {'min_dims': '(2)', 'max_dims': '(2)', 'min_side': '(10)', 'max_side': '(50)'}), '(min_dims=2, max_dims=2, min_side=10, max_side=50)\n', (4601, 4651), True, 'from hypothesis.extra import numpy as hnp\n'), ((6687, 6704), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (6695, 6704), True, 'import numpy as np\n'), ((873, 907), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (891, 907), True, 'from hypothesis.extra import numpy as hnp\n'), ((909, 952), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (936, 952), True, 'from hypothesis.extra import numpy as hnp\n'), ((1443, 1477), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (1461, 1477), True, 'from hypothesis.extra import numpy as hnp\n'), ((1479, 1522), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (1506, 1522), True, 'from hypothesis.extra import numpy as hnp\n'), ((2158, 2192), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (2176, 2192), True, 'from hypothesis.extra import numpy as hnp\n'), ((2194, 2237), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (2221, 2237), True, 'from hypothesis.extra import numpy as hnp\n'), ((2777, 2811), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (2795, 2811), True, 'from hypothesis.extra import numpy as hnp\n'), ((2813, 2856), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (2840, 2856), True, 'from hypothesis.extra import numpy as hnp\n'), ((4493, 4527), 'hypothesis.extra.numpy.integer_dtypes', 'hnp.integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (4511, 4527), True, 'from hypothesis.extra import numpy as hnp\n'), ((4529, 4572), 'hypothesis.extra.numpy.unsigned_integer_dtypes', 'hnp.unsigned_integer_dtypes', ([], {'endianness': '"""="""'}), "(endianness='=')\n", (4556, 4572), True, 'from 
hypothesis.extra import numpy as hnp\n')]
|
import numpy as np
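# Euclidean distance between two 3-D points c1 and c2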
def rd(c1, c2):
return np.sqrt((c1[0]-c2[0])**2+(c1[1]-c2[1])**2+(c1[2]-c2[2])**2)
# rbf as global support spline type
# Gaussian spline
def rbf(r):
return np.exp(-r**2)
# Spline polynomial
def rbf1(r, deg):
return r**deg
# Global support Gaussian (identical to rbf above)
def rbf2(r):
return np.exp(-r**2)
# %% codecell
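# Minimal sketch of how the Gaussian kernel above is typically applied:
# evaluate it on the pairwise distances of a few illustrative 3-D points.
pts = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 2.0, 0.0)]
K = np.array([[rbf(rd(a, b)) for b in pts] for a in pts])
# K is symmetric with ones on the diagonal, since rbf(0) = exp(0) = 1.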
|
[
"numpy.exp",
"numpy.sqrt"
] |
[((46, 121), 'numpy.sqrt', 'np.sqrt', (['((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2 + (c1[2] - c2[2]) ** 2)'], {}), '((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2 + (c1[2] - c2[2]) ** 2)\n', (53, 121), True, 'import numpy as np\n'), ((181, 196), 'numpy.exp', 'np.exp', (['(-r ** 2)'], {}), '(-r ** 2)\n', (187, 196), True, 'import numpy as np\n'), ((282, 297), 'numpy.exp', 'np.exp', (['(-r ** 2)'], {}), '(-r ** 2)\n', (288, 297), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
'''This module contains routines to perform Gram-Schmidt orthonormalization on
a sequence of vectors.
'''
import numpy as np
import numpy.linalg as la
def gso(A, overwrite=False, out=None):
'''Performs Gram-Schmidt orthonormalization on a sequence of vectors.
Parameters
----------
A : ndarray
(M x N) ndarray with M <= N. The rows of A contain the sequence of
vectors.
overwrite : bool, optional
If `True`, the matrix A is overwritten.
out : ndarray, optional
(M x N) ndarray with M <= N. The rows of `out` contain the sequence of
orthonormal vectors. If `overwrite = True`, `out` is neglected.
Returns
-------
output : ndarray
(M x N) ndarray with M <= N. The rows of `out` contain the sequence of
orthonormal vectors.
Notes
-----
    See <NAME> and <NAME>, Matrix Computations, 3rd edition, Section 5.2.8,
Algorithm 5.2.5, p. 231.
'''
assert A.shape[0] <= A.shape[1]
M = A.shape[0]
if overwrite:
output = A
else:
if out is not None:
output = out
else:
output = np.zeros_like(A)
output[:,:] = A
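    # Modified Gram-Schmidt sweep: normalize row i, then remove its component
    # from every remaining row before moving on (Algorithm 5.2.5).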
for i in range(M):
output[i,:] = output[i,:]/la.norm(output[i,:])
for j in range(i+1, M):
output[j,:] = output[j,:] - np.dot(output[j,:], output[i,:])*output[i,:]
return output
if __name__ == '__main__':
A = np.random.random((6,6))
print('A')
print(A)
out = gso(A)
print('\n')
print(out)
print('\n')
print(np.dot(out.T, out))
for i in range(A.shape[0]):
for j in range(A.shape[0]):
print(i, j, np.dot(out[i,:], out[j,:]))
print('\n')
|
[
"numpy.dot",
"numpy.random.random",
"numpy.linalg.norm",
"numpy.zeros_like"
] |
[((1456, 1480), 'numpy.random.random', 'np.random.random', (['(6, 6)'], {}), '((6, 6))\n', (1472, 1480), True, 'import numpy as np\n'), ((1582, 1600), 'numpy.dot', 'np.dot', (['out.T', 'out'], {}), '(out.T, out)\n', (1588, 1600), True, 'import numpy as np\n'), ((1161, 1177), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (1174, 1177), True, 'import numpy as np\n'), ((1263, 1284), 'numpy.linalg.norm', 'la.norm', (['output[i, :]'], {}), '(output[i, :])\n', (1270, 1284), True, 'import numpy.linalg as la\n'), ((1694, 1722), 'numpy.dot', 'np.dot', (['out[i, :]', 'out[j, :]'], {}), '(out[i, :], out[j, :])\n', (1700, 1722), True, 'import numpy as np\n'), ((1356, 1390), 'numpy.dot', 'np.dot', (['output[j, :]', 'output[i, :]'], {}), '(output[j, :], output[i, :])\n', (1362, 1390), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from statsmodels.formula.api import ols
import plotly_express
import plotly.graph_objs as go
from plotly.subplots import make_subplots
# Read in data
batter_data = pd.read_csv("~/Desktop/MLB_FA/Data/fg_bat_data.csv")
del batter_data['Age']
print(len(batter_data))
print(batter_data.head())
pitcher_data = pd.read_csv("~/Desktop/MLB_FA/Data/fg_pitch_data.csv")
del pitcher_data['Age']
print(len(pitcher_data))
print(pitcher_data.head())
salary_data = pd.read_csv("~/Desktop/MLB_FA/Data/salary_data.csv")
print(len(salary_data))
injury_data = pd.read_csv("~/Desktop/MLB_FA/Data/injury_data_use.csv")
# Check for whether there is overlap between injury data and the salary data players
# injury_data_players = injury_data['Player'].unique()
# mutual = salary_data[salary_data['Player'].isin(injury_data_players)] # 945 out of 1135 players included
# excl = salary_data[~salary_data['Player'].isin(injury_data_players)]
# print(len(excl['Player'].unique())) # 129 unique players injury data omitted; use mlb.com trans for these
# Discount multi-year deals to net present value at discount rate r
def npv(df, rate):
r = rate
df['Salary'] = pd.to_numeric(df['Salary'])
    df['AAV'] = df['Salary'] / df['Years']
df['NPV'] = 0
df['NPV'] = round(df['AAV'] * (1 - (1 / ((1 + r) ** df['Years']))) / r, 2)
return df
salary_data = npv(salary_data, 0.05)
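# Worked check of the annuity discounting above (illustrative numbers, not from
# the data): a 3-year, $30M deal has AAV = 10.0, and at r = 0.05 its present
# value is 10.0 * (1 - 1/1.05**3) / 0.05 ~= 27.23, which is what NPV stores.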
# Lagged metrics to see if there is carryover value / value in continuity
class Metrics:
def lagged_batter(df):
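        # Shift within each player so y_n1_* holds the previous season's value,
        # y_n2_* the season before that, and so on (rows are assumed ordered by
        # season within each player).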
df['WAR'] = pd.to_numeric(df['WAR'])
df['y_n1_war'] = df.groupby("Name")['WAR'].shift(1)
df['y_n2_war'] = df.groupby("Name")['y_n1_war'].shift(1)
df['y_n3_war'] = df.groupby("Name")['y_n2_war'].shift(1)
df['y_n4_war'] = df.groupby("Name")['y_n3_war'].shift(1)
df['y_n5_war'] = df.groupby("Name")['y_n4_war'].shift(1)
df['y_n6_war'] = df.groupby("Name")['y_n5_war'].shift(1)
df['wOBA'] = pd.to_numeric(df['wOBA'])
df['y_n1_wOBA'] = df.groupby("Name")['wOBA'].shift(1)
df['y_n2_wOBA'] = df.groupby("Name")['y_n1_wOBA'].shift(1)
df['y_n3_wOBA'] = df.groupby("Name")['y_n2_wOBA'].shift(1)
df['y_n4_wOBA'] = df.groupby("Name")['y_n3_wOBA'].shift(1)
df['wRC+'] = pd.to_numeric(df['wRC+'])
df['y_n1_wRC+'] = df.groupby("Name")['wRC+'].shift(1)
df['y_n2_wRC+'] = df.groupby("Name")['y_n1_wRC+'].shift(1)
df['y_n1_war_pa'] = df.groupby("Name")['WAR_PA'].shift(1)
df['y_n2_war_pa'] = df.groupby("Name")['y_n1_war_pa'].shift(1)
df['y_n3_war_pa'] = df.groupby("Name")['y_n2_war_pa'].shift(1)
df['y_n4_war_pa'] = df.groupby("Name")['y_n3_war_pa'].shift(1)
df['y_n5_war_pa'] = df.groupby("Name")['y_n4_war_pa'].shift(1)
df['y_n6_war_pa'] = df.groupby("Name")['y_n5_war_pa'].shift(1)
df["BB%"] = df["BB%"].apply(lambda x: x.replace("%", ""))
df['BB%'] = pd.to_numeric(df['BB%'])
df["K%"] = df["K%"].apply(lambda x: x.replace("%", ""))
df['K%'] = pd.to_numeric(df['K%'])
df.rename(columns={'BB%': 'BBpct', 'K%': 'Kpct'}, inplace=True)
return df
def lagged_pitcher(df):
df['WAR'] = pd.to_numeric(df['WAR'])
df['y_n1_war'] = df.groupby("Name")['WAR'].shift(1)
df['y_n2_war'] = df.groupby("Name")['y_n1_war'].shift(1)
df['y_n3_war'] = df.groupby("Name")['y_n2_war'].shift(1)
df['y_n4_war'] = df.groupby("Name")['y_n3_war'].shift(1)
df['y_n5_war'] = df.groupby("Name")['y_n4_war'].shift(1)
df['y_n6_war'] = df.groupby("Name")['y_n5_war'].shift(1)
# df['ERA-'] = pd.to_numeric(df['ERA-'])
# df['y_n1_ERA-'] = df.groupby("Name")['ERA-'].shift(1)
# df['y_n2_ERA-'] = df.groupby("Name")['y_n1_ERA-'].shift(1)
df['xFIP'] = pd.to_numeric(df['xFIP'])
df['y_n1_xFIP'] = df.groupby("Name")['xFIP'].shift(1)
df['y_n2_xFIP'] = df.groupby("Name")['y_n1_xFIP'].shift(1)
df['y_n1_war_tbf'] = df.groupby("Name")['WAR_TBF'].shift(1)
df['y_n2_war_tbf'] = df.groupby("Name")['y_n1_war_tbf'].shift(1)
df['y_n3_war_tbf'] = df.groupby("Name")['y_n2_war_tbf'].shift(1)
df['y_n4_war_tbf'] = df.groupby("Name")['y_n3_war_tbf'].shift(1)
df['y_n5_war_tbf'] = df.groupby("Name")['y_n4_war_tbf'].shift(1)
df['y_n6_war_tbf'] = df.groupby("Name")['y_n5_war_tbf'].shift(1)
df['BB%'] = df['BB%'].astype(str)
df["BB%"] = df["BB%"].apply(lambda x: x.replace("%", ""))
df['BB%'] = pd.to_numeric(df['BB%'])
df['K%'] = df['K%'].astype(str)
df["K%"] = df["K%"].apply(lambda x: x.replace("%", ""))
df['K%'] = pd.to_numeric(df['K%'])
df['K-BB%'] = df['K-BB%'].astype(str)
df["K-BB%"] = df["K-BB%"].apply(lambda x: x.replace("%", ""))
df['K-BB%'] = pd.to_numeric(df['K-BB%'])
df['SwStr%'] = df['SwStr%'].astype(str)
df["SwStr%"] = df["SwStr%"].apply(lambda x: x.replace("%", ""))
df['SwStr%'] = pd.to_numeric(df['SwStr%'])
df['LOB%'] = df['LOB%'].astype(str)
df["LOB%"] = df["LOB%"].apply(lambda x: x.replace("%", ""))
df['LOB%'] = pd.to_numeric(df['LOB%'])
# df['CB%'] = df['CB%'].astype(str)
# df["CB%"] = df["CB%"].apply(lambda x: x.replace("%", ""))
# df['CB%'] = pd.to_numeric(df['CB%'])
df.rename(columns={'BB%': 'BBpct', 'K%': 'Kpct', 'K-BB%': 'K_minus_BBpct', 'CB%': 'CBpct',
'SwStr%': 'Swstrpct'}, inplace=True)
return df
def fix_position(df):
df['Position'] = np.where(df['Position'] == "OF", "CF", df['Position'])
df['Position'] = np.where((df['Position'] == "LF") | (df['Position'] == "RF"),
"Corner Outfield", df['Position'])
df['Position'] = np.where(df['Position'] == "P", "RP", df['Position'])
# df['Position'] = np.where(df['Position'] == "SP", 1, df['Position'])
# df['Position'] = np.where(df['Position'] == "C", 2, df['Position'])
# df['Position'] = np.where(df['Position'] == "1B", 3, df['Position'])
# df['Position'] = np.where(df['Position'] == "2B", 4, df['Position'])
# df['Position'] = np.where(df['Position'] == "3B", 5, df['Position'])
# df['Position'] = np.where(df['Position'] == "SS", 6, df['Position'])
# df['Position'] = np.where(df['Position'] == "Corner Outfield", 7, df['Position'])
# df['Position'] = np.where(df['Position'] == "CF", 8, df['Position'])
# df['Position'] = np.where(df['Position'] == "RP", 9, df['Position'])
# df['Position'] = np.where(df['Position'] == "DH", 10, df['Position'])
return df
def rate_stats_batter(df):
df['WAR_PA'] = df['WAR'] / df['PA'] # add in rate based WAR (per PA, game played, etc)
df['oWAR_PA'] = df['oWAR'] / df['PA']
df['WAR_PA'] = round(df['WAR_PA'], 3)
df['oWAR_PA'] = round(df['oWAR_PA'], 3)
return df
def rate_stats_pitcher(df):
df['WAR_TBF'] = df['WAR'] / df['TBF'] # add in rate based WAR (per IP, etc)
# df['WAR_IP'] = df['WAR'] / df['IP']
df['wFB_TBF'] = df['wFB'] / df['TBF']
df['WAR_TBF'] = round(df['WAR_TBF'], 3)
# df['WAR_IP'] = round(df['WAR_IP'], 3)
df['wFB_TBF'] = round(df['wFB_TBF'], 3)
return df
def injury_engineering(df):
df['two_year_inj_avg'] = 0
    df.loc[:, "two_year_inj_avg"] = (
        df.groupby("Player")["injury_duration"].shift(1)
        / df.groupby("Player")["injury_duration"].shift(2) - 1)
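    # note: despite its name, this is a year-over-year ratio,
    # duration_{t-1} / duration_{t-2} - 1, not a two-year average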
df['Injury'] = df['Injury'].fillna("None")
df['injury_duration'] = df['injury_duration'].fillna(0)
return df
def short_season_fix_batter(df):
df['WAR_162'] = np.where(df['Year'] == 2021, df['WAR']*2.3, df['WAR'])
df['PA_162'] = np.where(df['Year'] == 2021, df['PA']*2.3, df['PA'])
df['oWAR_162'] = np.where(df['Year'] == 2021, df['oWAR'] * 2.3, df['oWAR'])
df['dWAR_162'] = np.where(df['Year'] == 2021, df['dWAR'] * 2.3, df['dWAR'])
return df
def short_season_fix_pitcher(df):
df['WAR_162'] = np.where(df['Year'] == 2021, df['WAR']*2.3, df['WAR'])
df['IP_162'] = np.where(df['Year'] == 2021, df['IP']*2.3, df['IP'])
return df
class NonLinearVars():
def fg_batter_vars(df):
df['WAR_sq'] = np.where(df['WAR'] > 0, df['WAR'] ** 2, df['WAR'] * 2)
df['y_n1_war_sq'] = np.where(df['y_n1_war'] > 0, df['y_n1_war'] ** 2, df['y_n1_war'] * 2)
df['y_n2_war_sq'] = np.where(df['y_n2_war'] > 0, df['y_n2_war'] ** 2, df['y_n2_war'] * 2)
df['y_n3_war_sq'] = np.where(df['y_n3_war'] > 0, df['y_n3_war'] ** 2, df['y_n3_war'] * 2)
df['y_n4_war_sq'] = np.where(df['y_n4_war'] > 0, df['y_n4_war'] ** 2, df['y_n4_war'] * 2)
df['y_n5_war_sq'] = np.where(df['y_n5_war'] > 0, df['y_n5_war'] ** 2, df['y_n5_war'] * 2)
df['y_n6_war_sq'] = np.where(df['y_n6_war'] > 0, df['y_n6_war'] ** 2, df['y_n6_war'] * 2)
df['y_n1_wOBA_sq'] = df['y_n1_wOBA'] ** 2
df['y_n2_wOBA_sq'] = df['y_n2_wOBA'] ** 2
df['y_n1_wRC+_sq'] = df['y_n1_wRC+'] ** 2
df['y_n2_wRC+_sq'] = df['y_n2_wRC+'] ** 2
return df
def fg_pitcher_vars(df):
df['WAR_sq'] = df['WAR'] **2
df['y_n1_war_sq'] = np.where(df['y_n1_war'] > 0, df['y_n1_war'] ** 2, df['y_n1_war'] * 2)
df['y_n2_war_sq'] = np.where(df['y_n2_war'] > 0, df['y_n2_war'] ** 2, df['y_n2_war'] * 2)
df['y_n3_war_sq'] = np.where(df['y_n3_war'] > 0, df['y_n3_war'] ** 2, df['y_n3_war'] * 2)
df['y_n4_war_sq'] = np.where(df['y_n4_war'] > 0, df['y_n4_war'] ** 2, df['y_n4_war'] * 2)
df['y_n5_war_sq'] = np.where(df['y_n5_war'] > 0, df['y_n5_war'] ** 2, df['y_n5_war'] * 2)
df['y_n6_war_sq'] = np.where(df['y_n6_war'] > 0, df['y_n6_war'] ** 2, df['y_n6_war'] * 2)
# df['ERA-_sq'] = df['ERA-'] **2
# df['y_n1_ERA-_sq'] = df['y_n1_ERA-'] **2
# df['y_n2_ERA-_sq'] = df['y_n2_ERA-'] **2
df['xFIP_sq'] = df['xFIP'] **2
df['y_n1_xFIP_sq'] = df['y_n1_xFIP'] **2
df['y_n2_xFIP_sq'] = df['y_n2_xFIP'] **2
return df
def salary_vars(df):
# df['Age'] = df['Age'].astype('int')
df['Age_sq'] = df['Age'] ** 2
df['Age_log'] = np.log(df['Age'])
return df
# Attach the injury data to the players, merge on player and year
def merge_injuries(salary_df, injury_df):
merged_df = pd.merge(salary_df, injury_df, how='left', left_on=['Player', 'Season'], right_on=['Player', 'Year'])
del merged_df['Year']
return merged_df
# MA
print(len(salary_data))
salary_data = merge_injuries(salary_data, injury_data)
print(len(salary_data))
salary_data['injury_duration'] = salary_data['injury_duration'].fillna(0)
salary_data = Metrics.injury_engineering(salary_data)
# Lag
batter_data = Metrics.short_season_fix_batter(batter_data)
batter_data = Metrics.rate_stats_batter(batter_data)
batter_data = Metrics.lagged_batter(batter_data)
pitcher_data = Metrics.short_season_fix_pitcher(pitcher_data)
pitcher_data = Metrics.rate_stats_pitcher(pitcher_data)
pitcher_data = Metrics.lagged_pitcher(pitcher_data)
# Position fix
salary_data = Metrics.fix_position(salary_data)
# Non Linears
batter_data = NonLinearVars.fg_batter_vars(batter_data)
pitcher_data = NonLinearVars.fg_pitcher_vars(pitcher_data)
salary_data = NonLinearVars.salary_vars(salary_data)
# Merge data sets (one pitcher, one batter)
batter_merged = pd.merge(batter_data, salary_data, left_on=['Name', 'Year'], right_on=['Player', 'Season'])
batter_merged = batter_merged[(batter_merged['Position'] != "SP") & (batter_merged['Position'] != "RP")] # remove P's
print(len(batter_merged))
pitcher_merged = pd.merge(pitcher_data, salary_data, left_on=['Name', 'Year'], right_on=['Player', 'Season'])
pitcher_merged = pitcher_merged[(pitcher_merged['Position'] == "SP") | (pitcher_merged['Position'] == "RP")] # keep P's
print(len(pitcher_merged))
# Begin modeling
# train_data_batter = batter_merged[(batter_merged['Year'] != max(batter_merged['Year']))]
# train_data_pitcher = pitcher_merged[(pitcher_merged['Year'] != max(pitcher_merged['Year']))]
train_data_batter = batter_merged.loc[~batter_merged['NPV'].isnull()]
train_data_pitcher = pitcher_merged.loc[~pitcher_merged['NPV'].isnull()]
test_data_batter = batter_merged[
# (batter_merged['Year'] == max(batter_merged['Year']))
# &
(np.isnan(batter_merged['NPV']))]
test_data_pitcher = pitcher_merged[
# (pitcher_merged['Year'] == max(pitcher_merged['Year']))
# &
(np.isnan(pitcher_merged['NPV']))]
train_data_batter.to_csv('~/Desktop/MLB_FA/Data/train_data_batter.csv', index=False)
train_data_pitcher.to_csv('~/Desktop/MLB_FA/Data/train_data_pitcher.csv', index=False)
test_data_batter.to_csv('~/Desktop/MLB_FA/Data/test_data_batter.csv', index=False)
test_data_pitcher.to_csv('~/Desktop/MLB_FA/Data/test_data_pitcher.csv', index=False)
fit = ols('NPV ~ C(Position) + WAR_sq + WAR + Age', data=train_data_batter).fit()
fit.summary() # 0.597 r-sq, 0.587 adj r-sq
# Plot NPV / WAR to see nonlinear relationship
plot_data = train_data_batter[(train_data_batter['Year'] > 2010)]
fig = plotly_express.scatter(plot_data, x="dWAR", y="NPV", color='Position',
hover_data=['Player', 'Position', 'Year', 'Prev Team'],
title="dWAR, NPV Colored By Position (since {})".format(min(plot_data['Year'])))
fig.show()
# Plot WAR / Rate WAR
plot_data = batter_data[(batter_data['Year'] == 2021) & (batter_data['PA'] > 100)]
fig = plotly_express.scatter(plot_data, x="PA", y="dWAR", color='Name')
fig.update_layout(
hoverlabel=dict(
bgcolor="white",
font_size=10,
font_family="Arial"
)
)
fig.show()
# remove linear WAR
# Let's add a season factor and qualifying offer
fit = ols('NPV ~ C(Position) + C(Season) + WAR_sq + Age + Qual + WAR_PA', data=train_data_batter).fit()
fit.summary()
# Getting better, but there's more unexplained variance. Let's try log of Age and prior season's WAR
# Log Age
fit = ols('NPV ~ C(Position) + C(Season) + y_n1_war_sq + WAR_sq + Age_log + Qual + WAR_PA + y_n1_war_pa',
data=train_data_batter).fit()
fit.summary()
# Still marginally improving. Up to around 50% of the variance explained.
# WAR is a counting stat, let's add in base-running UBR, non-log Age
# UBR
fit = ols('NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + UBR + Qual', data=train_data_batter).fit()
fit.summary()
# Try some new variables (e.g. OPS, ISO, wRC+, wOBA, y_n2_war_sq, etc)
fit = ols('NPV ~ C(Position) + y_n2_war_sq + y_n1_war_sq + WAR_sq + Age + UBR + Qual + wOBA + ISO',
data=train_data_batter).fit()
fit.summary()
# Now let's consider only deals signed for multiple-years
train_data_batter_multiyear = train_data_batter[(train_data_batter['Years'] > 1)]
fit = ols('NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + UBR + Qual', data=train_data_batter_multiyear).fit()
fit.summary()
# Single year only
train_data_batter_single = train_data_batter[(train_data_batter['Years'] == 1)]
fit = ols('NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + Qual', data=train_data_batter_single).fit()
fit.summary()
# So what are teams using to assess these single-year contracts?
fit = ols('NPV ~ ISO + WAR_sq + y_n1_war_sq + y_n2_war_sq + wGDP + BABIP + Qual', data=train_data_batter_single).fit()
fit.summary()
# Now add injury duration
fit = ols('NPV ~ ISO + WAR_sq + y_n1_war_sq + y_n2_war_sq + injury_duration + Qual', data=train_data_batter).fit()
fit.summary()
# Kitchen sink
fit_rate = ols('NPV ~ BBpct + Kpct + AVG + OBP + SLG + OPS + ISO + Spd + BABIP + UBR + wGDP + wSB + wRC + '
'wRAA + wOBA + WAR + dWAR + oWAR + Year + WAR_PA + oWAR_PA + y_n1_war + y_n2_war + y_n3_war + '
'y_n4_war + y_n5_war + y_n6_war + y_n1_wOBA + y_n2_wOBA + y_n3_wOBA + y_n4_wOBA + '
'y_n1_war_pa + y_n2_war_pa + y_n3_war_pa + y_n4_war_pa + y_n5_war_pa + y_n6_war_pa +'
'WAR_sq + y_n1_war_sq + y_n2_war_sq + y_n3_war_sq + y_n4_war_sq + y_n5_war_sq + y_n6_war_sq + '
'y_n1_wOBA_sq + y_n2_wOBA_sq + Position + Age + Qual + injury_duration', data=train_data_batter).fit()
fit_rate.summary()
# Remove unwanted vars
fit_rate = ols('NPV ~ Kpct + Year + y_n1_war +'
'y_n1_wOBA + y_n2_war_pa + WAR_sq + y_n1_war_sq +'
'Age + Qual', data=train_data_batter).fit()
fit_rate.summary()
# PITCHERS
train_data_pitcher['pos_dummy'] = np.where(train_data_pitcher['Position'] == "SP", 1, 0)
fit = ols('NPV ~ WAR_sq + Age + Qual + pos_dummy + FBv + Kpct + y_n1_war_sq', data=train_data_pitcher).fit()
fit.summary()
# Predict WAR
fit = ols('WAR ~ FBv + Kpct + BBpct + FIP + IP + wFB + pos_dummy', data=train_data_pitcher).fit()
fit.summary()
# Let's add in injury duration
train_data_pitcher['injury_duration_log'] = np.log1p(train_data_pitcher['injury_duration'])  # log1p: durations are zero-filled, so plain log would give -inf
fit = ols('NPV ~ WAR_sq + Age + Qual + injury_duration + pos_dummy', data=train_data_pitcher).fit()
fit.summary()
# Add FBv
fit = ols('NPV ~ WAR_sq + Age + Qual + injury_duration + FBv + pos_dummy', data=train_data_pitcher).fit()
fit.summary()
# Kpct
fit = ols('NPV ~ WAR_sq + Age + Qual + injury_duration + FBv + Kpct + pos_dummy + BBpct', data=train_data_pitcher).fit()
fit.summary()
# CBv
fit = ols('NPV ~ Age + Qual + injury_duration + FBv + Kpct + CBv + pos_dummy', data=train_data_pitcher).fit()
fit.summary()
# Rate stats
fit_rate = ols(
'NPV ~ Age + WAR_TBF + y_n1_war_tbf + y_n2_war_tbf + FBv + xFIP_sq + pos_dummy + injury_duration + Qual',
data=train_data_pitcher).fit()
fit_rate.summary()
multi_year_pitcher = train_data_pitcher[(train_data_pitcher['Years'] > 1)]
fit_rate_multi = ols(
'NPV ~ Age + WAR_TBF + y_n1_war_tbf + y_n2_war_tbf + FBv + xFIP_sq + pos_dummy + injury_duration',
data=multi_year_pitcher).fit()
fit_rate_multi.summary()
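# With a chosen specification, the unsigned free agents could then be scored;
# a sketch using the statsmodels results object fit above:
#   test_data_pitcher['NPV_hat'] = fit_rate_multi.predict(test_data_pitcher)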
# TODO: treat Position and Season as random effects; below, summarize yearly totals for plotting
batter_grp = batter_merged.groupby(['Season']).agg({
'NPV': sum,
'WAR': sum,
'Name': 'nunique'
}).reset_index()
batter_grp['NPV'] = batter_grp['NPV'] / 1000000
fig = plotly_express.bar(batter_grp, x="Season", y="NPV",
color_continuous_scale=plotly_express.colors.qualitative.D3,
title="Yearly total NPV and total WAR")
fig.add_trace(go.Scatter(x=batter_grp['Season'], y=batter_grp['WAR'], line=dict(color='red'), name='WAR'))  # px figures have no subplot grid, so no row/col here
fig.show()
# Create figure with secondary y-axis
fig = make_subplots(specs=[[{"secondary_y": True}]])
# Add traces
fig.add_trace(
go.Bar(x=batter_grp['Season'], y=batter_grp['NPV'], name="NPV total"),
secondary_y=False,
)
fig.add_trace(
go.Scatter(x=batter_grp['Season'], y=batter_grp['WAR'], name="WAR total"),
secondary_y=True,
)
# Add figure title
fig.update_layout(
title_text="Yearly total NPV and total WAR"
)
# Set x-axis title
fig.update_xaxes(title_text="Off-Season Year")
# Set y-axes titles
fig.update_yaxes(title_text="<b>NPV</b> total ($ Millions)", secondary_y=False)
fig.update_yaxes(title_text="<b>WAR</b> total", secondary_y=True)
fig.show()
|
[
"numpy.log",
"pandas.read_csv",
"plotly.graph_objs.Scatter",
"pandas.merge",
"numpy.isnan",
"numpy.where",
"statsmodels.formula.api.ols",
"plotly_express.scatter",
"plotly.subplots.make_subplots",
"plotly_express.bar",
"pandas.to_numeric",
"plotly.graph_objs.Bar"
] |
[((204, 256), 'pandas.read_csv', 'pd.read_csv', (['"""~/Desktop/MLB_FA/Data/fg_bat_data.csv"""'], {}), "('~/Desktop/MLB_FA/Data/fg_bat_data.csv')\n", (215, 256), True, 'import pandas as pd\n'), ((346, 400), 'pandas.read_csv', 'pd.read_csv', (['"""~/Desktop/MLB_FA/Data/fg_pitch_data.csv"""'], {}), "('~/Desktop/MLB_FA/Data/fg_pitch_data.csv')\n", (357, 400), True, 'import pandas as pd\n'), ((492, 544), 'pandas.read_csv', 'pd.read_csv', (['"""~/Desktop/MLB_FA/Data/salary_data.csv"""'], {}), "('~/Desktop/MLB_FA/Data/salary_data.csv')\n", (503, 544), True, 'import pandas as pd\n'), ((584, 640), 'pandas.read_csv', 'pd.read_csv', (['"""~/Desktop/MLB_FA/Data/injury_data_use.csv"""'], {}), "('~/Desktop/MLB_FA/Data/injury_data_use.csv')\n", (595, 640), True, 'import pandas as pd\n'), ((11549, 11645), 'pandas.merge', 'pd.merge', (['batter_data', 'salary_data'], {'left_on': "['Name', 'Year']", 'right_on': "['Player', 'Season']"}), "(batter_data, salary_data, left_on=['Name', 'Year'], right_on=[\n 'Player', 'Season'])\n", (11557, 11645), True, 'import pandas as pd\n'), ((11804, 11901), 'pandas.merge', 'pd.merge', (['pitcher_data', 'salary_data'], {'left_on': "['Name', 'Year']", 'right_on': "['Player', 'Season']"}), "(pitcher_data, salary_data, left_on=['Name', 'Year'], right_on=[\n 'Player', 'Season'])\n", (11812, 11901), True, 'import pandas as pd\n'), ((13655, 13720), 'plotly_express.scatter', 'plotly_express.scatter', (['plot_data'], {'x': '"""PA"""', 'y': '"""dWAR"""', 'color': '"""Name"""'}), "(plot_data, x='PA', y='dWAR', color='Name')\n", (13677, 13720), False, 'import plotly_express\n'), ((16600, 16654), 'numpy.where', 'np.where', (["(train_data_pitcher['Position'] == 'SP')", '(1)', '(0)'], {}), "(train_data_pitcher['Position'] == 'SP', 1, 0)\n", (16608, 16654), True, 'import numpy as np\n'), ((16981, 17026), 'numpy.log', 'np.log', (["train_data_pitcher['injury_duration']"], {}), "(train_data_pitcher['injury_duration'])\n", (16987, 17026), True, 'import numpy as np\n'), ((18229, 18391), 'plotly_express.bar', 'plotly_express.bar', (['batter_grp'], {'x': '"""Season"""', 'y': '"""NPV"""', 'color_continuous_scale': 'plotly_express.colors.qualitative.D3', 'title': '"""Yearly total NPV and total WAR"""'}), "(batter_grp, x='Season', y='NPV', color_continuous_scale=\n plotly_express.colors.qualitative.D3, title=\n 'Yearly total NPV and total WAR')\n", (18247, 18391), False, 'import plotly_express\n'), ((18624, 18670), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'specs': "[[{'secondary_y': True}]]"}), "(specs=[[{'secondary_y': True}]])\n", (18637, 18670), False, 'from plotly.subplots import make_subplots\n'), ((1143, 1170), 'pandas.to_numeric', 'pd.to_numeric', (["df['Salary']"], {}), "(df['Salary'])\n", (1156, 1170), True, 'import pandas as pd\n'), ((10515, 10620), 'pandas.merge', 'pd.merge', (['salary_df', 'injury_df'], {'how': '"""left"""', 'left_on': "['Player', 'Season']", 'right_on': "['Player', 'Year']"}), "(salary_df, injury_df, how='left', left_on=['Player', 'Season'],\n right_on=['Player', 'Year'])\n", (10523, 10620), True, 'import pandas as pd\n'), ((12500, 12530), 'numpy.isnan', 'np.isnan', (["batter_merged['NPV']"], {}), "(batter_merged['NPV'])\n", (12508, 12530), True, 'import numpy as np\n'), ((12644, 12675), 'numpy.isnan', 'np.isnan', (["pitcher_merged['NPV']"], {}), "(pitcher_merged['NPV'])\n", (12652, 12675), True, 'import numpy as np\n'), ((18704, 18773), 'plotly.graph_objs.Bar', 'go.Bar', ([], {'x': "batter_grp['Season']", 'y': "batter_grp['NPV']", 'name': '"""NPV 
total"""'}), "(x=batter_grp['Season'], y=batter_grp['NPV'], name='NPV total')\n", (18710, 18773), True, 'import plotly.graph_objs as go\n'), ((18820, 18893), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': "batter_grp['Season']", 'y': "batter_grp['WAR']", 'name': '"""WAR total"""'}), "(x=batter_grp['Season'], y=batter_grp['WAR'], name='WAR total')\n", (18830, 18893), True, 'import plotly.graph_objs as go\n'), ((1511, 1535), 'pandas.to_numeric', 'pd.to_numeric', (["df['WAR']"], {}), "(df['WAR'])\n", (1524, 1535), True, 'import pandas as pd\n'), ((1943, 1968), 'pandas.to_numeric', 'pd.to_numeric', (["df['wOBA']"], {}), "(df['wOBA'])\n", (1956, 1968), True, 'import pandas as pd\n'), ((2254, 2279), 'pandas.to_numeric', 'pd.to_numeric', (["df['wRC+']"], {}), "(df['wRC+'])\n", (2267, 2279), True, 'import pandas as pd\n'), ((2918, 2942), 'pandas.to_numeric', 'pd.to_numeric', (["df['BB%']"], {}), "(df['BB%'])\n", (2931, 2942), True, 'import pandas as pd\n'), ((3026, 3049), 'pandas.to_numeric', 'pd.to_numeric', (["df['K%']"], {}), "(df['K%'])\n", (3039, 3049), True, 'import pandas as pd\n'), ((3190, 3214), 'pandas.to_numeric', 'pd.to_numeric', (["df['WAR']"], {}), "(df['WAR'])\n", (3203, 3214), True, 'import pandas as pd\n'), ((3805, 3830), 'pandas.to_numeric', 'pd.to_numeric', (["df['xFIP']"], {}), "(df['xFIP'])\n", (3818, 3830), True, 'import pandas as pd\n'), ((4523, 4547), 'pandas.to_numeric', 'pd.to_numeric', (["df['BB%']"], {}), "(df['BB%'])\n", (4536, 4547), True, 'import pandas as pd\n'), ((4672, 4695), 'pandas.to_numeric', 'pd.to_numeric', (["df['K%']"], {}), "(df['K%'])\n", (4685, 4695), True, 'import pandas as pd\n'), ((4835, 4861), 'pandas.to_numeric', 'pd.to_numeric', (["df['K-BB%']"], {}), "(df['K-BB%'])\n", (4848, 4861), True, 'import pandas as pd\n'), ((5006, 5033), 'pandas.to_numeric', 'pd.to_numeric', (["df['SwStr%']"], {}), "(df['SwStr%'])\n", (5019, 5033), True, 'import pandas as pd\n'), ((5168, 5193), 'pandas.to_numeric', 'pd.to_numeric', (["df['LOB%']"], {}), "(df['LOB%'])\n", (5181, 5193), True, 'import pandas as pd\n'), ((5589, 5643), 'numpy.where', 'np.where', (["(df['Position'] == 'OF')", '"""CF"""', "df['Position']"], {}), "(df['Position'] == 'OF', 'CF', df['Position'])\n", (5597, 5643), True, 'import numpy as np\n'), ((5669, 5769), 'numpy.where', 'np.where', (["((df['Position'] == 'LF') | (df['Position'] == 'RF'))", '"""Corner Outfield"""', "df['Position']"], {}), "((df['Position'] == 'LF') | (df['Position'] == 'RF'),\n 'Corner Outfield', df['Position'])\n", (5677, 5769), True, 'import numpy as np\n'), ((5825, 5878), 'numpy.where', 'np.where', (["(df['Position'] == 'P')", '"""RP"""', "df['Position']"], {}), "(df['Position'] == 'P', 'RP', df['Position'])\n", (5833, 5878), True, 'import numpy as np\n'), ((7799, 7855), 'numpy.where', 'np.where', (["(df['Year'] == 2021)", "(df['WAR'] * 2.3)", "df['WAR']"], {}), "(df['Year'] == 2021, df['WAR'] * 2.3, df['WAR'])\n", (7807, 7855), True, 'import numpy as np\n'), ((7877, 7931), 'numpy.where', 'np.where', (["(df['Year'] == 2021)", "(df['PA'] * 2.3)", "df['PA']"], {}), "(df['Year'] == 2021, df['PA'] * 2.3, df['PA'])\n", (7885, 7931), True, 'import numpy as np\n'), ((7955, 8013), 'numpy.where', 'np.where', (["(df['Year'] == 2021)", "(df['oWAR'] * 2.3)", "df['oWAR']"], {}), "(df['Year'] == 2021, df['oWAR'] * 2.3, df['oWAR'])\n", (7963, 8013), True, 'import numpy as np\n'), ((8039, 8097), 'numpy.where', 'np.where', (["(df['Year'] == 2021)", "(df['dWAR'] * 2.3)", "df['dWAR']"], {}), "(df['Year'] == 2021, df['dWAR'] * 2.3, 
df['dWAR'])\n", (8047, 8097), True, 'import numpy as np\n'), ((8179, 8235), 'numpy.where', 'np.where', (["(df['Year'] == 2021)", "(df['WAR'] * 2.3)", "df['WAR']"], {}), "(df['Year'] == 2021, df['WAR'] * 2.3, df['WAR'])\n", (8187, 8235), True, 'import numpy as np\n'), ((8257, 8311), 'numpy.where', 'np.where', (["(df['Year'] == 2021)", "(df['IP'] * 2.3)", "df['IP']"], {}), "(df['Year'] == 2021, df['IP'] * 2.3, df['IP'])\n", (8265, 8311), True, 'import numpy as np\n'), ((8404, 8458), 'numpy.where', 'np.where', (["(df['WAR'] > 0)", "(df['WAR'] ** 2)", "(df['WAR'] * 2)"], {}), "(df['WAR'] > 0, df['WAR'] ** 2, df['WAR'] * 2)\n", (8412, 8458), True, 'import numpy as np\n'), ((8487, 8556), 'numpy.where', 'np.where', (["(df['y_n1_war'] > 0)", "(df['y_n1_war'] ** 2)", "(df['y_n1_war'] * 2)"], {}), "(df['y_n1_war'] > 0, df['y_n1_war'] ** 2, df['y_n1_war'] * 2)\n", (8495, 8556), True, 'import numpy as np\n'), ((8585, 8654), 'numpy.where', 'np.where', (["(df['y_n2_war'] > 0)", "(df['y_n2_war'] ** 2)", "(df['y_n2_war'] * 2)"], {}), "(df['y_n2_war'] > 0, df['y_n2_war'] ** 2, df['y_n2_war'] * 2)\n", (8593, 8654), True, 'import numpy as np\n'), ((8683, 8752), 'numpy.where', 'np.where', (["(df['y_n3_war'] > 0)", "(df['y_n3_war'] ** 2)", "(df['y_n3_war'] * 2)"], {}), "(df['y_n3_war'] > 0, df['y_n3_war'] ** 2, df['y_n3_war'] * 2)\n", (8691, 8752), True, 'import numpy as np\n'), ((8781, 8850), 'numpy.where', 'np.where', (["(df['y_n4_war'] > 0)", "(df['y_n4_war'] ** 2)", "(df['y_n4_war'] * 2)"], {}), "(df['y_n4_war'] > 0, df['y_n4_war'] ** 2, df['y_n4_war'] * 2)\n", (8789, 8850), True, 'import numpy as np\n'), ((8879, 8948), 'numpy.where', 'np.where', (["(df['y_n5_war'] > 0)", "(df['y_n5_war'] ** 2)", "(df['y_n5_war'] * 2)"], {}), "(df['y_n5_war'] > 0, df['y_n5_war'] ** 2, df['y_n5_war'] * 2)\n", (8887, 8948), True, 'import numpy as np\n'), ((8977, 9046), 'numpy.where', 'np.where', (["(df['y_n6_war'] > 0)", "(df['y_n6_war'] ** 2)", "(df['y_n6_war'] * 2)"], {}), "(df['y_n6_war'] > 0, df['y_n6_war'] ** 2, df['y_n6_war'] * 2)\n", (8985, 9046), True, 'import numpy as np\n'), ((9360, 9429), 'numpy.where', 'np.where', (["(df['y_n1_war'] > 0)", "(df['y_n1_war'] ** 2)", "(df['y_n1_war'] * 2)"], {}), "(df['y_n1_war'] > 0, df['y_n1_war'] ** 2, df['y_n1_war'] * 2)\n", (9368, 9429), True, 'import numpy as np\n'), ((9458, 9527), 'numpy.where', 'np.where', (["(df['y_n2_war'] > 0)", "(df['y_n2_war'] ** 2)", "(df['y_n2_war'] * 2)"], {}), "(df['y_n2_war'] > 0, df['y_n2_war'] ** 2, df['y_n2_war'] * 2)\n", (9466, 9527), True, 'import numpy as np\n'), ((9556, 9625), 'numpy.where', 'np.where', (["(df['y_n3_war'] > 0)", "(df['y_n3_war'] ** 2)", "(df['y_n3_war'] * 2)"], {}), "(df['y_n3_war'] > 0, df['y_n3_war'] ** 2, df['y_n3_war'] * 2)\n", (9564, 9625), True, 'import numpy as np\n'), ((9654, 9723), 'numpy.where', 'np.where', (["(df['y_n4_war'] > 0)", "(df['y_n4_war'] ** 2)", "(df['y_n4_war'] * 2)"], {}), "(df['y_n4_war'] > 0, df['y_n4_war'] ** 2, df['y_n4_war'] * 2)\n", (9662, 9723), True, 'import numpy as np\n'), ((9752, 9821), 'numpy.where', 'np.where', (["(df['y_n5_war'] > 0)", "(df['y_n5_war'] ** 2)", "(df['y_n5_war'] * 2)"], {}), "(df['y_n5_war'] > 0, df['y_n5_war'] ** 2, df['y_n5_war'] * 2)\n", (9760, 9821), True, 'import numpy as np\n'), ((9850, 9919), 'numpy.where', 'np.where', (["(df['y_n6_war'] > 0)", "(df['y_n6_war'] ** 2)", "(df['y_n6_war'] * 2)"], {}), "(df['y_n6_war'] > 0, df['y_n6_war'] ** 2, df['y_n6_war'] * 2)\n", (9858, 9919), True, 'import numpy as np\n'), ((10352, 10369), 'numpy.log', 'np.log', (["df['Age']"], 
{}), "(df['Age'])\n", (10358, 10369), True, 'import numpy as np\n'), ((13026, 13095), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ C(Position) + WAR_sq + WAR + Age"""'], {'data': 'train_data_batter'}), "('NPV ~ C(Position) + WAR_sq + WAR + Age', data=train_data_batter)\n", (13029, 13095), False, 'from statsmodels.formula.api import ols\n'), ((13931, 14027), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ C(Position) + C(Season) + WAR_sq + Age + Qual + WAR_PA"""'], {'data': 'train_data_batter'}), "('NPV ~ C(Position) + C(Season) + WAR_sq + Age + Qual + WAR_PA', data=\n train_data_batter)\n", (13934, 14027), False, 'from statsmodels.formula.api import ols\n'), ((14161, 14290), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ C(Position) + C(Season) + y_n1_war_sq + WAR_sq + Age_log + Qual + WAR_PA + y_n1_war_pa"""'], {'data': 'train_data_batter'}), "('NPV ~ C(Position) + C(Season) + y_n1_war_sq + WAR_sq + Age_log + Qual + WAR_PA + y_n1_war_pa'\n , data=train_data_batter)\n", (14164, 14290), False, 'from statsmodels.formula.api import ols\n'), ((14472, 14568), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + UBR + Qual"""'], {'data': 'train_data_batter'}), "('NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + UBR + Qual', data=\n train_data_batter)\n", (14475, 14568), False, 'from statsmodels.formula.api import ols\n'), ((14662, 14785), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ C(Position) + y_n2_war_sq + y_n1_war_sq + WAR_sq + Age + UBR + Qual + wOBA + ISO"""'], {'data': 'train_data_batter'}), "('NPV ~ C(Position) + y_n2_war_sq + y_n1_war_sq + WAR_sq + Age + UBR + Qual + wOBA + ISO'\n , data=train_data_batter)\n", (14665, 14785), False, 'from statsmodels.formula.api import ols\n'), ((14958, 15064), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + UBR + Qual"""'], {'data': 'train_data_batter_multiyear'}), "('NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + UBR + Qual', data=\n train_data_batter_multiyear)\n", (14961, 15064), False, 'from statsmodels.formula.api import ols\n'), ((15186, 15283), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + Qual"""'], {'data': 'train_data_batter_single'}), "('NPV ~ C(Position) + y_n1_war_sq + WAR_sq + Age + Qual', data=\n train_data_batter_single)\n", (15189, 15283), False, 'from statsmodels.formula.api import ols\n'), ((15372, 15482), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ ISO + WAR_sq + y_n1_war_sq + y_n2_war_sq + wGDP + BABIP + Qual"""'], {'data': 'train_data_batter_single'}), "('NPV ~ ISO + WAR_sq + y_n1_war_sq + y_n2_war_sq + wGDP + BABIP + Qual',\n data=train_data_batter_single)\n", (15375, 15482), False, 'from statsmodels.formula.api import ols\n'), ((15532, 15638), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ ISO + WAR_sq + y_n1_war_sq + y_n2_war_sq + injury_duration + Qual"""'], {'data': 'train_data_batter'}), "('NPV ~ ISO + WAR_sq + y_n1_war_sq + y_n2_war_sq + injury_duration + Qual',\n data=train_data_batter)\n", (15535, 15638), False, 'from statsmodels.formula.api import ols\n'), ((15682, 16227), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ BBpct + Kpct + AVG + OBP + SLG + OPS + ISO + Spd + BABIP + UBR + wGDP + wSB + wRC + wRAA + wOBA + WAR + dWAR + oWAR + Year + WAR_PA + oWAR_PA + y_n1_war + y_n2_war + y_n3_war + y_n4_war + y_n5_war + y_n6_war + y_n1_wOBA + y_n2_wOBA + y_n3_wOBA + y_n4_wOBA + y_n1_war_pa + y_n2_war_pa + y_n3_war_pa + y_n4_war_pa + y_n5_war_pa + y_n6_war_pa +WAR_sq + 
y_n1_war_sq + y_n2_war_sq + y_n3_war_sq + y_n4_war_sq + y_n5_war_sq + y_n6_war_sq + y_n1_wOBA_sq + y_n2_wOBA_sq + Position + Age + Qual + injury_duration"""'], {'data': 'train_data_batter'}), "('NPV ~ BBpct + Kpct + AVG + OBP + SLG + OPS + ISO + Spd + BABIP + UBR + wGDP + wSB + wRC + wRAA + wOBA + WAR + dWAR + oWAR + Year + WAR_PA + oWAR_PA + y_n1_war + y_n2_war + y_n3_war + y_n4_war + y_n5_war + y_n6_war + y_n1_wOBA + y_n2_wOBA + y_n3_wOBA + y_n4_wOBA + y_n1_war_pa + y_n2_war_pa + y_n3_war_pa + y_n4_war_pa + y_n5_war_pa + y_n6_war_pa +WAR_sq + y_n1_war_sq + y_n2_war_sq + y_n3_war_sq + y_n4_war_sq + y_n5_war_sq + y_n6_war_sq + y_n1_wOBA_sq + y_n2_wOBA_sq + Position + Age + Qual + injury_duration'\n , data=train_data_batter)\n", (15685, 16227), False, 'from statsmodels.formula.api import ols\n'), ((16373, 16497), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ Kpct + Year + y_n1_war +y_n1_wOBA + y_n2_war_pa + WAR_sq + y_n1_war_sq +Age + Qual"""'], {'data': 'train_data_batter'}), "('NPV ~ Kpct + Year + y_n1_war +y_n1_wOBA + y_n2_war_pa + WAR_sq + y_n1_war_sq +Age + Qual'\n , data=train_data_batter)\n", (16376, 16497), False, 'from statsmodels.formula.api import ols\n'), ((16661, 16761), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ WAR_sq + Age + Qual + pos_dummy + FBv + Kpct + y_n1_war_sq"""'], {'data': 'train_data_pitcher'}), "('NPV ~ WAR_sq + Age + Qual + pos_dummy + FBv + Kpct + y_n1_war_sq',\n data=train_data_pitcher)\n", (16664, 16761), False, 'from statsmodels.formula.api import ols\n'), ((16799, 16889), 'statsmodels.formula.api.ols', 'ols', (['"""WAR ~ FBv + Kpct + BBpct + FIP + IP + wFB + pos_dummy"""'], {'data': 'train_data_pitcher'}), "('WAR ~ FBv + Kpct + BBpct + FIP + IP + wFB + pos_dummy', data=\n train_data_pitcher)\n", (16802, 16889), False, 'from statsmodels.formula.api import ols\n'), ((17033, 17125), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ WAR_sq + Age + Qual + injury_duration + pos_dummy"""'], {'data': 'train_data_pitcher'}), "('NPV ~ WAR_sq + Age + Qual + injury_duration + pos_dummy', data=\n train_data_pitcher)\n", (17036, 17125), False, 'from statsmodels.formula.api import ols\n'), ((17158, 17256), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ WAR_sq + Age + Qual + injury_duration + FBv + pos_dummy"""'], {'data': 'train_data_pitcher'}), "('NPV ~ WAR_sq + Age + Qual + injury_duration + FBv + pos_dummy', data=\n train_data_pitcher)\n", (17161, 17256), False, 'from statsmodels.formula.api import ols\n'), ((17286, 17399), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ WAR_sq + Age + Qual + injury_duration + FBv + Kpct + pos_dummy + BBpct"""'], {'data': 'train_data_pitcher'}), "('NPV ~ WAR_sq + Age + Qual + injury_duration + FBv + Kpct + pos_dummy + BBpct'\n , data=train_data_pitcher)\n", (17289, 17399), False, 'from statsmodels.formula.api import ols\n'), ((17428, 17529), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ Age + Qual + injury_duration + FBv + Kpct + CBv + pos_dummy"""'], {'data': 'train_data_pitcher'}), "('NPV ~ Age + Qual + injury_duration + FBv + Kpct + CBv + pos_dummy',\n data=train_data_pitcher)\n", (17431, 17529), False, 'from statsmodels.formula.api import ols\n'), ((17571, 17710), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ Age + WAR_TBF + y_n1_war_tbf + y_n2_war_tbf + FBv + xFIP_sq + pos_dummy + injury_duration + Qual"""'], {'data': 'train_data_pitcher'}), "('NPV ~ Age + WAR_TBF + y_n1_war_tbf + y_n2_war_tbf + FBv + xFIP_sq + pos_dummy + injury_duration + Qual'\n , data=train_data_pitcher)\n", (17574, 17710), False, 'from 
statsmodels.formula.api import ols\n'), ((17833, 17965), 'statsmodels.formula.api.ols', 'ols', (['"""NPV ~ Age + WAR_TBF + y_n1_war_tbf + y_n2_war_tbf + FBv + xFIP_sq + pos_dummy + injury_duration"""'], {'data': 'multi_year_pitcher'}), "('NPV ~ Age + WAR_TBF + y_n1_war_tbf + y_n2_war_tbf + FBv + xFIP_sq + pos_dummy + injury_duration'\n , data=multi_year_pitcher)\n", (17836, 17965), False, 'from statsmodels.formula.api import ols\n')]
|
import wx
import numpy as np
import time
from wx import glcanvas
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.arrays import vbo
from OpenGL.GL import shaders
from readobj import Obj3D
__author__ = '<NAME>'
__version__ = '0.1.0'
vertexShader = """
#version 120
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
"""
fragmentShader = """
#version 120
void main() {
gl_FragColor = vec4( .9, .9, .9, 1 );
}
"""
class GLFrame( glcanvas.GLCanvas ):
"""A simple class for using OpenGL with wxPython."""
near_plane = 0.1
far_plane = 100
world_pos = (0, 0, -6)
world_rot = (0, 0, 0)
def __init__(self, parent):
self.GLinitialized = False
attribList = (glcanvas.WX_GL_RGBA, # RGBA
glcanvas.WX_GL_DOUBLEBUFFER, # Double Buffered
glcanvas.WX_GL_DEPTH_SIZE, 24) # 24 bit
super(GLFrame, self).__init__( parent, attribList=attribList )
#
# Create the canvas
self.context = glcanvas.GLContext( self )
self.left_down = False
#
# Set the event handlers.
self.Bind(wx.EVT_ERASE_BACKGROUND, self.processEraseBackgroundEvent)
self.Bind(wx.EVT_SIZE, self.processSizeEvent)
self.Bind(wx.EVT_PAINT, self.processPaintEvent)
self.Bind(wx.EVT_MOUSEWHEEL, self.processWheelEvent)
self.Bind(wx.EVT_MOTION, self.processMotion)
self.Bind(wx.EVT_LEFT_DOWN, self.processLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.processLeftUp)
#
# Canvas Proxy Methods
def GetGLExtents(self):
"""Get the extents of the OpenGL canvas."""
return self.GetClientSize()
#
# wxPython Window Handlers
def processLeftDown( self, event ):
self.last_pos = event.GetPosition()
self.left_down = True
def processLeftUp( self, event ):
self.left_down = False
def processMotion( self, event ):
if self.left_down:
pos = event.GetPosition()
diff = (pos-self.last_pos)
self.world_rot = ( self.world_rot[0]+diff[1], self.world_rot[1]+diff[0], self.world_rot[2] )
# print( )
self.last_pos = pos
self.Refresh( False )
def processWheelEvent( self, event ):
delta = event.GetWheelRotation() / 100
self.world_pos = ( self.world_pos[0], self.world_pos[1], self.world_pos[2]+delta )
self.Refresh( False )
def processEraseBackgroundEvent( self, event ):
"""Process the erase background event."""
pass # Do nothing, to avoid flashing on MSWin
def processSizeEvent( self, event ):
self.Show()
self.SetCurrent( self.context )
size = self.GetGLExtents()
self.OnReshape( size.width, size.height )
self.Refresh( False )
event.Skip()
def processPaintEvent(self, event):
self.SetCurrent( self.context )
# This is a 'perfect' time to initialize OpenGL ... only if we need to
if not self.GLinitialized:
self.OnInitGL()
self.GLinitialized = True
self.OnDraw()
event.Skip()
#
# GLFrame OpenGL Event Handlers
def OnInitGL(self):
"""Initialize OpenGL for use in the window."""
glClearColor(1, 1, 1, 1)
VERTEX_SHADER = shaders.compileShader( vertexShader, GL_VERTEX_SHADER )
FRAGMENT_SHADER = shaders.compileShader( fragmentShader, GL_FRAGMENT_SHADER )
self.shader = shaders.compileProgram( VERTEX_SHADER, FRAGMENT_SHADER )
        cube = Obj3D( r'testdata\cube.obj' )  # raw string: '\c' is an invalid escape in a plain literal
data = cube.getVerticesFlat()
self.vbo = vbo.VBO( np.array( data, 'f' ) )
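        # A 24-bit depth buffer is requested in __init__ but never enabled; for
        # correct occlusion one would also call glEnable(GL_DEPTH_TEST) here and
        # clear GL_DEPTH_BUFFER_BIT alongside GL_COLOR_BUFFER_BIT in OnDraw.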
def OnReshape( self, width, height ):
"""Reshape the OpenGL viewport based on the dimensions of the window."""
glViewport( 0, 0, width, height )
glMatrixMode( GL_PROJECTION )
glLoadIdentity()
# glOrtho( -0.5, 0.5, -0.5, 0.5, -1, 1 )
        gluPerspective( 45.0, width / max(height, 1), self.near_plane, self.far_plane )  # guard against zero height
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def OnDraw( self ):
glPushMatrix()
glTranslate( self.world_pos[0], self.world_pos[1], self.world_pos[2] )
glRotated( self.world_rot[1], 0, 1, 0 )
glRotated( self.world_rot[0], 1, 0, 0 )
glClear( GL_COLOR_BUFFER_BIT )
shaders.glUseProgram( self.shader )
self.vbo.bind()
        glEnableClientState( GL_VERTEX_ARRAY )
glVertexPointerf( self.vbo )
glDrawArrays( GL_TRIANGLES, 0, len( self.vbo ) )
self.vbo.unbind()
        glDisableClientState( GL_VERTEX_ARRAY )
shaders.glUseProgram( 0 )
glPopMatrix()
self.SwapBuffers()
class Window( wx.Frame ):
def __init__( self, *args, **kwargs ):
super().__init__( *args, **kwargs )
self.initUI()
def initUI( self ):
panel = GLFrame(self)
panel.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
wx.StaticText( panel, label='Boilerplate Code', pos=( 10, 10 ) )
fmenu = wx.Menu()
self.popupMenu = wx.Menu()
fitem = fmenu.Append( wx.ID_OPEN, '&Open\tCtrl+O', 'Open file' )
self.popupMenu.Append( wx.ID_OPEN, '&Open\tCtrl+O', 'Open file' )
self.Bind( wx.EVT_MENU, self.onOpen, fitem )
fmenu.AppendSeparator()
fitem = fmenu.Append( wx.ID_EXIT, 'E&xit\tCtrl+Q', 'Exit Application' )
self.popupMenu.Append( wx.ID_EXIT, 'E&xit\tCtrl+Q', 'Exit Application' )
self.Bind(wx.EVT_MENU, self.onQuit, fitem)
mbar = wx.MenuBar()
mbar.Append( fmenu, '&File' )
self.SetMenuBar( mbar )
self.Show()
def OnRightDown(self, event):
self.PopupMenu( self.popupMenu, event.GetPosition() )
def onQuit( self, event ):
self.Close()
def onOpen( self, event ):
print( 'open' )
class Application( wx.App ):
def run( self ):
frame = Window(None, -1, 'Boilerplate Window', size=(400,300))
frame.Show()
self.MainLoop()
self.Destroy()
Application().run()
|
[
"wx.Menu",
"readobj.Obj3D",
"OpenGL.GL.shaders.glUseProgram",
"wx.glcanvas.GLContext",
"wx.StaticText",
"numpy.array",
"OpenGL.GL.shaders.compileProgram",
"OpenGL.GL.shaders.compileShader",
"wx.MenuBar"
] |
[((1079, 1103), 'wx.glcanvas.GLContext', 'glcanvas.GLContext', (['self'], {}), '(self)\n', (1097, 1103), False, 'from wx import glcanvas\n'), ((3450, 3503), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['vertexShader', 'GL_VERTEX_SHADER'], {}), '(vertexShader, GL_VERTEX_SHADER)\n', (3471, 3503), False, 'from OpenGL.GL import shaders\n'), ((3532, 3589), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['fragmentShader', 'GL_FRAGMENT_SHADER'], {}), '(fragmentShader, GL_FRAGMENT_SHADER)\n', (3553, 3589), False, 'from OpenGL.GL import shaders\n'), ((3623, 3677), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER', 'FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, FRAGMENT_SHADER)\n', (3645, 3677), False, 'from OpenGL.GL import shaders\n'), ((3696, 3723), 'readobj.Obj3D', 'Obj3D', (['"""testdata\\\\cube.obj"""'], {}), "('testdata\\\\cube.obj')\n", (3701, 3723), False, 'from readobj import Obj3D\n'), ((4537, 4570), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.shader'], {}), '(self.shader)\n', (4557, 4570), False, 'from OpenGL.GL import shaders\n'), ((4822, 4845), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['(0)'], {}), '(0)\n', (4842, 4845), False, 'from OpenGL.GL import shaders\n'), ((5195, 5255), 'wx.StaticText', 'wx.StaticText', (['panel'], {'label': '"""Boilerplate Code"""', 'pos': '(10, 10)'}), "(panel, label='Boilerplate Code', pos=(10, 10))\n", (5208, 5255), False, 'import wx\n'), ((5285, 5294), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (5292, 5294), False, 'import wx\n'), ((5320, 5329), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (5327, 5329), False, 'import wx\n'), ((5816, 5828), 'wx.MenuBar', 'wx.MenuBar', ([], {}), '()\n', (5826, 5828), False, 'import wx\n'), ((3791, 3810), 'numpy.array', 'np.array', (['data', '"""f"""'], {}), "(data, 'f')\n", (3799, 3810), True, 'import numpy as np\n')]
|
import pytest
import numpy as np
import audresample
def set_ones(signal, channels):
signal[channels, :] = 1
return signal
def mixdown(signal):
return np.atleast_2d(np.mean(signal, axis=0))
@pytest.mark.parametrize(
'signal, channels, mixdown, upmix, always_copy, expect',
[
# empty signal
(
np.zeros(0, dtype=np.float32),
None,
False,
None,
False,
np.zeros((1, 0), dtype=np.float32),
),
(
np.zeros((1, 0), dtype=np.float32),
None,
False,
None,
False,
np.zeros((1, 0), dtype=np.float32),
),
(
np.zeros((1, 0), dtype=np.float32),
0,
False,
None,
False,
np.zeros((1, 0), dtype=np.float32),
),
(
np.zeros((1, 0), dtype=np.float32),
1,
False,
'repeat',
False,
np.zeros((1, 0), dtype=np.float32),
),
(
np.zeros((1, 0), dtype=np.float32),
1,
False,
'zeros',
False,
np.zeros((1, 0), dtype=np.float32),
),
(
np.zeros((1, 0), dtype=np.float32),
[0, 2],
False,
'zeros',
False,
np.zeros((2, 0), dtype=np.float32),
),
# single channel
(
np.zeros((16000,)),
None,
False,
None,
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.zeros((1, 16000), np.float32),
None,
False,
None,
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.zeros((1, 16000), np.float32),
None,
True,
None,
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.zeros((1, 16000), np.float32),
0,
False,
None,
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.zeros((1, 16000), np.float32),
0,
True,
None,
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.ones((1, 16000), np.float32),
0,
True,
'zeros',
False,
np.ones((1, 16000), dtype=np.float32),
),
(
np.ones((1, 16000), np.float32),
1,
True,
'repeat',
False,
np.ones((1, 16000), dtype=np.float32),
),
(
np.ones((1, 16000), np.float32),
1,
True,
'zeros',
False,
np.zeros((1, 16000), dtype=np.float32),
),
(
np.ones((1, 16000), np.float32),
-2,
True,
'zeros',
False,
np.ones((1, 16000), dtype=np.float32),
),
(
np.ones((1, 16000), np.float32),
[0, 2],
False,
'zeros',
False,
np.concatenate(
[
np.ones((1, 16000), dtype=np.float32),
np.zeros((1, 16000), dtype=np.float32),
]
),
),
(
np.ones((1, 16000), np.float32),
[0, 2],
True,
'zeros',
False,
0.5 * np.ones((1, 16000), dtype=np.float32),
),
# multiple channels
(
set_ones(np.zeros((4, 16000), np.float32), 2),
2,
False,
None,
False,
np.ones((1, 16000), dtype=np.float32),
),
(
set_ones(np.zeros((4, 16000), np.float32), -1),
-1,
False,
None,
False,
np.ones((1, 16000), dtype=np.float32),
),
(
set_ones(np.zeros((4, 16000), np.float32), [1, 3]),
[1, 3],
False,
None,
False,
np.ones((2, 16000), dtype=np.float32),
),
(
set_ones(np.zeros((4, 16000), np.float32), [0, 1, 2, 3]),
[0, 1, 2, 3],
False,
None,
False,
np.ones((4, 16000), dtype=np.float32),
),
(
set_ones(np.zeros((4, 16000), np.float32), [0, 1, 2]),
range(3),
False,
None,
False,
np.ones((3, 16000), dtype=np.float32),
),
(
set_ones(np.zeros((3, 16000), np.float32), 0),
[1, 0, 0],
False,
None,
False,
set_ones(np.zeros((3, 16000), np.float32), [1, 2]),
),
(
set_ones(np.zeros((3, 16000), np.float32), 0),
[3, 0, 0],
False,
'zeros',
False,
set_ones(np.zeros((3, 16000), np.float32), [1, 2]),
),
(
set_ones(np.zeros((3, 16000), np.float32), 0),
[3, 0, 0],
False,
'repeat',
False,
np.ones((3, 16000), np.float32),
),
(
set_ones(np.zeros((3, 16000), np.float32), 0),
[-6, 0, 0],
False,
'repeat',
False,
np.ones((3, 16000), np.float32),
),
# multiple channels with mixdown
(
audresample.am_fm_synth(16000, 2, 16000),
None,
True,
None,
False,
mixdown(audresample.am_fm_synth(16000, 2, 16000)),
),
(
audresample.am_fm_synth(16000, 3, 16000),
[0, 1],
True,
None,
False,
mixdown(audresample.am_fm_synth(16000, 2, 16000)),
),
# always copy
(
np.zeros((1, 16000), dtype=np.float32),
None,
False,
None,
True,
np.zeros((1, 16000), dtype=np.float32),
),
# wrong channel index
pytest.param(
np.zeros((2, 16000)),
2,
False,
None,
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
pytest.param(
np.zeros((2, 16000)),
[0, 1, 2],
False,
None,
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
# wrong input shape
pytest.param(
np.zeros((16000, 2, 3)),
None,
False,
None,
False,
None,
marks=pytest.mark.xfail(raises=RuntimeError),
),
# wrong upmix type
pytest.param(
np.zeros((2, 16000)),
2,
False,
'fancy',
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
]
)
def test_remix(  # the cases above exercise audresample.remix, not resampling
signal,
channels,
mixdown,
upmix,
always_copy,
expect,
):
result = audresample.remix(
signal,
channels,
mixdown,
upmix=upmix,
always_copy=always_copy,
)
np.testing.assert_equal(result, expect)
    if (signal.size > 0
            and channels is None
            and not mixdown
            and signal.dtype == np.float32):
if always_copy:
assert id(signal) != id(result)
else:
assert id(signal) == id(result)
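# To run this parametrized suite (assuming pytest is installed and the file is
# saved as, e.g., test_remix.py):
#   python -m pytest -q test_remix.py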
|
[
"audresample.am_fm_synth",
"audresample.remix",
"numpy.zeros",
"numpy.ones",
"numpy.mean",
"numpy.testing.assert_equal",
"pytest.mark.xfail"
] |
[((7552, 7639), 'audresample.remix', 'audresample.remix', (['signal', 'channels', 'mixdown'], {'upmix': 'upmix', 'always_copy': 'always_copy'}), '(signal, channels, mixdown, upmix=upmix, always_copy=\n always_copy)\n', (7569, 7639), False, 'import audresample\n'), ((7686, 7725), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'expect'], {}), '(result, expect)\n', (7709, 7725), True, 'import numpy as np\n'), ((247, 270), 'numpy.mean', 'np.mean', (['signal'], {'axis': '(0)'}), '(signal, axis=0)\n', (254, 270), True, 'import numpy as np\n'), ((412, 441), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32'}), '(0, dtype=np.float32)\n', (420, 441), True, 'import numpy as np\n'), ((529, 563), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {'dtype': 'np.float32'}), '((1, 0), dtype=np.float32)\n', (537, 563), True, 'import numpy as np\n'), ((598, 632), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {'dtype': 'np.float32'}), '((1, 0), dtype=np.float32)\n', (606, 632), True, 'import numpy as np\n'), ((720, 754), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {'dtype': 'np.float32'}), '((1, 0), dtype=np.float32)\n', (728, 754), True, 'import numpy as np\n'), ((789, 823), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {'dtype': 'np.float32'}), '((1, 0), dtype=np.float32)\n', (797, 823), True, 'import numpy as np\n'), ((908, 942), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {'dtype': 'np.float32'}), '((1, 0), dtype=np.float32)\n', (916, 942), True, 'import numpy as np\n'), ((977, 1011), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {'dtype': 'np.float32'}), '((1, 0), dtype=np.float32)\n', (985, 1011), True, 'import numpy as np\n'), ((1100, 1134), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {'dtype': 'np.float32'}), '((1, 0), dtype=np.float32)\n', (1108, 1134), True, 'import numpy as np\n'), ((1169, 1203), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {'dtype': 'np.float32'}), '((1, 0), dtype=np.float32)\n', (1177, 1203), True, 'import numpy as np\n'), ((1291, 1325), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {'dtype': 'np.float32'}), '((1, 0), dtype=np.float32)\n', (1299, 1325), True, 'import numpy as np\n'), ((1360, 1394), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {'dtype': 'np.float32'}), '((1, 0), dtype=np.float32)\n', (1368, 1394), True, 'import numpy as np\n'), ((1487, 1521), 'numpy.zeros', 'np.zeros', (['(2, 0)'], {'dtype': 'np.float32'}), '((2, 0), dtype=np.float32)\n', (1495, 1521), True, 'import numpy as np\n'), ((1581, 1599), 'numpy.zeros', 'np.zeros', (['(16000,)'], {}), '((16000,))\n', (1589, 1599), True, 'import numpy as np\n'), ((1687, 1725), 'numpy.zeros', 'np.zeros', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (1695, 1725), True, 'import numpy as np\n'), ((1760, 1792), 'numpy.zeros', 'np.zeros', (['(1, 16000)', 'np.float32'], {}), '((1, 16000), np.float32)\n', (1768, 1792), True, 'import numpy as np\n'), ((1880, 1918), 'numpy.zeros', 'np.zeros', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (1888, 1918), True, 'import numpy as np\n'), ((1953, 1985), 'numpy.zeros', 'np.zeros', (['(1, 16000)', 'np.float32'], {}), '((1, 16000), np.float32)\n', (1961, 1985), True, 'import numpy as np\n'), ((2072, 2110), 'numpy.zeros', 'np.zeros', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (2080, 2110), True, 'import numpy as np\n'), ((2145, 2177), 'numpy.zeros', 'np.zeros', (['(1, 16000)', 'np.float32'], {}), '((1, 16000), np.float32)\n', (2153, 2177), True, 'import numpy as np\n'), ((2262, 2300), 'numpy.zeros', 'np.zeros', 
(['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (2270, 2300), True, 'import numpy as np\n'), ((2335, 2367), 'numpy.zeros', 'np.zeros', (['(1, 16000)', 'np.float32'], {}), '((1, 16000), np.float32)\n', (2343, 2367), True, 'import numpy as np\n'), ((2451, 2489), 'numpy.zeros', 'np.zeros', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (2459, 2489), True, 'import numpy as np\n'), ((2524, 2555), 'numpy.ones', 'np.ones', (['(1, 16000)', 'np.float32'], {}), '((1, 16000), np.float32)\n', (2531, 2555), True, 'import numpy as np\n'), ((2642, 2679), 'numpy.ones', 'np.ones', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (2649, 2679), True, 'import numpy as np\n'), ((2714, 2745), 'numpy.ones', 'np.ones', (['(1, 16000)', 'np.float32'], {}), '((1, 16000), np.float32)\n', (2721, 2745), True, 'import numpy as np\n'), ((2833, 2870), 'numpy.ones', 'np.ones', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (2840, 2870), True, 'import numpy as np\n'), ((2905, 2936), 'numpy.ones', 'np.ones', (['(1, 16000)', 'np.float32'], {}), '((1, 16000), np.float32)\n', (2912, 2936), True, 'import numpy as np\n'), ((3023, 3061), 'numpy.zeros', 'np.zeros', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (3031, 3061), True, 'import numpy as np\n'), ((3096, 3127), 'numpy.ones', 'np.ones', (['(1, 16000)', 'np.float32'], {}), '((1, 16000), np.float32)\n', (3103, 3127), True, 'import numpy as np\n'), ((3215, 3252), 'numpy.ones', 'np.ones', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (3222, 3252), True, 'import numpy as np\n'), ((3287, 3318), 'numpy.ones', 'np.ones', (['(1, 16000)', 'np.float32'], {}), '((1, 16000), np.float32)\n', (3294, 3318), True, 'import numpy as np\n'), ((3630, 3661), 'numpy.ones', 'np.ones', (['(1, 16000)', 'np.float32'], {}), '((1, 16000), np.float32)\n', (3637, 3661), True, 'import numpy as np\n'), ((3989, 4026), 'numpy.ones', 'np.ones', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (3996, 4026), True, 'import numpy as np\n'), ((4193, 4230), 'numpy.ones', 'np.ones', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (4200, 4230), True, 'import numpy as np\n'), ((4405, 4442), 'numpy.ones', 'np.ones', (['(2, 16000)'], {'dtype': 'np.float32'}), '((2, 16000), dtype=np.float32)\n', (4412, 4442), True, 'import numpy as np\n'), ((4629, 4666), 'numpy.ones', 'np.ones', (['(4, 16000)'], {'dtype': 'np.float32'}), '((4, 16000), dtype=np.float32)\n', (4636, 4666), True, 'import numpy as np\n'), ((4846, 4883), 'numpy.ones', 'np.ones', (['(3, 16000)'], {'dtype': 'np.float32'}), '((3, 16000), dtype=np.float32)\n', (4853, 4883), True, 'import numpy as np\n'), ((5509, 5540), 'numpy.ones', 'np.ones', (['(3, 16000)', 'np.float32'], {}), '((3, 16000), np.float32)\n', (5516, 5540), True, 'import numpy as np\n'), ((5718, 5749), 'numpy.ones', 'np.ones', (['(3, 16000)', 'np.float32'], {}), '((3, 16000), np.float32)\n', (5725, 5749), True, 'import numpy as np\n'), ((5825, 5865), 'audresample.am_fm_synth', 'audresample.am_fm_synth', (['(16000)', '(2)', '(16000)'], {}), '(16000, 2, 16000)\n', (5848, 5865), False, 'import audresample\n'), ((6036, 6076), 'audresample.am_fm_synth', 'audresample.am_fm_synth', (['(16000)', '(3)', '(16000)'], {}), '(16000, 3, 16000)\n', (6059, 6076), False, 'import audresample\n'), ((6271, 6309), 'numpy.zeros', 'np.zeros', (['(1, 16000)'], {'dtype': 
'np.float32'}), '((1, 16000), dtype=np.float32)\n', (6279, 6309), True, 'import numpy as np\n'), ((6396, 6434), 'numpy.zeros', 'np.zeros', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (6404, 6434), True, 'import numpy as np\n'), ((6511, 6531), 'numpy.zeros', 'np.zeros', (['(2, 16000)'], {}), '((2, 16000))\n', (6519, 6531), True, 'import numpy as np\n'), ((6723, 6743), 'numpy.zeros', 'np.zeros', (['(2, 16000)'], {}), '((2, 16000))\n', (6731, 6743), True, 'import numpy as np\n'), ((6971, 6994), 'numpy.zeros', 'np.zeros', (['(16000, 2, 3)'], {}), '((16000, 2, 3))\n', (6979, 6994), True, 'import numpy as np\n'), ((7218, 7238), 'numpy.zeros', 'np.zeros', (['(2, 16000)'], {}), '((2, 16000))\n', (7226, 7238), True, 'import numpy as np\n'), ((3759, 3796), 'numpy.ones', 'np.ones', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (3766, 3796), True, 'import numpy as np\n'), ((3868, 3900), 'numpy.zeros', 'np.zeros', (['(4, 16000)', 'np.float32'], {}), '((4, 16000), np.float32)\n', (3876, 3900), True, 'import numpy as np\n'), ((4070, 4102), 'numpy.zeros', 'np.zeros', (['(4, 16000)', 'np.float32'], {}), '((4, 16000), np.float32)\n', (4078, 4102), True, 'import numpy as np\n'), ((4274, 4306), 'numpy.zeros', 'np.zeros', (['(4, 16000)', 'np.float32'], {}), '((4, 16000), np.float32)\n', (4282, 4306), True, 'import numpy as np\n'), ((4486, 4518), 'numpy.zeros', 'np.zeros', (['(4, 16000)', 'np.float32'], {}), '((4, 16000), np.float32)\n', (4494, 4518), True, 'import numpy as np\n'), ((4710, 4742), 'numpy.zeros', 'np.zeros', (['(4, 16000)', 'np.float32'], {}), '((4, 16000), np.float32)\n', (4718, 4742), True, 'import numpy as np\n'), ((4927, 4959), 'numpy.zeros', 'np.zeros', (['(3, 16000)', 'np.float32'], {}), '((3, 16000), np.float32)\n', (4935, 4959), True, 'import numpy as np\n'), ((5065, 5097), 'numpy.zeros', 'np.zeros', (['(3, 16000)', 'np.float32'], {}), '((3, 16000), np.float32)\n', (5073, 5097), True, 'import numpy as np\n'), ((5150, 5182), 'numpy.zeros', 'np.zeros', (['(3, 16000)', 'np.float32'], {}), '((3, 16000), np.float32)\n', (5158, 5182), True, 'import numpy as np\n'), ((5291, 5323), 'numpy.zeros', 'np.zeros', (['(3, 16000)', 'np.float32'], {}), '((3, 16000), np.float32)\n', (5299, 5323), True, 'import numpy as np\n'), ((5376, 5408), 'numpy.zeros', 'np.zeros', (['(3, 16000)', 'np.float32'], {}), '((3, 16000), np.float32)\n', (5384, 5408), True, 'import numpy as np\n'), ((5584, 5616), 'numpy.zeros', 'np.zeros', (['(3, 16000)', 'np.float32'], {}), '((3, 16000), np.float32)\n', (5592, 5616), True, 'import numpy as np\n'), ((5960, 6000), 'audresample.am_fm_synth', 'audresample.am_fm_synth', (['(16000)', '(2)', '(16000)'], {}), '(16000, 2, 16000)\n', (5983, 6000), False, 'import audresample\n'), ((6173, 6213), 'audresample.am_fm_synth', 'audresample.am_fm_synth', (['(16000)', '(2)', '(16000)'], {}), '(16000, 2, 16000)\n', (6196, 6213), False, 'import audresample\n'), ((6640, 6676), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'ValueError'}), '(raises=ValueError)\n', (6657, 6676), False, 'import pytest\n'), ((6860, 6896), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'ValueError'}), '(raises=ValueError)\n', (6877, 6896), False, 'import pytest\n'), ((7106, 7144), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'RuntimeError'}), '(raises=RuntimeError)\n', (7123, 7144), False, 'import pytest\n'), ((7350, 7386), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'ValueError'}), 
'(raises=ValueError)\n', (7367, 7386), False, 'import pytest\n'), ((3465, 3502), 'numpy.ones', 'np.ones', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (3472, 3502), True, 'import numpy as np\n'), ((3524, 3562), 'numpy.zeros', 'np.zeros', (['(1, 16000)'], {'dtype': 'np.float32'}), '((1, 16000), dtype=np.float32)\n', (3532, 3562), True, 'import numpy as np\n')]
|
import requests
import json
import base64
import numpy as np
import matplotlib.pyplot as plt
import pickle
import imageio
def get_jsonstr(url):
    '''Fetch and decode the problem JSON from the given URL.'''
    response = requests.get(url)
    jsonstr = json.loads(response.text)
    return jsonstr
def split_image(img):  # input: image as a NumPy array
    '''Split a 900x900 image into nine 300x300 tiles.'''
    imgs = []
    for i in range(0, 900, 300):
        for j in range(0, 900, 300):
            imgs.append(img[i:i+300, j:j+300].tolist())
    return imgs  # list of the nine tile matrices
def encode_image(title_image, store_image):
    '''Encode the puzzle tiles as board numbers.'''
    current_table = []  # numeric board encoding of the puzzle image
    ans_type = list(range(1, 10))  # candidate answer types
    for ls_title in title_image:
        try:
            pos_code = store_image.index(ls_title) + 1
            current_table.append(pos_code)
            ans_type.remove(pos_code)
        except ValueError:  # the blank tile matches nothing in the stored image
            current_table.append(0)
    return current_table, ans_type[0]  # board encoding and the answer type
def main(json_image):
    # load the stored frameless-character images, each pre-split into 9 tiles
    save_name = 'ls_img.pkl'
    pkl_file = open(save_name, 'rb')
    store_images = pickle.load(pkl_file)
    pkl_file.close()
    # decode the puzzle image (base64-encoded)
    bs64_img = base64.b64decode(json_image)
    np_img = imageio.imread(bs64_img)
    title_image = split_image(np_img)
    for ls_store in store_images:  # iterate over every stored frameless character
        count = 0
        for ls_title in title_image:  # iterate over the puzzle tiles
            if (np.array(ls_title) == 255).all():  # the cut-out blank tile
                continue  # skip it
            if ls_title in ls_store:  # tile belongs to this frameless character
                count += 1
            else:
                break
        if count == 8:  # all tiles except the blank match: this is the character, so encode the tiles
            current_table, ans_type = encode_image(title_image, ls_store)
            return current_table, ans_type
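# Example use (a sketch; the 'img' field name matches the __main__ block below):
#   jsonstr = get_jsonstr(url)
#   current_table, ans_type = main(jsonstr['img'])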
if __name__ == "__main__":
# 读取无框字符分割成9份后的图像列表
save_name = 'ls_img.pkl'
pkl_file = open(save_name,'rb')
store_images = pickle.load(pkl_file)
pkl_file.close()
# 获取题给图像
url = "http://47.102.118.1:8089/api/problem?stuid=031804104"
response = requests.get(url)
jsonstr = json.loads(response.text)
bs64_img = base64.b64decode(jsonstr['img']) #图像是base64编码
np_img = imageio.imread(bs64_img)
title_image = split_image(np_img)
plt.imshow(np_img)
plt.show()
for ls_store in store_images: #遍历存储的所存储的无框字符
count = 0
for ls_title in title_image: #遍历题给图像块
if (np.array(ls_title) == 255).all() == True: # 被挖去的空白
continue # 跳过
if ls_title in ls_store: # 该图块在无框字符中
count += 1
else:
break
if count == 8: # 除空白块外都相同,则判就是该无框字符,对题给图块进行编码
current_table,ans_type = encode_image(title_image,ls_store)
print(current_table, ans_type)
ls = [331,332,333,334,335,336,337,338,339]
for i in range(9):
plt.subplot(ls[i])
plt.imshow(np.array(ls_store[i]))
plt.show()
for i in range(9):
plt.subplot(ls[i])
plt.imshow(np.array(title_image[i]))
plt.show()
break
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"json.loads",
"matplotlib.pyplot.imshow",
"imageio.imread",
"base64.b64decode",
"pickle.load",
"numpy.array",
"requests.get"
] |
[((234, 251), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (246, 251), False, 'import requests\n'), ((267, 292), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (277, 292), False, 'import json\n'), ((1137, 1158), 'pickle.load', 'pickle.load', (['pkl_file'], {}), '(pkl_file)\n', (1148, 1158), False, 'import pickle\n'), ((1213, 1241), 'base64.b64decode', 'base64.b64decode', (['json_image'], {}), '(json_image)\n', (1229, 1241), False, 'import base64\n'), ((1271, 1295), 'imageio.imread', 'imageio.imread', (['bs64_img'], {}), '(bs64_img)\n', (1285, 1295), False, 'import imageio\n'), ((2007, 2028), 'pickle.load', 'pickle.load', (['pkl_file'], {}), '(pkl_file)\n', (2018, 2028), False, 'import pickle\n'), ((2149, 2166), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2161, 2166), False, 'import requests\n'), ((2182, 2207), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2192, 2207), False, 'import json\n'), ((2224, 2256), 'base64.b64decode', 'base64.b64decode', (["jsonstr['img']"], {}), "(jsonstr['img'])\n", (2240, 2256), False, 'import base64\n'), ((2284, 2308), 'imageio.imread', 'imageio.imread', (['bs64_img'], {}), '(bs64_img)\n', (2298, 2308), False, 'import imageio\n'), ((2353, 2371), 'matplotlib.pyplot.imshow', 'plt.imshow', (['np_img'], {}), '(np_img)\n', (2363, 2371), True, 'import matplotlib.pyplot as plt\n'), ((2377, 2387), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2385, 2387), True, 'import matplotlib.pyplot as plt\n'), ((3121, 3131), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3129, 3131), True, 'import matplotlib.pyplot as plt\n'), ((3283, 3293), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3291, 3293), True, 'import matplotlib.pyplot as plt\n'), ((3030, 3048), 'matplotlib.pyplot.subplot', 'plt.subplot', (['ls[i]'], {}), '(ls[i])\n', (3041, 3048), True, 'import matplotlib.pyplot as plt\n'), ((3189, 3207), 'matplotlib.pyplot.subplot', 'plt.subplot', (['ls[i]'], {}), '(ls[i])\n', (3200, 3207), True, 'import matplotlib.pyplot as plt\n'), ((3081, 3102), 'numpy.array', 'np.array', (['ls_store[i]'], {}), '(ls_store[i])\n', (3089, 3102), True, 'import numpy as np\n'), ((3240, 3264), 'numpy.array', 'np.array', (['title_image[i]'], {}), '(title_image[i])\n', (3248, 3264), True, 'import numpy as np\n'), ((1472, 1490), 'numpy.array', 'np.array', (['ls_title'], {}), '(ls_title)\n', (1480, 1490), True, 'import numpy as np\n'), ((2523, 2541), 'numpy.array', 'np.array', (['ls_title'], {}), '(ls_title)\n', (2531, 2541), True, 'import numpy as np\n')]
|
import numpy as np
import argparse
import imutils
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Path to the image")
ap.add_argument("-i2", "--image2", required = True, help = "Path to the image 2")
ap.add_argument("-i3", "--image3", required = True, help = "Path to the image 3")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
# NOTE: CHAPTER 6
cv2.imshow("Original", image)
# 6.1 translation Left(-ve)/right(+ve) followed by up(-ve)/down(+ve)
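# M is a 2x3 affine matrix [[1, 0, tx], [0, 1, ty]]: tx shifts right, ty shifts down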
M = np.float32([[1, 0, 25], [0, 1, 50]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Down and Right", shifted)
# 6.1 translation
M = np.float32([[1, 0, -50], [0, 1, -90]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Up and Left", shifted)
# 6.2 in imutils.py
# 6.3 translate using imutils
shifted = imutils.translate(image, 0, 100)
cv2.imshow("Shifted Down", shifted)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.4 rotate counter-clockwise by default
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
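# cv2.getRotationMatrix2D(center, angle, scale): positive angles rotate counter-clockwise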
M = cv2.getRotationMatrix2D(center, 45, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("rotated by 45 degrees", rotated)
# 6.4 rotate -ve to rotate clockwise
M = cv2.getRotationMatrix2D(center, -90, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("rotated by -90 degrees", rotated)
# 6.5 move rotate to imutils.py
# 6.6 rotate using imutils.py
rotated = imutils.rotate(image, 180)
cv2.imshow("Rotated by 180 Degrees", rotated)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.7 resize
r = 150.0 / image.shape[1] # ratio - width = 150px
dim = (150, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA) # could also use INTER_LINEAR
# INTER_CUBIC or INTER_NEAREST
cv2.imshow("Resized (Width)", resized)
# 6.8 resize
r = 50.0 / image.shape[1] # ratio - height = 50px
dim = (50, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
cv2.imshow("Resized (Height)", resized)
# 6.11
# 6.9 resize in imutils.py
resized = imutils.resize(image, width = 66)
print("shape: {} pixels".format(resized.shape)) # NOTE: height width order not width height
cv2.imshow("Resized via Function", resized)
# 6.10 resize height via imutils.py
resized = imutils.resize(image, height = 110)
print("shape: {} pixels".format(resized.shape)) # NOTE: height width order not width height
cv2.imshow("Resized via Function height 50", resized)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.12 flipping
flipped = cv2.flip(image, 1)
cv2.imshow("Flipped Horizontally", flipped)
flipped = cv2.flip(image, 0)
cv2.imshow("Flipped Vertically", flipped)
flipped = cv2.flip(image, -1)
cv2.imshow("Flipped Horizontally & Vertically", flipped)
cv2.waitKey(0)
# 6.13 crop [y_start:y_end, x_start:x_end]
cropped = image[30:120, 240:335]
cv2.imshow("T-Rex Face", cropped)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.14 arithmetic
# OpenCV arithmetic saturates: results are clipped to the [0, 255] range
print(" max of 255: {}".format(cv2.add(np.uint8([200]), np.uint8([100]))))
print(" min of 0: {}".format(cv2.add(np.uint8([ 50]), np.uint8([100]))))
# NumPy uint8 arithmetic wraps around (modulo 256)
print("wrap around: {}".format(np.uint8([200]) + np.uint8([100])))
print("wrap around: {}".format(np.uint8([ 50]) + np.uint8([100])))
# 6.17 arithmetic on images
M = np.ones(image.shape, dtype = "uint8") * 100
added = cv2.add(image, M)
cv2.imshow("Added", added)
M = np.ones(image.shape, dtype = "uint8") *50
subtracted = cv2.subtract(image, M)
cv2.imshow("Subtracted", subtracted)
cv2.waitKey(0)
# 6.18 bitwise operations
rectangle = np.zeros((300, 300), dtype = "uint8")
cv2.rectangle(rectangle, (25, 25), (275, 275), 255, -1)
cv2.imshow("Rectangle", rectangle)
circle = np.zeros((300, 300), dtype = "uint8")
cv2.circle(circle, (150, 150), 150, 255, -1)
cv2.imshow("Circle", circle)
cv2.waitKey(0)
# 6.19 bitwise AND
bitwiseAnd = cv2.bitwise_and(rectangle, circle)
cv2.imshow("AND", bitwiseAnd)
cv2.waitKey(0)
# 6.19 bitwise OR
bitwiseOr = cv2.bitwise_or(rectangle, circle)
cv2.imshow("OR", bitwiseOr)
cv2.waitKey(0)
# 6.19 bitwise XOR
bitwiseXor = cv2.bitwise_xor(rectangle, circle)
cv2.imshow("XOR", bitwiseXor)
cv2.waitKey(0)
# 6.19 bitwise NOT
bitwiseNot = cv2.bitwise_not(circle)
cv2.imshow("NOT", bitwiseNot)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.20 masking
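# a mask keeps only the pixels where the mask value is non-zero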
image2 = cv2.imread(args["image2"])
cv2.imshow("Original2", image2)
mask = np.zeros(image2.shape[:2], dtype = "uint8")
(cX, cY) = (image2.shape[1] // 2, image2.shape[0] // 2)
cv2.rectangle(mask, (cX - 75, cY - 75), (cX + 75, cY + 75), 255, -1)
cv2.imshow("Mask", mask)
masked = cv2.bitwise_and(image2, image2, mask = mask)
cv2.imshow("Mask Applied to Image", masked)
cv2.waitKey(0)
# 6.21 masking circle
mask = np.zeros(image2.shape[:2], dtype = "uint8")
cv2.circle(mask, (cX, cY), 100, 255, -1)
masked = cv2.bitwise_and(image2, image2, mask = mask)
cv2.imshow("Mask", mask)
cv2.imshow("Mask Applied to Image", masked)
cv2.waitKey(0)
# 6.22 splitting and merging channels
image3 = cv2.imread(args["image3"])
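# OpenCV loads colour images in BGR (not RGB) channel order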
(B, G, R) = cv2.split(image3)
cv2.imshow("Red", R)
cv2.imshow("Green", G)
cv2.imshow("Blue", B)
merged = cv2.merge([B, G, R])
cv2.imshow("Merged", merged)
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.23 merge only colour channel
zeros = np.zeros(image3.shape[:2], dtype = "uint8")
cv2.imshow("Red", cv2.merge([zeros, zeros, R]))
cv2.imshow("Green", cv2.merge([zeros, G, zeros]))
cv2.imshow("Blue", cv2.merge([B, zeros, zeros]))
cv2.waitKey(0)
cv2.destroyAllWindows()
# 6.24 colorspaces
cv2.imshow("Original", image2)
gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
cv2.imshow("Gray", gray)
hsv = cv2.cvtColor(image2, cv2.COLOR_BGR2HSV)
cv2.imshow("HSV", hsv)
lab = cv2.cvtColor(image2, cv2.COLOR_BGR2LAB)
cv2.imshow("L*a*b*", lab)
cv2.waitKey(0)
|
[
"argparse.ArgumentParser",
"cv2.bitwise_and",
"numpy.ones",
"cv2.warpAffine",
"cv2.rectangle",
"imutils.translate",
"imutils.resize",
"cv2.imshow",
"cv2.getRotationMatrix2D",
"cv2.subtract",
"cv2.cvtColor",
"cv2.split",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.circle",
"cv2.bitwise_not",
"cv2.bitwise_xor",
"numpy.uint8",
"cv2.waitKey",
"imutils.rotate",
"cv2.bitwise_or",
"cv2.flip",
"cv2.merge",
"cv2.add",
"numpy.float32",
"numpy.zeros",
"cv2.imread"
] |
[((67, 92), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (90, 92), False, 'import argparse\n'), ((373, 398), 'cv2.imread', 'cv2.imread', (["args['image']"], {}), "(args['image'])\n", (383, 398), False, 'import cv2\n'), ((419, 448), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image'], {}), "('Original', image)\n", (429, 448), False, 'import cv2\n'), ((523, 559), 'numpy.float32', 'np.float32', (['[[1, 0, 25], [0, 1, 50]]'], {}), '([[1, 0, 25], [0, 1, 50]])\n', (533, 559), True, 'import numpy as np\n'), ((570, 628), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(image.shape[1], image.shape[0])'], {}), '(image, M, (image.shape[1], image.shape[0]))\n', (584, 628), False, 'import cv2\n'), ((629, 674), 'cv2.imshow', 'cv2.imshow', (['"""Shifted Down and Right"""', 'shifted'], {}), "('Shifted Down and Right', shifted)\n", (639, 674), False, 'import cv2\n'), ((698, 736), 'numpy.float32', 'np.float32', (['[[1, 0, -50], [0, 1, -90]]'], {}), '([[1, 0, -50], [0, 1, -90]])\n', (708, 736), True, 'import numpy as np\n'), ((747, 805), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(image.shape[0], image.shape[0])'], {}), '(image, M, (image.shape[0], image.shape[0]))\n', (761, 805), False, 'import cv2\n'), ((806, 848), 'cv2.imshow', 'cv2.imshow', (['"""Shifted Up and Left"""', 'shifted'], {}), "('Shifted Up and Left', shifted)\n", (816, 848), False, 'import cv2\n'), ((910, 942), 'imutils.translate', 'imutils.translate', (['image', '(0)', '(100)'], {}), '(image, 0, 100)\n', (927, 942), False, 'import imutils\n'), ((943, 978), 'cv2.imshow', 'cv2.imshow', (['"""Shifted Down"""', 'shifted'], {}), "('Shifted Down', shifted)\n", (953, 978), False, 'import cv2\n'), ((979, 993), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (990, 993), False, 'import cv2\n'), ((994, 1017), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1015, 1017), False, 'import cv2\n'), ((1117, 1157), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', '(45)', '(1.0)'], {}), '(center, 45, 1.0)\n', (1140, 1157), False, 'import cv2\n'), ((1168, 1200), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {}), '(image, M, (w, h))\n', (1182, 1200), False, 'import cv2\n'), ((1201, 1245), 'cv2.imshow', 'cv2.imshow', (['"""rotated by 45 degrees"""', 'rotated'], {}), "('rotated by 45 degrees', rotated)\n", (1211, 1245), False, 'import cv2\n'), ((1288, 1329), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', '(-90)', '(1.0)'], {}), '(center, -90, 1.0)\n', (1311, 1329), False, 'import cv2\n'), ((1340, 1372), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {}), '(image, M, (w, h))\n', (1354, 1372), False, 'import cv2\n'), ((1373, 1418), 'cv2.imshow', 'cv2.imshow', (['"""rotated by -90 degrees"""', 'rotated'], {}), "('rotated by -90 degrees', rotated)\n", (1383, 1418), False, 'import cv2\n'), ((1492, 1518), 'imutils.rotate', 'imutils.rotate', (['image', '(180)'], {}), '(image, 180)\n', (1506, 1518), False, 'import imutils\n'), ((1519, 1564), 'cv2.imshow', 'cv2.imshow', (['"""Rotated by 180 Degrees"""', 'rotated'], {}), "('Rotated by 180 Degrees', rotated)\n", (1529, 1564), False, 'import cv2\n'), ((1565, 1579), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1576, 1579), False, 'import cv2\n'), ((1580, 1603), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1601, 1603), False, 'import cv2\n'), ((1717, 1769), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(image, dim, interpolation=cv2.INTER_AREA)\n', (1727, 1769), False, 'import cv2\n'), ((1898, 1936), 'cv2.imshow', 'cv2.imshow', (['"""Resized (Width)"""', 'resized'], {}), "('Resized (Width)', resized)\n", (1908, 1936), False, 'import cv2\n'), ((2048, 2100), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(image, dim, interpolation=cv2.INTER_AREA)\n', (2058, 2100), False, 'import cv2\n'), ((2103, 2142), 'cv2.imshow', 'cv2.imshow', (['"""Resized (Height)"""', 'resized'], {}), "('Resized (Height)', resized)\n", (2113, 2142), False, 'import cv2\n'), ((2188, 2219), 'imutils.resize', 'imutils.resize', (['image'], {'width': '(66)'}), '(image, width=66)\n', (2202, 2219), False, 'import imutils\n'), ((2314, 2357), 'cv2.imshow', 'cv2.imshow', (['"""Resized via Function"""', 'resized'], {}), "('Resized via Function', resized)\n", (2324, 2357), False, 'import cv2\n'), ((2405, 2438), 'imutils.resize', 'imutils.resize', (['image'], {'height': '(110)'}), '(image, height=110)\n', (2419, 2438), False, 'import imutils\n'), ((2533, 2586), 'cv2.imshow', 'cv2.imshow', (['"""Resized via Function height 50"""', 'resized'], {}), "('Resized via Function height 50', resized)\n", (2543, 2586), False, 'import cv2\n'), ((2587, 2601), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2598, 2601), False, 'import cv2\n'), ((2602, 2625), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2623, 2625), False, 'import cv2\n'), ((2653, 2671), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2661, 2671), False, 'import cv2\n'), ((2672, 2715), 'cv2.imshow', 'cv2.imshow', (['"""Flipped Horizontally"""', 'flipped'], {}), "('Flipped Horizontally', flipped)\n", (2682, 2715), False, 'import cv2\n'), ((2727, 2745), 'cv2.flip', 'cv2.flip', (['image', '(0)'], {}), '(image, 0)\n', (2735, 2745), False, 'import cv2\n'), ((2746, 2787), 'cv2.imshow', 'cv2.imshow', (['"""Flipped Vertically"""', 'flipped'], {}), "('Flipped Vertically', flipped)\n", (2756, 2787), False, 'import cv2\n'), ((2799, 2818), 'cv2.flip', 'cv2.flip', (['image', '(-1)'], {}), '(image, -1)\n', (2807, 2818), False, 'import cv2\n'), ((2819, 2875), 'cv2.imshow', 'cv2.imshow', (['"""Flipped Horizontally & Vertically"""', 'flipped'], {}), "('Flipped Horizontally & Vertically', flipped)\n", (2829, 2875), False, 'import cv2\n'), ((2876, 2890), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2887, 2890), False, 'import cv2\n'), ((2968, 3001), 'cv2.imshow', 'cv2.imshow', (['"""T-Rex Face"""', 'cropped'], {}), "('T-Rex Face', cropped)\n", (2978, 3001), False, 'import cv2\n'), ((3002, 3016), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3013, 3016), False, 'import cv2\n'), ((3017, 3040), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3038, 3040), False, 'import cv2\n'), ((3534, 3551), 'cv2.add', 'cv2.add', (['image', 'M'], {}), '(image, M)\n', (3541, 3551), False, 'import cv2\n'), ((3552, 3578), 'cv2.imshow', 'cv2.imshow', (['"""Added"""', 'added'], {}), "('Added', added)\n", (3562, 3578), False, 'import cv2\n'), ((3639, 3661), 'cv2.subtract', 'cv2.subtract', (['image', 'M'], {}), '(image, M)\n', (3651, 3661), False, 'import cv2\n'), ((3662, 3698), 'cv2.imshow', 'cv2.imshow', (['"""Subtracted"""', 'subtracted'], {}), "('Subtracted', subtracted)\n", (3672, 3698), False, 'import cv2\n'), ((3699, 3713), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3710, 3713), False, 'import cv2\n'), ((3753, 3788), 'numpy.zeros', 'np.zeros', (['(300, 300)'], {'dtype': '"""uint8"""'}), "((300, 300), dtype='uint8')\n", (3761, 3788), True, 'import numpy as np\n'), ((3791, 3846), 'cv2.rectangle', 'cv2.rectangle', (['rectangle', '(25, 25)', '(275, 275)', '(255)', '(-1)'], {}), '(rectangle, (25, 25), (275, 275), 255, -1)\n', (3804, 3846), False, 'import cv2\n'), ((3847, 3881), 'cv2.imshow', 'cv2.imshow', (['"""Rectangle"""', 'rectangle'], {}), "('Rectangle', rectangle)\n", (3857, 3881), False, 'import cv2\n'), ((3892, 3927), 'numpy.zeros', 'np.zeros', (['(300, 300)'], {'dtype': '"""uint8"""'}), "((300, 300), dtype='uint8')\n", (3900, 3927), True, 'import numpy as np\n'), ((3930, 3974), 'cv2.circle', 'cv2.circle', (['circle', '(150, 150)', '(150)', '(255)', '(-1)'], {}), '(circle, (150, 150), 150, 255, -1)\n', (3940, 3974), False, 'import cv2\n'), ((3975, 4003), 'cv2.imshow', 'cv2.imshow', (['"""Circle"""', 'circle'], {}), "('Circle', circle)\n", (3985, 4003), False, 'import cv2\n'), ((4004, 4018), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4015, 4018), False, 'import cv2\n'), ((4052, 4086), 'cv2.bitwise_and', 'cv2.bitwise_and', (['rectangle', 'circle'], {}), '(rectangle, circle)\n', (4067, 4086), False, 'import cv2\n'), ((4087, 4116), 'cv2.imshow', 'cv2.imshow', (['"""AND"""', 'bitwiseAnd'], {}), "('AND', bitwiseAnd)\n", (4097, 4116), False, 'import cv2\n'), ((4117, 4131), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4128, 4131), False, 'import cv2\n'), ((4163, 4196), 'cv2.bitwise_or', 'cv2.bitwise_or', (['rectangle', 'circle'], {}), '(rectangle, circle)\n', (4177, 4196), False, 'import cv2\n'), ((4197, 4224), 'cv2.imshow', 'cv2.imshow', (['"""OR"""', 'bitwiseOr'], {}), "('OR', bitwiseOr)\n", (4207, 4224), False, 'import cv2\n'), ((4225, 4239), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4236, 4239), False, 'import cv2\n'), ((4273, 4307), 'cv2.bitwise_xor', 'cv2.bitwise_xor', (['rectangle', 'circle'], {}), '(rectangle, circle)\n', (4288, 4307), False, 'import cv2\n'), ((4308, 4337), 'cv2.imshow', 'cv2.imshow', (['"""XOR"""', 'bitwiseXor'], {}), "('XOR', bitwiseXor)\n", (4318, 4337), False, 'import cv2\n'), ((4338, 4352), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4349, 4352), False, 'import cv2\n'), ((4386, 4409), 'cv2.bitwise_not', 'cv2.bitwise_not', (['circle'], {}), '(circle)\n', (4401, 4409), False, 'import cv2\n'), ((4410, 4439), 'cv2.imshow', 'cv2.imshow', (['"""NOT"""', 'bitwiseNot'], {}), "('NOT', bitwiseNot)\n", (4420, 4439), False, 'import cv2\n'), ((4440, 4454), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4451, 4454), False, 'import cv2\n'), ((4455, 4478), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4476, 4478), False, 'import cv2\n'), ((4504, 4530), 'cv2.imread', 'cv2.imread', (["args['image2']"], {}), "(args['image2'])\n", (4514, 4530), False, 'import cv2\n'), ((4531, 4562), 'cv2.imshow', 'cv2.imshow', (['"""Original2"""', 'image2'], {}), "('Original2', image2)\n", (4541, 4562), False, 'import cv2\n'), ((4571, 4612), 'numpy.zeros', 'np.zeros', (['image2.shape[:2]'], {'dtype': '"""uint8"""'}), "(image2.shape[:2], dtype='uint8')\n", (4579, 4612), True, 'import numpy as np\n'), ((4671, 4739), 'cv2.rectangle', 'cv2.rectangle', (['mask', '(cX - 75, cY - 75)', '(cX + 75, cY + 75)', '(255)', '(-1)'], {}), '(mask, (cX - 75, cY - 75), (cX + 75, cY + 75), 255, -1)\n', (4684, 4739), False, 'import cv2\n'), ((4738, 4762), 'cv2.imshow', 'cv2.imshow', (['"""Mask"""', 'mask'], {}), "('Mask', mask)\n", (4748, 4762), False, 'import cv2\n'), ((4773, 4815), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image2', 'image2'], {'mask': 'mask'}), '(image2, image2, mask=mask)\n', (4788, 4815), False, 'import cv2\n'), ((4818, 4861), 'cv2.imshow', 'cv2.imshow', (['"""Mask Applied to Image"""', 'masked'], {}), "('Mask Applied to Image', masked)\n", (4828, 4861), False, 'import cv2\n'), ((4862, 4876), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4873, 4876), False, 'import cv2\n'), ((4907, 4948), 'numpy.zeros', 'np.zeros', (['image2.shape[:2]'], {'dtype': '"""uint8"""'}), "(image2.shape[:2], dtype='uint8')\n", (4915, 4948), True, 'import numpy as np\n'), ((4951, 4991), 'cv2.circle', 'cv2.circle', (['mask', '(cX, cY)', '(100)', '(255)', '(-1)'], {}), '(mask, (cX, cY), 100, 255, -1)\n', (4961, 4991), False, 'import cv2\n'), ((5001, 5043), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image2', 'image2'], {'mask': 'mask'}), '(image2, image2, mask=mask)\n', (5016, 5043), False, 'import cv2\n'), ((5046, 5070), 'cv2.imshow', 'cv2.imshow', (['"""Mask"""', 'mask'], {}), "('Mask', mask)\n", (5056, 5070), False, 'import cv2\n'), ((5071, 5114), 'cv2.imshow', 'cv2.imshow', (['"""Mask Applied to Image"""', 'masked'], {}), "('Mask Applied to Image', masked)\n", (5081, 5114), False, 'import cv2\n'), ((5115, 5129), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5126, 5129), False, 'import cv2\n'), ((5178, 5204), 'cv2.imread', 'cv2.imread', (["args['image3']"], {}), "(args['image3'])\n", (5188, 5204), False, 'import cv2\n'), ((5217, 5234), 'cv2.split', 'cv2.split', (['image3'], {}), '(image3)\n', (5226, 5234), False, 'import cv2\n'), ((5236, 5256), 'cv2.imshow', 'cv2.imshow', (['"""Red"""', 'R'], {}), "('Red', R)\n", (5246, 5256), False, 'import cv2\n'), ((5257, 5279), 'cv2.imshow', 'cv2.imshow', (['"""Green"""', 'G'], {}), "('Green', G)\n", (5267, 5279), False, 'import cv2\n'), ((5280, 5301), 'cv2.imshow', 'cv2.imshow', (['"""Blue"""', 'B'], {}), "('Blue', B)\n", (5290, 5301), False, 'import cv2\n'), ((5312, 5332), 'cv2.merge', 'cv2.merge', (['[B, G, R]'], {}), '([B, G, R])\n', (5321, 5332), False, 'import cv2\n'), ((5333, 5361), 'cv2.imshow', 'cv2.imshow', (['"""Merged"""', 'merged'], {}), "('Merged', merged)\n", (5343, 5361), False, 'import cv2\n'), ((5362, 5376), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5373, 5376), False, 'import cv2\n'), ((5377, 5400), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5398, 5400), False, 'import cv2\n'), ((5443, 5484), 'numpy.zeros', 'np.zeros', (['image3.shape[:2]'], {'dtype': '"""uint8"""'}), "(image3.shape[:2], dtype='uint8')\n", (5451, 5484), True, 'import numpy as np\n'), ((5634, 5648), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5645, 5648), False, 'import cv2\n'), ((5649, 5672), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5670, 5672), False, 'import cv2\n'), ((5693, 5723), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image2'], {}), "('Original', image2)\n", (5703, 5723), False, 'import cv2\n'), ((5732, 5772), 'cv2.cvtColor', 'cv2.cvtColor', (['image2', 'cv2.COLOR_BGR2GRAY'], {}), '(image2, cv2.COLOR_BGR2GRAY)\n', (5744, 5772), False, 'import cv2\n'), ((5773, 5797), 'cv2.imshow', 'cv2.imshow', (['"""Gray"""', 'gray'], {}), "('Gray', gray)\n", (5783, 5797), False, 'import cv2\n'), ((5805, 5844), 'cv2.cvtColor', 'cv2.cvtColor', (['image2', 'cv2.COLOR_BGR2HSV'], {}), '(image2, cv2.COLOR_BGR2HSV)\n', (5817, 5844), False, 'import cv2\n'), ((5845, 5867), 'cv2.imshow', 'cv2.imshow', (['"""HSV"""', 'hsv'], {}), "('HSV', hsv)\n", (5855, 5867), False, 'import cv2\n'), ((5875, 5914), 'cv2.cvtColor', 'cv2.cvtColor', (['image2', 'cv2.COLOR_BGR2LAB'], {}), '(image2, cv2.COLOR_BGR2LAB)\n', (5887, 5914), False, 'import cv2\n'), ((5915, 5940), 'cv2.imshow', 'cv2.imshow', (['"""L*a*b*"""', 'lab'], {}), "('L*a*b*', lab)\n", (5925, 5940), False, 'import cv2\n'), ((5941, 5955), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5952, 5955), False, 'import cv2\n'), ((3482, 3517), 'numpy.ones', 'np.ones', (['image.shape'], {'dtype': '"""uint8"""'}), "(image.shape, dtype='uint8')\n", (3489, 3517), True, 'import numpy as np\n'), ((3584, 3619), 'numpy.ones', 'np.ones', (['image.shape'], {'dtype': '"""uint8"""'}), "(image.shape, dtype='uint8')\n", (3591, 3619), True, 'import numpy as np\n'), ((5505, 5533), 'cv2.merge', 'cv2.merge', (['[zeros, zeros, R]'], {}), '([zeros, zeros, R])\n', (5514, 5533), False, 'import cv2\n'), ((5555, 5583), 'cv2.merge', 'cv2.merge', (['[zeros, G, zeros]'], {}), '([zeros, G, zeros])\n', (5564, 5583), False, 'import cv2\n'), ((5604, 5632), 'cv2.merge', 'cv2.merge', (['[B, zeros, zeros]'], {}), '([B, zeros, zeros])\n', (5613, 5632), False, 'import cv2\n'), ((3154, 3169), 'numpy.uint8', 'np.uint8', (['[200]'], {}), '([200])\n', (3162, 3169), True, 'import numpy as np\n'), ((3171, 3186), 'numpy.uint8', 'np.uint8', (['[100]'], {}), '([100])\n', (3179, 3186), True, 'import numpy as np\n'), ((3229, 3243), 'numpy.uint8', 'np.uint8', (['[50]'], {}), '([50])\n', (3237, 3243), True, 'import numpy as np\n'), ((3246, 3261), 'numpy.uint8', 'np.uint8', (['[100]'], {}), '([100])\n', (3254, 3261), True, 'import numpy as np\n'), ((3346, 3361), 'numpy.uint8', 'np.uint8', (['[200]'], {}), '([200])\n', (3354, 3361), True, 'import numpy as np\n'), ((3364, 3379), 'numpy.uint8', 'np.uint8', (['[100]'], {}), '([100])\n', (3372, 3379), True, 'import numpy as np\n'), ((3413, 3427), 'numpy.uint8', 'np.uint8', (['[50]'], {}), '([50])\n', (3421, 3427), True, 'import numpy as np\n'), ((3431, 3446), 'numpy.uint8', 'np.uint8', (['[100]'], {}), '([100])\n', (3439, 3446), True, 'import numpy as np\n')]
|
import pytest
import mxnet as mx
import numpy as np
from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples
from mxfusion.components.distributions import Gamma, GammaMeanVariance
from mxfusion.util.testutils import numpy_array_reshape
from mxfusion.util.testutils import MockMXNetRandomGenerator
@pytest.mark.usefixtures("set_seed")
class TestGammaDistribution(object):
@pytest.mark.parametrize("dtype, mean, mean_isSamples, variance, variance_isSamples, rv, rv_isSamples, num_samples", [
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(5,3,2)), True, 5),
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(3,2)), False, 5),
(np.float64, np.random.uniform(0,10,size=(2)), False, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(3,2)), False, 5),
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(5,3,2)), True, np.random.uniform(1,10,size=(5,3,2)), True, 5),
(np.float32, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(5,3,2)), True, 5),
])
def test_log_pdf_mean_variance(self, dtype, mean, mean_isSamples, variance, variance_isSamples,
rv, rv_isSamples, num_samples):
import scipy as sp
isSamples_any = any([mean_isSamples, variance_isSamples, rv_isSamples])
rv_shape = rv.shape[1:] if rv_isSamples else rv.shape
n_dim = 1 + len(rv.shape) if isSamples_any and not rv_isSamples else len(rv.shape)
mean_np = numpy_array_reshape(mean, mean_isSamples, n_dim)
variance_np = numpy_array_reshape(variance, variance_isSamples, n_dim)
rv_np = numpy_array_reshape(rv, rv_isSamples, n_dim)
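        # convert mean/variance to the shape/rate parameterisation: for Gamma(alpha, beta),
        # mean = alpha/beta and variance = alpha/beta**2, so beta = mean/variance and alpha = mean*beta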
        beta_np = mean_np / variance_np
        alpha_np = mean_np * beta_np
        log_pdf_np = sp.stats.gamma.logpdf(rv_np, a=alpha_np, loc=0, scale=1./beta_np)
        mean_mx = mx.nd.array(mean, dtype=dtype)
        if not mean_isSamples:
            mean_mx = add_sample_dimension(mx.nd, mean_mx)
        variance_mx = mx.nd.array(variance, dtype=dtype)
        if not variance_isSamples:
            variance_mx = add_sample_dimension(mx.nd, variance_mx)
        rv_mx = mx.nd.array(rv, dtype=dtype)
        if not rv_isSamples:
            rv_mx = add_sample_dimension(mx.nd, rv_mx)
        gamma = GammaMeanVariance.define_variable(mean=mean_mx, variance=variance_mx, shape=rv_shape, dtype=dtype).factor
        variables = {gamma.mean.uuid: mean_mx, gamma.variance.uuid: variance_mx, gamma.random_variable.uuid: rv_mx}
        log_pdf_rt = gamma.log_pdf(F=mx.nd, variables=variables)
        assert np.issubdtype(log_pdf_rt.dtype, dtype)
        assert is_sampled_array(mx.nd, log_pdf_rt) == isSamples_any
        if isSamples_any:
            assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
        if np.issubdtype(dtype, np.float64):
            rtol, atol = 1e-7, 1e-10
        else:
            rtol, atol = 1e-4, 1e-5
        assert np.allclose(log_pdf_np, log_pdf_rt.asnumpy(), rtol=rtol, atol=atol)
    @pytest.mark.parametrize(
        "dtype, mean, mean_isSamples, variance, variance_isSamples, rv_shape, num_samples",[
        (np.float64, np.random.rand(5,2), True, np.random.rand(2)+0.1, False, (3,2), 5),
        (np.float64, np.random.rand(2), False, np.random.rand(5,2)+0.1, True, (3,2), 5),
        (np.float64, np.random.rand(2), False, np.random.rand(2)+0.1, False, (3,2), 5),
        (np.float64, np.random.rand(5,2), True, np.random.rand(5,3,2)+0.1, True, (3,2), 5),
        (np.float32, np.random.rand(5,2), True, np.random.rand(2)+0.1, False, (3,2), 5),
        ])
    def test_draw_samples_mean_variance(self, dtype, mean, mean_isSamples, variance,
                                        variance_isSamples, rv_shape, num_samples):
        n_dim = 1 + len(rv_shape)
        out_shape = (num_samples,) + rv_shape
        mean_np = mx.nd.array(np.broadcast_to(numpy_array_reshape(mean, mean_isSamples, n_dim), shape=out_shape), dtype=dtype)
        variance_np = mx.nd.array(np.broadcast_to(numpy_array_reshape(variance, variance_isSamples, n_dim), shape=out_shape), dtype=dtype)
        gamma = GammaMeanVariance.define_variable(shape=rv_shape, dtype=dtype).factor
        mean_mx = mx.nd.array(mean, dtype=dtype)
        if not mean_isSamples:
            mean_mx = add_sample_dimension(mx.nd, mean_mx)
        variance_mx = mx.nd.array(variance, dtype=dtype)
        if not variance_isSamples:
            variance_mx = add_sample_dimension(mx.nd, variance_mx)
        variables = {gamma.mean.uuid: mean_mx, gamma.variance.uuid: variance_mx}
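        # seed identically before both draws so the distribution's sampler and the
        # reference mx.nd.random.gamma below consume the same random stream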
        mx.random.seed(0)
        rv_samples_rt = gamma.draw_samples(
            F=mx.nd, variables=variables, num_samples=num_samples)
        mx.random.seed(0)
        beta_np = mean_np / variance_np
        alpha_np = mean_np * beta_np
        rv_samples_mx = mx.nd.random.gamma(alpha=alpha_np, beta=beta_np, dtype=dtype)
        assert np.issubdtype(rv_samples_rt.dtype, dtype)
        assert is_sampled_array(mx.nd, rv_samples_rt)
        assert get_num_samples(mx.nd, rv_samples_rt) == num_samples
        if np.issubdtype(dtype, np.float64):
            rtol, atol = 1e-7, 1e-10
        else:
            rtol, atol = 1e-4, 1e-5
        assert np.allclose(rv_samples_mx.asnumpy(), rv_samples_rt.asnumpy(), rtol=rtol, atol=atol)
@pytest.mark.parametrize("dtype, alpha, alpha_isSamples, beta, beta_isSamples, rv, rv_isSamples, num_samples", [
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(5,3,2)), True, 5),
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(3,2)), False, 5),
(np.float64, np.random.uniform(0,10,size=(2)), False, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(3,2)), False, 5),
(np.float64, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(5,3,2)), True, np.random.uniform(1,10,size=(5,3,2)), True, 5),
(np.float32, np.random.uniform(0,10,size=(5,2)), True, np.random.uniform(1,10,size=(2)), False, np.random.uniform(1,10,size=(5,3,2)), True, 5),
])
def test_log_pdf(self, dtype, alpha, alpha_isSamples, beta, beta_isSamples,
rv, rv_isSamples, num_samples):
import scipy as sp
isSamples_any = any([alpha_isSamples, beta_isSamples, rv_isSamples])
rv_shape = rv.shape[1:] if rv_isSamples else rv.shape
n_dim = 1 + len(rv.shape) if isSamples_any and not rv_isSamples else len(rv.shape)
alpha_np = numpy_array_reshape(alpha, alpha_isSamples, n_dim)
beta_np = numpy_array_reshape(beta, beta_isSamples, n_dim)
rv_np = numpy_array_reshape(rv, rv_isSamples, n_dim)
log_pdf_np = sp.stats.gamma.logpdf(rv_np, a=alpha_np, loc=0, scale=1./beta_np)
gamma = Gamma.define_variable(shape=rv_shape, dtype=dtype).factor
alpha_mx = mx.nd.array(alpha, dtype=dtype)
if not alpha_isSamples:
alpha_mx = add_sample_dimension(mx.nd, alpha_mx)
beta_mx = mx.nd.array(beta, dtype=dtype)
if not beta_isSamples:
beta_mx = add_sample_dimension(mx.nd, beta_mx)
rv_mx = mx.nd.array(rv, dtype=dtype)
if not rv_isSamples:
rv_mx = add_sample_dimension(mx.nd, rv_mx)
variables = {gamma.alpha.uuid: alpha_mx, gamma.beta.uuid: beta_mx, gamma.random_variable.uuid: rv_mx}
log_pdf_rt = gamma.log_pdf(F=mx.nd, variables=variables)
assert np.issubdtype(log_pdf_rt.dtype, dtype)
assert is_sampled_array(mx.nd, log_pdf_rt) == isSamples_any
if isSamples_any:
assert get_num_samples(mx.nd, log_pdf_rt) == num_samples
if np.issubdtype(dtype, np.float64):
rtol, atol = 1e-7, 1e-10
else:
rtol, atol = 1e-4, 1e-5
assert np.allclose(log_pdf_np, log_pdf_rt.asnumpy(), rtol=rtol, atol=atol)
    @pytest.mark.parametrize(
        "dtype, alpha, alpha_isSamples, beta, beta_isSamples, rv_shape, num_samples",[
        (np.float64, np.random.rand(5,2), True, np.random.rand(2)+0.1, False, (3,2), 5),
        (np.float64, np.random.rand(2), False, np.random.rand(5,2)+0.1, True, (3,2), 5),
        (np.float64, np.random.rand(2), False, np.random.rand(2)+0.1, False, (3,2), 5),
        (np.float64, np.random.rand(5,2), True, np.random.rand(5,3,2)+0.1, True, (3,2), 5),
        (np.float32, np.random.rand(5,2), True, np.random.rand(2)+0.1, False, (3,2), 5),
        ])
    def test_draw_samples(self, dtype, alpha, alpha_isSamples, beta,
                          beta_isSamples, rv_shape, num_samples):
        n_dim = 1 + len(rv_shape)
        out_shape = (num_samples,) + rv_shape
        alpha_np = mx.nd.array(np.broadcast_to(numpy_array_reshape(alpha, alpha_isSamples, n_dim), shape=out_shape), dtype=dtype)
        beta_np = mx.nd.array(np.broadcast_to(numpy_array_reshape(beta, beta_isSamples, n_dim), shape=out_shape), dtype=dtype)
        gamma = Gamma.define_variable(shape=rv_shape, dtype=dtype).factor
        alpha_mx = mx.nd.array(alpha, dtype=dtype)
        if not alpha_isSamples:
            alpha_mx = add_sample_dimension(mx.nd, alpha_mx)
        beta_mx = mx.nd.array(beta, dtype=dtype)
        if not beta_isSamples:
            beta_mx = add_sample_dimension(mx.nd, beta_mx)
        variables = {gamma.alpha.uuid: alpha_mx, gamma.beta.uuid: beta_mx}
        mx.random.seed(0)
        rv_samples_rt = gamma.draw_samples(
            F=mx.nd, variables=variables, num_samples=num_samples)
        mx.random.seed(0)
        rv_samples_mx = mx.nd.random.gamma(alpha=alpha_np, beta=beta_np, dtype=dtype)
        assert np.issubdtype(rv_samples_rt.dtype, dtype)
        assert is_sampled_array(mx.nd, rv_samples_rt)
        assert get_num_samples(mx.nd, rv_samples_rt) == num_samples
        if np.issubdtype(dtype, np.float64):
            rtol, atol = 1e-7, 1e-10
        else:
            rtol, atol = 1e-4, 1e-5
        assert np.allclose(rv_samples_mx.asnumpy(), rv_samples_rt.asnumpy(), rtol=rtol, atol=atol)
|
[
"numpy.random.uniform",
"mxfusion.components.distributions.GammaMeanVariance.define_variable",
"mxfusion.components.distributions.Gamma.define_variable",
"mxfusion.components.variables.runtime_variable.is_sampled_array",
"mxnet.random.seed",
"numpy.random.rand",
"scipy.stats.gamma.logpdf",
"mxfusion.components.variables.runtime_variable.add_sample_dimension",
"mxnet.nd.array",
"mxnet.nd.random.gamma",
"mxfusion.util.testutils.numpy_array_reshape",
"pytest.mark.usefixtures",
"mxfusion.components.variables.runtime_variable.get_num_samples",
"numpy.issubdtype"
] |
[((358, 393), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""set_seed"""'], {}), "('set_seed')\n", (381, 393), False, 'import pytest\n'), ((1760, 1808), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['mean', 'mean_isSamples', 'n_dim'], {}), '(mean, mean_isSamples, n_dim)\n', (1779, 1808), False, 'from mxfusion.util.testutils import numpy_array_reshape\n'), ((1831, 1887), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['variance', 'variance_isSamples', 'n_dim'], {}), '(variance, variance_isSamples, n_dim)\n', (1850, 1887), False, 'from mxfusion.util.testutils import numpy_array_reshape\n'), ((1904, 1948), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['rv', 'rv_isSamples', 'n_dim'], {}), '(rv, rv_isSamples, n_dim)\n', (1923, 1948), False, 'from mxfusion.util.testutils import numpy_array_reshape\n'), ((2047, 2115), 'scipy.stats.gamma.logpdf', 'sp.stats.gamma.logpdf', (['rv_np'], {'a': 'alpha_np', 'loc': '(0)', 'scale': '(1.0 / beta_np)'}), '(rv_np, a=alpha_np, loc=0, scale=1.0 / beta_np)\n', (2068, 2115), True, 'import scipy as sp\n'), ((2132, 2162), 'mxnet.nd.array', 'mx.nd.array', (['mean'], {'dtype': 'dtype'}), '(mean, dtype=dtype)\n', (2143, 2162), True, 'import mxnet as mx\n'), ((2275, 2309), 'mxnet.nd.array', 'mx.nd.array', (['variance'], {'dtype': 'dtype'}), '(variance, dtype=dtype)\n', (2286, 2309), True, 'import mxnet as mx\n'), ((2428, 2456), 'mxnet.nd.array', 'mx.nd.array', (['rv'], {'dtype': 'dtype'}), '(rv, dtype=dtype)\n', (2439, 2456), True, 'import mxnet as mx\n'), ((2860, 2898), 'numpy.issubdtype', 'np.issubdtype', (['log_pdf_rt.dtype', 'dtype'], {}), '(log_pdf_rt.dtype, dtype)\n', (2873, 2898), True, 'import numpy as np\n'), ((3073, 3105), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.float64'], {}), '(dtype, np.float64)\n', (3086, 3105), True, 'import numpy as np\n'), ((4465, 4495), 'mxnet.nd.array', 'mx.nd.array', (['mean'], {'dtype': 'dtype'}), '(mean, dtype=dtype)\n', (4476, 4495), True, 'import mxnet as mx\n'), ((4608, 4642), 'mxnet.nd.array', 'mx.nd.array', (['variance'], {'dtype': 'dtype'}), '(variance, dtype=dtype)\n', (4619, 4642), True, 'import mxnet as mx\n'), ((4835, 4852), 'mxnet.random.seed', 'mx.random.seed', (['(0)'], {}), '(0)\n', (4849, 4852), True, 'import mxnet as mx\n'), ((4973, 4990), 'mxnet.random.seed', 'mx.random.seed', (['(0)'], {}), '(0)\n', (4987, 4990), True, 'import mxnet as mx\n'), ((5092, 5153), 'mxnet.nd.random.gamma', 'mx.nd.random.gamma', ([], {'alpha': 'alpha_np', 'beta': 'beta_np', 'dtype': 'dtype'}), '(alpha=alpha_np, beta=beta_np, dtype=dtype)\n', (5110, 5153), True, 'import mxnet as mx\n'), ((5170, 5211), 'numpy.issubdtype', 'np.issubdtype', (['rv_samples_rt.dtype', 'dtype'], {}), '(rv_samples_rt.dtype, dtype)\n', (5183, 5211), True, 'import numpy as np\n'), ((5227, 5265), 'mxfusion.components.variables.runtime_variable.is_sampled_array', 'is_sampled_array', (['mx.nd', 'rv_samples_rt'], {}), '(mx.nd, rv_samples_rt)\n', (5243, 5265), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((5346, 5378), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.float64'], {}), '(dtype, np.float64)\n', (5359, 5378), True, 'import numpy as np\n'), ((6868, 6918), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['alpha', 'alpha_isSamples', 'n_dim'], {}), '(alpha, alpha_isSamples, n_dim)\n', (6887, 6918), False, 'from mxfusion.util.testutils import numpy_array_reshape\n'), ((6937, 6985), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['beta', 'beta_isSamples', 'n_dim'], {}), '(beta, beta_isSamples, n_dim)\n', (6956, 6985), False, 'from mxfusion.util.testutils import numpy_array_reshape\n'), ((7002, 7046), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['rv', 'rv_isSamples', 'n_dim'], {}), '(rv, rv_isSamples, n_dim)\n', (7021, 7046), False, 'from mxfusion.util.testutils import numpy_array_reshape\n'), ((7068, 7136), 'scipy.stats.gamma.logpdf', 'sp.stats.gamma.logpdf', (['rv_np'], {'a': 'alpha_np', 'loc': '(0)', 'scale': '(1.0 / beta_np)'}), '(rv_np, a=alpha_np, loc=0, scale=1.0 / beta_np)\n', (7089, 7136), True, 'import scipy as sp\n'), ((7228, 7259), 'mxnet.nd.array', 'mx.nd.array', (['alpha'], {'dtype': 'dtype'}), '(alpha, dtype=dtype)\n', (7239, 7259), True, 'import mxnet as mx\n'), ((7371, 7401), 'mxnet.nd.array', 'mx.nd.array', (['beta'], {'dtype': 'dtype'}), '(beta, dtype=dtype)\n', (7382, 7401), True, 'import mxnet as mx\n'), ((7508, 7536), 'mxnet.nd.array', 'mx.nd.array', (['rv'], {'dtype': 'dtype'}), '(rv, dtype=dtype)\n', (7519, 7536), True, 'import mxnet as mx\n'), ((7812, 7850), 'numpy.issubdtype', 'np.issubdtype', (['log_pdf_rt.dtype', 'dtype'], {}), '(log_pdf_rt.dtype, dtype)\n', (7825, 7850), True, 'import numpy as np\n'), ((8025, 8057), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.float64'], {}), '(dtype, np.float64)\n', (8038, 8057), True, 'import numpy as np\n'), ((9372, 9403), 'mxnet.nd.array', 'mx.nd.array', (['alpha'], {'dtype': 'dtype'}), '(alpha, dtype=dtype)\n', (9383, 9403), True, 'import mxnet as mx\n'), ((9515, 9545), 'mxnet.nd.array', 'mx.nd.array', (['beta'], {'dtype': 'dtype'}), '(beta, dtype=dtype)\n', (9526, 9545), True, 'import mxnet as mx\n'), ((9720, 9737), 'mxnet.random.seed', 'mx.random.seed', (['(0)'], {}), '(0)\n', (9734, 9737), True, 'import mxnet as mx\n'), ((9858, 9875), 'mxnet.random.seed', 'mx.random.seed', (['(0)'], {}), '(0)\n', (9872, 9875), True, 'import mxnet as mx\n'), ((9900, 9961), 'mxnet.nd.random.gamma', 'mx.nd.random.gamma', ([], {'alpha': 'alpha_np', 'beta': 'beta_np', 'dtype': 'dtype'}), '(alpha=alpha_np, beta=beta_np, dtype=dtype)\n', (9918, 9961), True, 'import mxnet as mx\n'), ((9978, 10019), 'numpy.issubdtype', 'np.issubdtype', (['rv_samples_rt.dtype', 'dtype'], {}), '(rv_samples_rt.dtype, dtype)\n', (9991, 10019), True, 'import numpy as np\n'), ((10035, 10073), 'mxfusion.components.variables.runtime_variable.is_sampled_array', 'is_sampled_array', (['mx.nd', 'rv_samples_rt'], {}), '(mx.nd, rv_samples_rt)\n', (10051, 10073), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((10154, 10186), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.float64'], {}), '(dtype, np.float64)\n', (10167, 10186), True, 'import numpy as np\n'), ((2216, 2252), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'mean_mx'], {}), '(mx.nd, mean_mx)\n', (2236, 2252), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((2371, 2411), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'variance_mx'], {}), '(mx.nd, variance_mx)\n', (2391, 2411), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((2506, 2540), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'rv_mx'], {}), '(mx.nd, rv_mx)\n', (2526, 2540), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((2557, 2660), 'mxfusion.components.distributions.GammaMeanVariance.define_variable', 'GammaMeanVariance.define_variable', ([], {'mean': 'mean_mx', 'variance': 'variance_mx', 'shape': 'rv_shape', 'dtype': 'dtype'}), '(mean=mean_mx, variance=variance_mx, shape\n =rv_shape, dtype=dtype)\n', (2590, 2660), False, 'from mxfusion.components.distributions import Gamma, GammaMeanVariance\n'), ((2914, 2949), 'mxfusion.components.variables.runtime_variable.is_sampled_array', 'is_sampled_array', (['mx.nd', 'log_pdf_rt'], {}), '(mx.nd, log_pdf_rt)\n', (2930, 2949), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((4377, 4439), 'mxfusion.components.distributions.GammaMeanVariance.define_variable', 'GammaMeanVariance.define_variable', ([], {'shape': 'rv_shape', 'dtype': 'dtype'}), '(shape=rv_shape, dtype=dtype)\n', (4410, 4439), False, 'from mxfusion.components.distributions import Gamma, GammaMeanVariance\n'), ((4549, 4585), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'mean_mx'], {}), '(mx.nd, mean_mx)\n', (4569, 4585), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((4704, 4744), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'variance_mx'], {}), '(mx.nd, variance_mx)\n', (4724, 4744), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((5281, 5318), 'mxfusion.components.variables.runtime_variable.get_num_samples', 'get_num_samples', (['mx.nd', 'rv_samples_rt'], {}), '(mx.nd, rv_samples_rt)\n', (5296, 5318), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((7151, 7201), 'mxfusion.components.distributions.Gamma.define_variable', 'Gamma.define_variable', ([], {'shape': 'rv_shape', 'dtype': 'dtype'}), '(shape=rv_shape, dtype=dtype)\n', (7172, 7201), False, 'from mxfusion.components.distributions import Gamma, GammaMeanVariance\n'), ((7315, 7352), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'alpha_mx'], {}), '(mx.nd, alpha_mx)\n', (7335, 7352), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((7455, 7491), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'beta_mx'], {}), '(mx.nd, beta_mx)\n', (7475, 7491), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((7586, 7620), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'rv_mx'], {}), '(mx.nd, rv_mx)\n', (7606, 7620), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((7866, 7901), 'mxfusion.components.variables.runtime_variable.is_sampled_array', 'is_sampled_array', (['mx.nd', 'log_pdf_rt'], {}), '(mx.nd, log_pdf_rt)\n', (7882, 7901), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((9295, 9345), 'mxfusion.components.distributions.Gamma.define_variable', 'Gamma.define_variable', ([], {'shape': 'rv_shape', 'dtype': 'dtype'}), '(shape=rv_shape, dtype=dtype)\n', (9316, 9345), False, 'from mxfusion.components.distributions import Gamma, GammaMeanVariance\n'), ((9459, 9496), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'alpha_mx'], {}), '(mx.nd, alpha_mx)\n', (9479, 9496), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((9599, 9635), 'mxfusion.components.variables.runtime_variable.add_sample_dimension', 'add_sample_dimension', (['mx.nd', 'beta_mx'], {}), '(mx.nd, beta_mx)\n', (9619, 9635), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((10089, 10126), 'mxfusion.components.variables.runtime_variable.get_num_samples', 'get_num_samples', (['mx.nd', 'rv_samples_rt'], {}), '(mx.nd, rv_samples_rt)\n', (10104, 10126), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((3012, 3046), 'mxfusion.components.variables.runtime_variable.get_num_samples', 'get_num_samples', (['mx.nd', 'log_pdf_rt'], {}), '(mx.nd, log_pdf_rt)\n', (3027, 3046), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((576, 613), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(5, 2)'}), '(0, 10, size=(5, 2))\n', (593, 613), True, 'import numpy as np\n'), ((619, 651), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(2)'}), '(1, 10, size=2)\n', (636, 651), True, 'import numpy as np\n'), ((660, 700), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(5, 3, 2)'}), '(1, 10, size=(5, 3, 2))\n', (677, 700), True, 'import numpy as np\n'), ((729, 766), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(5, 2)'}), '(0, 10, size=(5, 2))\n', (746, 766), True, 'import numpy as np\n'), ((771, 803), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(2)'}), '(1, 10, size=2)\n', (788, 803), True, 'import numpy as np\n'), ((812, 849), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(3, 2)'}), '(1, 10, size=(3, 2))\n', (829, 849), True, 'import numpy as np\n'), ((880, 912), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(2)'}), '(0, 10, size=2)\n', (897, 912), True, 'import numpy as np\n'), ((921, 953), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(2)'}), '(1, 10, size=2)\n', (938, 953), True, 'import numpy as np\n'), ((962, 999), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(3, 2)'}), '(1, 10, size=(3, 2))\n', (979, 999), True, 'import numpy as np\n'), ((1030, 1067), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(5, 2)'}), '(0, 10, size=(5, 2))\n', (1047, 1067), True, 'import numpy as np\n'), ((1072, 1112), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(5, 3, 2)'}), '(1, 10, size=(5, 3, 2))\n', (1089, 1112), True, 'import numpy as np\n'), ((1116, 1156), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(5, 3, 2)'}), '(1, 10, size=(5, 3, 2))\n', (1133, 1156), True, 'import numpy as np\n'), ((1185, 1222), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(5, 2)'}), '(0, 10, size=(5, 2))\n', (1202, 1222), True, 'import numpy as np\n'), ((1227, 1259), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(2)'}), '(1, 10, size=2)\n', (1244, 1259), True, 'import numpy as np\n'), ((1268, 1308), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(5, 3, 2)'}), '(1, 10, size=(5, 3, 2))\n', (1285, 1308), True, 'import numpy as np\n'), ((4140, 4188), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['mean', 'mean_isSamples', 'n_dim'], {}), '(mean, mean_isSamples, n_dim)\n', (4159, 4188), False, 'from mxfusion.util.testutils import numpy_array_reshape\n'), ((4271, 4327), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['variance', 'variance_isSamples', 'n_dim'], {}), '(variance, variance_isSamples, n_dim)\n', (4290, 4327), False, 'from mxfusion.util.testutils import numpy_array_reshape\n'), ((3422, 3442), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (3436, 3442), True, 'import numpy as np\n'), ((3511, 3528), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (3525, 3528), True, 'import numpy as np\n'), ((3600, 3617), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (3614, 3617), True, 'import numpy as np\n'), ((3688, 3708), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (3702, 3708), True, 'import numpy as np\n'), ((3780, 3800), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (3794, 3800), True, 'import numpy as np\n'), ((7964, 7998), 'mxfusion.components.variables.runtime_variable.get_num_samples', 'get_num_samples', (['mx.nd', 'log_pdf_rt'], {}), '(mx.nd, log_pdf_rt)\n', (7979, 7998), False, 'from mxfusion.components.variables.runtime_variable import add_sample_dimension, is_sampled_array, get_num_samples\n'), ((5706, 5743), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(5, 2)'}), '(0, 10, size=(5, 2))\n', (5723, 5743), True, 'import numpy as np\n'), ((5749, 5781), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(2)'}), '(1, 10, size=2)\n', (5766, 5781), True, 'import numpy as np\n'), ((5790, 5830), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(5, 3, 2)'}), '(1, 10, size=(5, 3, 2))\n', (5807, 5830), True, 'import numpy as np\n'), ((5859, 5896), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(5, 2)'}), '(0, 10, size=(5, 2))\n', (5876, 5896), True, 'import numpy as np\n'), ((5901, 5933), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(2)'}), '(1, 10, size=2)\n', (5918, 5933), True, 'import numpy as np\n'), ((5942, 5979), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(3, 2)'}), '(1, 10, size=(3, 2))\n', (5959, 5979), True, 'import numpy as np\n'), ((6010, 6042), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(2)'}), '(0, 10, size=2)\n', (6027, 6042), True, 'import numpy as np\n'), ((6051, 6083), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(2)'}), '(1, 10, size=2)\n', (6068, 6083), True, 'import numpy as np\n'), ((6092, 6129), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(3, 2)'}), '(1, 10, size=(3, 2))\n', (6109, 6129), True, 'import numpy as np\n'), ((6160, 6197), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(5, 2)'}), '(0, 10, size=(5, 2))\n', (6177, 6197), True, 'import numpy as np\n'), ((6202, 6242), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(5, 3, 2)'}), '(1, 10, size=(5, 3, 2))\n', (6219, 6242), True, 'import numpy as np\n'), ((6246, 6286), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(5, 3, 2)'}), '(1, 10, size=(5, 3, 2))\n', (6263, 6286), True, 'import numpy as np\n'), ((6315, 6352), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {'size': '(5, 2)'}), '(0, 10, size=(5, 2))\n', (6332, 6352), True, 'import numpy as np\n'), ((6357, 6389), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(2)'}), '(1, 10, size=2)\n', (6374, 6389), True, 'import numpy as np\n'), ((6398, 6438), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(5, 3, 2)'}), '(1, 10, size=(5, 3, 2))\n', (6415, 6438), True, 'import numpy as np\n'), ((9068, 9118), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['alpha', 'alpha_isSamples', 'n_dim'], {}), '(alpha, alpha_isSamples, n_dim)\n', (9087, 9118), False, 'from mxfusion.util.testutils import numpy_array_reshape\n'), ((9197, 9245), 'mxfusion.util.testutils.numpy_array_reshape', 'numpy_array_reshape', (['beta', 'beta_isSamples', 'n_dim'], {}), '(beta, beta_isSamples, n_dim)\n', (9216, 9245), False, 'from mxfusion.util.testutils import numpy_array_reshape\n'), ((8369, 8389), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (8383, 8389), True, 'import numpy as np\n'), ((8458, 8475), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (8472, 8475), True, 'import numpy as np\n'), ((8547, 8564), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (8561, 8564), True, 'import numpy as np\n'), ((8635, 8655), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (8649, 8655), True, 'import numpy as np\n'), ((8727, 8747), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (8741, 8747), True, 'import numpy as np\n'), ((3449, 3466), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (3463, 3466), True, 'import numpy as np\n'), ((3537, 3557), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (3551, 3557), True, 'import numpy as np\n'), ((3626, 3643), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (3640, 3643), True, 'import numpy as np\n'), ((3715, 3738), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (3729, 3738), True, 'import numpy as np\n'), ((3807, 3824), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (3821, 3824), True, 'import numpy as np\n'), ((8396, 8413), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (8410, 8413), True, 'import numpy as np\n'), ((8484, 8504), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (8498, 8504), True, 'import numpy as np\n'), ((8573, 8590), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (8587, 8590), True, 'import numpy as np\n'), ((8662, 8685), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (8676, 8685), True, 'import numpy as np\n'), ((8754, 8771), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (8768, 8771), True, 'import numpy as np\n')]
|
"""
Segmenting real-world sounds correctly with synthetic sounds
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It's easy to figure out if a sound is being correctly segmented when the
signal at hand is well defined and repeatable, as in many technological/
engineering applications. However, in bioacoustics, or in
a more open-ended field-recording situation, it can be very hard
to know what kind of signal will be recorded, or what its
parameters are.
Just because the package produces an output, it doesn't mean the
result is meaningful. Given a sensible set of parameters,
any function will produce *some* output. This
means that with one set of parameters/methods the CF segment might
be 10 ms long, while with another, more lax, parameter set it might
be 20 ms long! Remember, as always, `GIGO <https://en.wikipedia.org/wiki/Garbage_in,_garbage_out>`_ (Garbage In, Garbage Out) :P.
So how do you segment a sound into CF and FM segments
accurately?
Synthetic calls to the rescue
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Synthetic calls are sounds that we know to have specific properties,
and they can be used to test whether a parameter set/segmentation method
is capable of correctly segmenting our real-world sounds and
uncovering their true underlying properties.
The `simulate_calls` module has a bunch of helper functions
which allow the creation of FM sweeps, constant frequency
tones and silences. In combination, these can be used to
get a feeling for which segmentation methods and parameter sets
work well for your real-world sound (bat, bird, cat, <insert sound source of choice>)
Generating a 'classical' CF-FM bat call
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
from itsfm.simulate_calls import make_cffm_call,make_tone, make_fm_chirp, silence
from itsfm.view_horseshoebat_call import visualise_call
from itsfm.segment_horseshoebat_call import segment_call_into_cf_fm
from itsfm.signal_processing import dB, rms
fs = 96000
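# each call component below is specified as a (frequency in Hz, duration in s) tuple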
call_props = {'cf':(40000, 0.01),
'upfm':(38000,0.002),
'downfm':(30000,0.003)}
cffm_call, freq_profile = make_cffm_call(call_props, fs)
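# taper the call on and off with a Tukey window to avoid abrupt onset/offset clicks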
cffm_call *= signal.tukey(cffm_call.size, 0.1)
w,s = visualise_call(cffm_call, fs, fft_size=128)
# %%
# Remember, the terminal frequencies and durations of the CF-FM call can be
# adjusted to match the calls of your species of interest!
# %%
# A multi-component bird call
# >>>>>>>>>>>>>>>>>>>>>>>>>>>
#
# Let's make a sound with two FMs and CFs, and gaps in between
fs = 44100
fm1 = make_fm_chirp(1000, 5000, 0.01, fs)
cf1 = make_tone(5000, 0.005, fs)
fm2 = make_fm_chirp(5500, 9000, 0.01, fs)
cf2 = make_tone(8000, 0.005, fs)
gap = silence(0.005, fs)
synth_birdcall = np.concatenate((gap,
fm1, gap,
cf1, gap,
fm2, gap,
cf2,
gap))
w, s = visualise_call(synth_birdcall, fs, fft_size=64)
# %%
# Let there be Noise
# >>>>>>>>>>>>>>>>>>
#
# Any kind of field recording *will* have some form of noise. Each of
# the segmentation methods is differently susceptible to noise, and it's
# a good idea to test how well they can tolerate it. For starters, let's
# just add white noise and simulate different signal-to-noise ratios (SNR).
noisy_bird_call = synth_birdcall.copy()
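# add white Gaussian noise with an rms of 10**(-10/20), i.e. about -10 dB re full scale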
noisy_bird_call += np.random.normal(0,10**(-10/20), noisy_bird_call.size)
noisy_bird_call /= np.max(np.abs(noisy_bird_call)) # keep sample values between +/- 1
# %%
# Estimate an approximate SNR by comparing the rms of a silent gap with that
# of a song component
level_background = dB(rms(noisy_bird_call[:gap.size]))
level_song = dB(rms(noisy_bird_call[gap.size:2*gap.size]))
snr_approx = level_song-level_background
print('The SNR is approximately: %f'%np.around(snr_approx))
w, s = visualise_call(noisy_bird_call, fs, fft_size=64)
# %%
# We could try to run the segmentation + measurement on a noisy sound straight away,
# but this might lead to poor measurements. Now, let's bandpass the audio
# to remove the ambient noise outside of the song's range.
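# %%
# A minimal band-pass sketch (an addition, not part of the original tutorial):
# a Butterworth band-pass around the song's frequency range, applied with
# zero-phase filtering. The 1-9.5 kHz corner frequencies are assumptions
# based on the synthetic song above -- adjust them to your own recording.
b, a = signal.butter(4, np.array([1000.0, 9500.0])/(fs*0.5), btype='bandpass')
bandpassed_bird_call = signal.filtfilt(b, a, noisy_bird_call)
w, s = visualise_call(bandpassed_bird_call, fs, fft_size=64)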
|
[
"itsfm.view_horseshoebat_call.visualise_call",
"numpy.abs",
"itsfm.simulate_calls.make_cffm_call",
"itsfm.simulate_calls.silence",
"scipy.signal.tukey",
"itsfm.signal_processing.rms",
"itsfm.simulate_calls.make_fm_chirp",
"numpy.around",
"itsfm.simulate_calls.make_tone",
"numpy.random.normal",
"numpy.concatenate"
] |
[((2172, 2202), 'itsfm.simulate_calls.make_cffm_call', 'make_cffm_call', (['call_props', 'fs'], {}), '(call_props, fs)\n', (2186, 2202), False, 'from itsfm.simulate_calls import make_cffm_call, make_tone, make_fm_chirp, silence\n'), ((2216, 2249), 'scipy.signal.tukey', 'signal.tukey', (['cffm_call.size', '(0.1)'], {}), '(cffm_call.size, 0.1)\n', (2228, 2249), True, 'import scipy.signal as signal\n'), ((2259, 2302), 'itsfm.view_horseshoebat_call.visualise_call', 'visualise_call', (['cffm_call', 'fs'], {'fft_size': '(128)'}), '(cffm_call, fs, fft_size=128)\n', (2273, 2302), False, 'from itsfm.view_horseshoebat_call import visualise_call\n'), ((2593, 2628), 'itsfm.simulate_calls.make_fm_chirp', 'make_fm_chirp', (['(1000)', '(5000)', '(0.01)', 'fs'], {}), '(1000, 5000, 0.01, fs)\n', (2606, 2628), False, 'from itsfm.simulate_calls import make_cffm_call, make_tone, make_fm_chirp, silence\n'), ((2635, 2661), 'itsfm.simulate_calls.make_tone', 'make_tone', (['(5000)', '(0.005)', 'fs'], {}), '(5000, 0.005, fs)\n', (2644, 2661), False, 'from itsfm.simulate_calls import make_cffm_call, make_tone, make_fm_chirp, silence\n'), ((2668, 2703), 'itsfm.simulate_calls.make_fm_chirp', 'make_fm_chirp', (['(5500)', '(9000)', '(0.01)', 'fs'], {}), '(5500, 9000, 0.01, fs)\n', (2681, 2703), False, 'from itsfm.simulate_calls import make_cffm_call, make_tone, make_fm_chirp, silence\n'), ((2710, 2736), 'itsfm.simulate_calls.make_tone', 'make_tone', (['(8000)', '(0.005)', 'fs'], {}), '(8000, 0.005, fs)\n', (2719, 2736), False, 'from itsfm.simulate_calls import make_cffm_call, make_tone, make_fm_chirp, silence\n'), ((2743, 2761), 'itsfm.simulate_calls.silence', 'silence', (['(0.005)', 'fs'], {}), '(0.005, fs)\n', (2750, 2761), False, 'from itsfm.simulate_calls import make_cffm_call, make_tone, make_fm_chirp, silence\n'), ((2780, 2841), 'numpy.concatenate', 'np.concatenate', (['(gap, fm1, gap, cf1, gap, fm2, gap, cf2, gap)'], {}), '((gap, fm1, gap, cf1, gap, fm2, gap, cf2, gap))\n', (2794, 2841), True, 'import numpy as np\n'), ((3017, 3064), 'itsfm.view_horseshoebat_call.visualise_call', 'visualise_call', (['synth_birdcall', 'fs'], {'fft_size': '(64)'}), '(synth_birdcall, fs, fft_size=64)\n', (3031, 3064), False, 'from itsfm.view_horseshoebat_call import visualise_call\n'), ((3474, 3533), 'numpy.random.normal', 'np.random.normal', (['(0)', '(10 ** (-10 / 20))', 'noisy_bird_call.size'], {}), '(0, 10 ** (-10 / 20), noisy_bird_call.size)\n', (3490, 3533), True, 'import numpy as np\n'), ((3942, 3990), 'itsfm.view_horseshoebat_call.visualise_call', 'visualise_call', (['noisy_bird_call', 'fs'], {'fft_size': '(64)'}), '(noisy_bird_call, fs, fft_size=64)\n', (3956, 3990), False, 'from itsfm.view_horseshoebat_call import visualise_call\n'), ((3555, 3578), 'numpy.abs', 'np.abs', (['noisy_bird_call'], {}), '(noisy_bird_call)\n', (3561, 3578), True, 'import numpy as np\n'), ((3739, 3769), 'itsfm.signal_processing.rms', 'rms', (['noisy_bird_call[gap.size]'], {}), '(noisy_bird_call[gap.size])\n', (3742, 3769), False, 'from itsfm.signal_processing import dB, rms\n'), ((3788, 3831), 'itsfm.signal_processing.rms', 'rms', (['noisy_bird_call[gap.size:2 * gap.size]'], {}), '(noisy_bird_call[gap.size:2 * gap.size])\n', (3791, 3831), False, 'from itsfm.signal_processing import dB, rms\n'), ((3911, 3932), 'numpy.around', 'np.around', (['snr_approx'], {}), '(snr_approx)\n', (3920, 3932), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 15:24:18 2020
@author: dhulls
"""
from anastruct import SystemElements
import numpy as np
class TrussModel:
def HF(self, young1=None, young2=None, area1=None, area2=None, P1=None, P2=None, P3=None, P4=None, P5=None, P6=None):
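        # High-fidelity model: the full truss, with 7 bottom-chord nodes
        # (nodes 1-7) and 6 top-chord nodes (nodes 8-13); the six vertical
        # point loads act on the top-chord nodes.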
ss = SystemElements()
# young1 = 2.1e11
# area1 = 2e-3
# young2 = 2.1e11
# area2 = 1e-3
ss.add_truss_element(location=[[0, 0], [4,0]], EA=(area1*young1))
ss.add_truss_element(location=[[4, 0], [8,0]], EA=(area1*young1))
ss.add_truss_element(location=[[8, 0], [12,0]], EA=(area1*young1))
ss.add_truss_element(location=[[12, 0], [16,0]], EA=(area1*young1))
ss.add_truss_element(location=[[16, 0], [20,0]], EA=(area1*young1))
ss.add_truss_element(location=[[20, 0], [24,0]], EA=(area1*young1))
ss.add_truss_element(location=[[2, 2], [6,2]], EA=(area1*young1))
ss.add_truss_element(location=[[6, 2], [10,2]], EA=(area1*young1))
ss.add_truss_element(location=[[10, 2], [14,2]], EA=(area1*young1))
ss.add_truss_element(location=[[14, 2], [18,2]], EA=(area1*young1))
ss.add_truss_element(location=[[18, 2], [22,2]], EA=(area1*young1))
ss.add_truss_element(location=[[0, 0], [2,2]], EA=(area2*young2))
ss.add_truss_element(location=[[2,2], [4,0]], EA=(area2*young2))
ss.add_truss_element(location=[[4,0], [6,2]], EA=(area2*young2))
ss.add_truss_element(location=[[6,2], [8,0]], EA=(area2*young2))
ss.add_truss_element(location=[[8,0], [10,2]], EA=(area2*young2))
ss.add_truss_element(location=[[10,2], [12,0]], EA=(area2*young2))
ss.add_truss_element(location=[[12,0], [14,2]], EA=(area2*young2))
ss.add_truss_element(location=[[14,2], [16,0]], EA=(area2*young2))
ss.add_truss_element(location=[[16,0], [18,2]], EA=(area2*young2))
ss.add_truss_element(location=[[18,2], [20,0]], EA=(area2*young2))
ss.add_truss_element(location=[[20,0], [22,2]], EA=(area2*young2))
ss.add_truss_element(location=[[22,2], [24,0]], EA=(area2*young2))
ss.add_support_hinged(node_id=1)
ss.add_support_roll(node_id=7, direction='x')
# P1 = -5e4
# P2 = -5e4
# P3 = -5e4
# P4 = -5e4
# P5 = -5e4
# P6 = -5e4
ss.point_load(node_id=8, Fy=P1)
ss.point_load(node_id=9, Fy=P2)
ss.point_load(node_id=10, Fy=P3)
ss.point_load(node_id=11, Fy=P4)
ss.point_load(node_id=12, Fy=P5)
ss.point_load(node_id=13, Fy=P6)
ss.solve()
# ss.show_structure()
# ss.show_displacement(factor=10)
K = ss.get_node_results_system(node_id=4)['uy']
return np.array(K)
def LF(self, young1=None, young2=None, area1=None, area2=None, P1=None, P2=None, P3=None, P4=None, P5=None, P6=None):
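        # Low-fidelity model: a coarse 5-node version of the same truss; the
        # six point loads are lumped onto the two top-chord nodes (4 and 5).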
ss = SystemElements()
# young1 = 2.1e11
# area1 = 2e-3
# young2 = 2.1e11
# area2 = 1e-3
ss.add_truss_element(location=[[0, 0], [12,0]], EA=(area1*young1))
ss.add_truss_element(location=[[12, 0], [24,0]], EA=(area1*young1))
ss.add_truss_element(location=[[6, 2], [18,2]], EA=(area1*young1))
ss.add_truss_element(location=[[0, 0], [6,2]], EA=(area2*young2))
ss.add_truss_element(location=[[6,2], [12,0]], EA=(area2*young2))
ss.add_truss_element(location=[[12,0], [18,2]], EA=(area2*young2))
ss.add_truss_element(location=[[18,2], [24,0]], EA=(area2*young2))
ss.add_support_hinged(node_id=1)
ss.add_support_roll(node_id=3, direction='x')
# P1 = -5e4
# P2 = -5e4
# P3 = -5e4
# P4 = -5e4
# P5 = -5e4
# P6 = -5e4
ss.point_load(node_id=4, Fy=np.sum([P1,P2,P3]))
ss.point_load(node_id=5, Fy=np.sum([P4,P5,P6]))
ss.solve()
# ss.show_structure()
# ss.show_displacement(factor=10)
K = ss.get_node_results_system(node_id=4)['uy']
return np.array(K)
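# A hypothetical usage sketch (an addition, not part of the original script):
# compare the high- and low-fidelity node-4 displacements for one load case.
# The numeric values mirror the commented-out defaults inside the methods.
if __name__ == "__main__":
    model = TrussModel()
    u_hf = model.HF(young1=2.1e11, young2=2.1e11, area1=2e-3, area2=1e-3,
                    P1=-5e4, P2=-5e4, P3=-5e4, P4=-5e4, P5=-5e4, P6=-5e4)
    u_lf = model.LF(young1=2.1e11, young2=2.1e11, area1=2e-3, area2=1e-3,
                    P1=-5e4, P2=-5e4, P3=-5e4, P4=-5e4, P5=-5e4, P6=-5e4)
    print(u_hf, u_lf)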
|
[
"numpy.array",
"anastruct.SystemElements",
"numpy.sum"
] |
[((333, 349), 'anastruct.SystemElements', 'SystemElements', ([], {}), '()\n', (347, 349), False, 'from anastruct import SystemElements\n'), ((2828, 2839), 'numpy.array', 'np.array', (['K'], {}), '(K)\n', (2836, 2839), True, 'import numpy as np\n'), ((2989, 3005), 'anastruct.SystemElements', 'SystemElements', ([], {}), '()\n', (3003, 3005), False, 'from anastruct import SystemElements\n'), ((4156, 4167), 'numpy.array', 'np.array', (['K'], {}), '(K)\n', (4164, 4167), True, 'import numpy as np\n'), ((3900, 3920), 'numpy.sum', 'np.sum', (['[P1, P2, P3]'], {}), '([P1, P2, P3])\n', (3906, 3920), True, 'import numpy as np\n'), ((3956, 3976), 'numpy.sum', 'np.sum', (['[P4, P5, P6]'], {}), '([P4, P5, P6])\n', (3962, 3976), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
test_size = 0.25
def sampling(**kwargs):
if kwargs['dataset'] == 'moons':
X, y = datasets.make_moons(n_samples=kwargs['sample_size'],
noise=kwargs['noise'],
random_state=5)
return train_test_split(X,
y.astype(str),
test_size=kwargs['test_size'],
random_state=5), X, y
elif kwargs['dataset'] == 'circles':
X, y = datasets.make_circles(n_samples=kwargs['sample_size'],
noise=kwargs['noise'],
factor=0.5,
random_state=1)
return train_test_split(X,
y.astype(str),
test_size=kwargs['test_size'],
random_state=5), X, y
elif kwargs['dataset'] == 'LS':
X, y = datasets.make_classification(n_samples=kwargs['sample_size'],
n_features=2,
n_redundant=0,
n_informative=2,
random_state=2,
n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += kwargs['noise'] * rng.uniform(size=X.shape)
return train_test_split(X,
y.astype(str),
test_size=kwargs['test_size'],
random_state=5), X, y
else:
        raise ValueError('unknown dataset: %s' % kwargs['dataset'])
def df_split(**kwargs):
_df = kwargs['df']
return train_test_split(
_df[['x', 'y']].to_numpy(),
_df['c'].to_numpy().astype(str),
test_size=kwargs['test_size'],
random_state=5), _df[['x', 'y']].to_numpy(), _df['c'].to_numpy()
def data_split(**kwargs):
return train_test_split(kwargs['X'],
kwargs['y'].astype(str),
test_size=kwargs['test_size'],
random_state=5), kwargs['X'], kwargs['y']
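# A small usage sketch (an addition, not part of the original module):
# `sampling` returns the train/test split list plus the full X and y arrays.
if __name__ == "__main__":
    (X_train, X_test, y_train, y_test), X, y = sampling(
        dataset='moons', sample_size=200, noise=0.2, test_size=test_size)
    print(X_train.shape, X_test.shape)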
|
[
"sklearn.datasets.make_circles",
"sklearn.datasets.make_classification",
"numpy.random.RandomState",
"sklearn.datasets.make_moons"
] |
[((217, 312), 'sklearn.datasets.make_moons', 'datasets.make_moons', ([], {'n_samples': "kwargs['sample_size']", 'noise': "kwargs['noise']", 'random_state': '(5)'}), "(n_samples=kwargs['sample_size'], noise=kwargs['noise'],\n random_state=5)\n", (236, 312), False, 'from sklearn import datasets\n'), ((636, 746), 'sklearn.datasets.make_circles', 'datasets.make_circles', ([], {'n_samples': "kwargs['sample_size']", 'noise': "kwargs['noise']", 'factor': '(0.5)', 'random_state': '(1)'}), "(n_samples=kwargs['sample_size'], noise=kwargs['noise'\n ], factor=0.5, random_state=1)\n", (657, 746), False, 'from sklearn import datasets\n'), ((1104, 1255), 'sklearn.datasets.make_classification', 'datasets.make_classification', ([], {'n_samples': "kwargs['sample_size']", 'n_features': '(2)', 'n_redundant': '(0)', 'n_informative': '(2)', 'random_state': '(2)', 'n_clusters_per_class': '(1)'}), "(n_samples=kwargs['sample_size'], n_features=2,\n n_redundant=0, n_informative=2, random_state=2, n_clusters_per_class=1)\n", (1132, 1255), False, 'from sklearn import datasets\n'), ((1487, 1511), 'numpy.random.RandomState', 'np.random.RandomState', (['(2)'], {}), '(2)\n', (1508, 1511), True, 'import numpy as np\n')]
|
import numpy as np
def get_predecessor(T,P):
# copy the inputs
T = np.copy(T)
P = np.copy(P)
P_size = P.shape[0]
T_size = T.shape[0]
adj = np.zeros((P_size + T_size,P_size + T_size))
# predecessor for Text
for i in range(1,T_size):
adj[i, i-1] = 1
# predecessor for Pattern
for i in range(1,P_size):
adj[T_size+i, T_size+i-1] = 1
return adj
def get_graph_struct(T, P, h_i, h_j, h_s):
# copy the inputs
T = np.copy(T)
P = np.copy(P)
P_size = P.shape[0]
T_size = T.shape[0]
adj = np.zeros((P_size + T_size,P_size + T_size))
for i in range(h_s+1, h_i):
adj[i, h_i] = 1
adj[T_size, T_size + h_j] = 1
for i in range(T_size):
adj[i, T_size+h_j] = 1
for i in range(P_size):
adj[i+T_size, h_i] = 1
return adj
def get_seq_mat(T,P):
n = T.shape[0]
m = P.shape[0]
    mat = np.eye(n+m)
# connect each character to its previous
for i in range(1,n+m):
if i == n:
# don't do it for the start of the pattern
continue
mat[i, i-1] = 1
    # connect each character in the text to its matching character in the pattern
for i in range(n):
for j in range(m):
if T[i] == P[j]:
mat[i, j+n] = 1
mat[j+n, i] = 1
# connect the start of the pattern with all character upfront
mat[n, n+1:] = 1
return mat
def get_t(T, P, s):
i = s
j = 0
N = T.shape[0]
M = P.shape[0]
while i < N:
if T[i] != P[j]:
return i
j +=1
i +=1
if j >= M:
return i
return N - 1
def get_bipartite_mat(T, P, s, num_classes=3):
'''
args
-----------------------------
T: the text
P: the pattern
s: current hint s
returns
-----------------------------
        mat: the constructed matrix, as follows:
            1- all irrelevant edges have a value of 0
            2- relevant edges have a value of 1 if the characters are equal,
               otherwise a value of 2
'''
# length of the text
N = T.shape[0]
# length of the pattern
M = P.shape[0]
    mat = np.zeros((N+M, N+M), dtype=int)
t = get_t(T, P, s)
for i in range(M):
p_char = P[i]
for j in range(s,t):
t_char = T[j]
if t_char == p_char:
mat[j, i+N] = 1
mat[i+N, j] = 1
else:
mat[j, i+N] = 2
mat[i+N, j] = 2
    one_hot_mat = np.zeros((N+M, N+M, num_classes), dtype=int)
for i in range(len(mat)):
for j in range(len(mat[0])):
class_id = mat[i, j]
one_hot_mat[i, j, class_id] = 1
return one_hot_mat
#=== *** ===#
def get_everything_matched_to_this_point(T, P, s):
'''
return a binary mask for the pattern
'''
    result = np.zeros(T.shape[0] + P.shape[0], dtype=int)
i = s
j = 0
while j < P.shape[0]:
if T[i] == P[j]:
result[T.shape[0]+j] = 1
i+=1
j+=1
else:
break
return result
def get_bipartite_mat_from_pattern_to_text(T, P, s):
# length of the text
N = T.shape[0]
# length of the pattern
M = P.shape[0]
    mat = np.zeros((N+M, N+M), dtype=int)
for i in range(M):
p_char = P[i]
for j in range(s,N):
t_char = T[j]
if t_char == p_char:
mat[j, i+N] = 1
mat[i+N, j] = 1
else:
mat[j, i+N] = 2
                mat[i+N, j] = 2
    return mat
def get_seq_mat_i_j(T, P , i ,j, s):
n = T.shape[0]
m = P.shape[0]
mat = np.zeros((n+m, n+m))
# connect each character to its previous
# for i in range(1,n+m):
# if i == n:
# # don't do it for the start of the pattern
# continue
# mat[i, i-1] = 1
# connect node i with node j
mat[i, j+n] = 1
mat[j+n, i] = 1
# connect node s with i
mat[s, i] = 1
mat[i,s] = 1
# connect first node in P with node
mat[n,n+j] = 1
return mat
def get_edge_mat(T, P, start, end):
'''
edge between start and end
'''
    n = T.shape[0]
    m = P.shape[0]
    mat = np.zeros((n+m, n+m))
mat[start, end] = 1
return mat
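# A tiny usage sketch (an addition, not part of the original module): build the
# sequence matrix for a text/pattern pair encoded as integer arrays.
if __name__ == "__main__":
    T = np.array([0, 1, 2, 0, 1])
    P = np.array([0, 1])
    print(get_seq_mat(T, P))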
|
[
"numpy.eye",
"numpy.zeros",
"numpy.copy"
] |
[((72, 82), 'numpy.copy', 'np.copy', (['T'], {}), '(T)\n', (79, 82), True, 'import numpy as np\n'), ((89, 99), 'numpy.copy', 'np.copy', (['P'], {}), '(P)\n', (96, 99), True, 'import numpy as np\n'), ((154, 198), 'numpy.zeros', 'np.zeros', (['(P_size + T_size, P_size + T_size)'], {}), '((P_size + T_size, P_size + T_size))\n', (162, 198), True, 'import numpy as np\n'), ((451, 461), 'numpy.copy', 'np.copy', (['T'], {}), '(T)\n', (458, 461), True, 'import numpy as np\n'), ((468, 478), 'numpy.copy', 'np.copy', (['P'], {}), '(P)\n', (475, 478), True, 'import numpy as np\n'), ((533, 577), 'numpy.zeros', 'np.zeros', (['(P_size + T_size, P_size + T_size)'], {}), '((P_size + T_size, P_size + T_size))\n', (541, 577), True, 'import numpy as np\n'), ((864, 877), 'numpy.eye', 'np.eye', (['(n + m)'], {}), '(n + m)\n', (870, 877), True, 'import numpy as np\n'), ((2028, 2066), 'numpy.zeros', 'np.zeros', (['(N + M, N + M)'], {'dtype': 'np.int'}), '((N + M, N + M), dtype=np.int)\n', (2036, 2066), True, 'import numpy as np\n'), ((2325, 2376), 'numpy.zeros', 'np.zeros', (['(N + M, N + M, num_classes)'], {'dtype': 'np.int'}), '((N + M, N + M, num_classes), dtype=np.int)\n', (2333, 2376), True, 'import numpy as np\n'), ((2653, 2700), 'numpy.zeros', 'np.zeros', (['(T.shape[0] + P.shape[0])'], {'dtype': 'np.int'}), '(T.shape[0] + P.shape[0], dtype=np.int)\n', (2661, 2700), True, 'import numpy as np\n'), ((2998, 3036), 'numpy.zeros', 'np.zeros', (['(N + M, N + M)'], {'dtype': 'np.int'}), '((N + M, N + M), dtype=np.int)\n', (3006, 3036), True, 'import numpy as np\n'), ((3340, 3364), 'numpy.zeros', 'np.zeros', (['(n + m, n + m)'], {}), '((n + m, n + m))\n', (3348, 3364), True, 'import numpy as np\n'), ((3823, 3847), 'numpy.zeros', 'np.zeros', (['(n + m, n + m)'], {}), '((n + m, n + m))\n', (3831, 3847), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, print_function
import os
import numpy as np
from subprocess import Popen, PIPE
from Bio.PDB.Polypeptide import aa1 as AA_STANDARD
from ....featuresComputer import FeatureComputerException
from ...seqToolManager import SeqToolManager
from .al2coWorkers.parsePsiBlast import parsePsiBlast
from utils import myMakeDir, tryToRemove
class Al2coManager(SeqToolManager):
'''
  Computes al2co and processes its outputs. Extends class SeqToolManager
'''
VAR_LIST= ["al2coScore", "al2coScoreNorm"]
BAD_SCORE_CONSERVATION = "-1048576" #Something went wrong tag
def __init__(self, computedFeatsRootDir, winSize=None, statusManager=None):
'''
:param computedFeatsRootDir: str. root path where results will be saved
:param winSize: int>=1 or None. The size of the windows for sliding window if desired
:param statusManager: class that implements .setStatus(msg) to communicate
'''
SeqToolManager.__init__(self, computedFeatsRootDir, winSize)
self.al2coOutPath= myMakeDir(self.computedFeatsRootDir,"al2co")
if winSize:
self.al2coPathWindowed= myMakeDir(self.computedFeatsRootDir,"al2co_wSize"+str(winSize))
else:
self.al2coPathWindowed= None
def getFinalPath(self):
'''
returns path where results are saved
:return al2coOutPath: str
'''
return self.al2coOutPath
def getFNames(self, prefixExtended):
'''
Returns a dict that contains the fnames that will be used by al2co
:param prefixExtended. prefix for output fnames.
:return list of fnames: [ fname1, fnam2, ...]
'''
al2coProc= os.path.join(self.al2coOutPath, prefixExtended+".al2co.gz")
fNames=[al2coProc]
if not self.winSize is None:
al2coWindowedOutName= os.path.join(self.al2coPathWindowed, prefixExtended+".wsize"+str(self.winSize)+".al2co.gz")
fNames+= [al2coWindowedOutName]
return fNames
def computeFromSeqStructMapper(self, seqStructMap, prefixExtended, psiblastOutName, pssmOutNameRaw):
'''
    Computes al2co for the sequence seqStr, which is contained in fastaInFname. This sequence is
associated with prefixExtended as an unambiguous id
:param seqStructMap: computeFeatures.seqStep.seqToolManagers.seqExtraction.SeqStructMapper
:param prefixExtended: str. unambiguous id of the sequence that will be the prefix of output names
:param psiblastOutName: str. Path to psiblast aligments results
:param pssmOutNameRaw: str. Path to psiblast pssms results
'''
msaFname= None
prefix, chainType, chainId= self.splitExtendedPrefix(prefixExtended)[:3]
seqStr, fastaFname= seqStructMap.getSeq(chainType, chainId) # repeat as psiBlastManager can modify seqs
seqStructMap.setCurrentSeq(seqStr, chainType, chainId)
if self.checkAlreayComputed(prefixExtended):
print("Al2co already computed for %s"%prefixExtended)
return 0
fNames= self.getFNames(prefixExtended)
print("launching al2co over %s"%prefixExtended)
al2coProcName= fNames[0]
al2coRawName= os.path.join(self.al2coOutPath, prefixExtended+".fasta.csv")
try:
if os.path.isfile(psiblastOutName):
alignedSeqsDict= parsePsiBlast( inputSeq=seqStr, psiBlastOut=psiblastOutName)
filteredSeqsFname= self.runCdHit(alignedSeqsDict, inputSeq=seqStr, psiBlastOut=psiblastOutName)
msaFname= self.runClustalW(filteredSeqsFname, psiBlastOut=psiblastOutName)
cmd= [self.al2coBin, "-i", msaFname,"-m", "0", "-f", "2", "-a", "F", "-b", "50",
"-g", "0.50", "-w", "1", "-c", "0", "-o", al2coRawName, "-t", al2coProcName]
print(" ".join(cmd))
process= Popen(cmd, stdout=PIPE, stderr=PIPE)
processOut= process.communicate()
if len(processOut[1])>0:
print("Error computing al2co. Caught stdin/stderr:\n",processOut[0],processOut[1])
else:
print("Error computing al2co. Psiout does not exists for %s"%(prefixExtended))
al2coRawName=None
dataList= self.processAl2co(seqStr, seqStructMap, prefixExtended, al2coRawName, al2coProcName)
if self.winSize:
self.makeWindowed( dataList, ["al2co", "al2coNorm"], [Al2coManager.BAD_SCORE_CONSERVATION]*2, [None]*2,
fNames[1])
except (Exception, KeyboardInterrupt):
self.tryToRemoveAllFnames(prefixExtended)
raise
finally:
if msaFname: tryToRemove(msaFname)
def processAl2co(self, seq, seqStructMap, prefixExtended, al2coRaw, al2coProc):
'''
Reads al2co output file and writes another one with tabulated format, headers and
some error checking.
:param: seq: str. Sequence of the chain
:param prefixExtended: str. unambiguous id of the sequence that will be the prefix of output names
:param al2coRaw: str. Path to al2co results
:param al2coProc: str. Path where formatted results will be saved.
'''
if al2coRaw is None:
conserData = [(letter, Al2coManager.BAD_SCORE_CONSERVATION) for letter in seq]
else:
try:
conserData = self.loadRawAl2co(al2coRaw)
except IOError:
conserData= [ (letter, Al2coManager.BAD_SCORE_CONSERVATION) for letter in seq]
prefix, chainType, chainId= self.splitExtendedPrefix(prefixExtended)[:3]
# print(len(conserData)); raw_input("enter")
try:
alcoIx=0
seqIx=0
seqLen= len(seq)
letters, conserVals = zip(* conserData)
conserVals= [float(elem) for elem in conserVals]
alcoLen= len(conserData)
dataList=[]
listOfRowsToPrint=[]
mean_val= np.mean(conserVals)
std_val= np.std(conserVals)
while seqIx<seqLen and alcoIx<alcoLen:
letter= seq[seqIx]
letterAl2co, consVal= conserData[alcoIx]
if letterAl2co== letter or (letterAl2co=="-" and letter=="X"):
structIndex= seqStructMap.seqToStructIndex(chainType, chainId, seqIx, asString= True)
# print(seqIx, letter, alcoIx, structIndex)
if structIndex:
if self.filterOutLabels and structIndex[-1].isalpha():
continue
else:
structIndex=str(seqIx)+"?"
if std_val!=0:
consValNormalized= (float(consVal)- mean_val)/std_val
else:
consValNormalized=float(consVal)
dataList.append( ( (chainId, structIndex,letter), ( [consVal], [str(consValNormalized)],) ) )
listOfRowsToPrint.append( "%s %s %s %s %s"%( chainId, structIndex, letter, consVal, consValNormalized) )
alcoIx+=1
seqIx+=1
elif not letter in AA_STANDARD and letterAl2co=="-":
alcoIx+=1
seqIx+=1
elif letterAl2co=="-":
alcoIx+=1
else:
print(conserData)
print(alcoIx, seqIx)
raise ValueError("Al2co mismatch %s %s "%(letterAl2co, letter))
# print(len(listOfRowsToPrint)); raw_input("enter to continue")
self.writeResultsFromDataDictSingleChain( {chainId: listOfRowsToPrint }, outName= al2coProc)
return dataList
except (KeyboardInterrupt, Exception):
print("Exception happend computing %s"%al2coProc)
tryToRemove(al2coProc)
raise
    finally:
      if al2coRaw is not None:
        tryToRemove(al2coRaw)
def loadRawAl2co(self, filename):
'''
Loads an al2co file
:param fname: str. Path to al2co file.
:return list of strings. ["row0_Al2co","row1Al2co"...]
'''
conserv= []
for line in open(filename):
lineArray=line.split()
if lineArray[0][0].isdigit():
conserv.append(lineArray[1:3])
else:
break
return conserv
def runCdHit(self, allHits, inputSeq, psiBlastOut, pairSeqIdThr=0.95):
tmpName= os.path.basename(psiBlastOut).split(".")[0]
tmpName= os.path.join(self.tmp, tmpName)
cdhitInName= tmpName+".in-cdhit"
cdhitOutName= tmpName+".out-cdhit"
try:
with open(cdhitInName, "w") as f:
for hit in allHits:
f.write("> %s\n"%(hit["target_full_id"]))
f.write("%s\n"%(hit["targetSeq"].replace("-","")) )
if(pairSeqIdThr > .70 and pairSeqIdThr <= 1.00): n=5
elif (pairSeqIdThr <= .70 and pairSeqIdThr >= .55): n=4
elif (pairSeqIdThr < .55 and pairSeqIdThr >= .50): n=3
elif (pairSeqIdThr < .50 and pairSeqIdThr >= .40): n=2
else: raise ValueError("Error, just .4<=pairSeqIdThr<=1.00 allowed")
cdhitCmd= [self.cdHitBin, "-i", cdhitInName, "-o", cdhitOutName, "-n", str(n),
"-c", str(pairSeqIdThr), "-T", str(self.psiBlastNThrs)]
print(" ".join(cdhitCmd))
proc = Popen(cdhitCmd, stdin= PIPE, stdout=PIPE, stderr=PIPE)
output= proc.communicate()
if output== None or output[1]!="" or "There was an error cd-hit psiblast" in output[0]:
print(output)
print ("Error when parsing %s for al2Co"%psiBlastOut)
raise FeatureComputerException("Error when cd-hit %s for al2Co"%psiBlastOut)
with open(cdhitOutName, "r+") as f:
fileData = f.read()
f.seek(0, 0)
f.write("> InputSeq\n")
f.write("%s\n"%(inputSeq.replace("-","")) )
f.write(fileData+"\n")
return cdhitOutName
except (Exception, KeyboardInterrupt):
tryToRemove(cdhitOutName)
raise
finally:
tryToRemove(cdhitInName)
def runClustalW(self, filteredSeqsFname, psiBlastOut, clustalWOutName=None):
tmpFnameCommon= ".".join(filteredSeqsFname.split(".")[:-1])
if clustalWOutName is None:
clustalWOutName= tmpFnameCommon+".clustalw"
clustalCommand=[self.clustalW, "-infile=%s"%filteredSeqsFname, "-outfile=%s"%clustalWOutName, "-outorder=INPUT"]
print(" ".join(clustalCommand))
try :
proc = Popen(clustalCommand, stdin= PIPE, stdout=PIPE, stderr=PIPE)
output= proc.communicate()
if output== None or output[1]!="" or "There was an error parsing psiblast, clustalw" in output[0]:
print(output)
print ("Error when clustalw %s for al2Co"%psiBlastOut)
raise FeatureComputerException("Error when clustalw %s for al2Co"%psiBlastOut)
return clustalWOutName
except (Exception, KeyboardInterrupt):
tryToRemove(clustalWOutName)
raise
finally:
tryToRemove(filteredSeqsFname)
tryToRemove(filteredSeqsFname+".clstr")
tryToRemove( tmpFnameCommon+".dnd")
|
[
"subprocess.Popen",
"os.path.basename",
"numpy.std",
"os.path.isfile",
"numpy.mean",
"utils.tryToRemove",
"os.path.join",
"utils.myMakeDir"
] |
[((1043, 1088), 'utils.myMakeDir', 'myMakeDir', (['self.computedFeatsRootDir', '"""al2co"""'], {}), "(self.computedFeatsRootDir, 'al2co')\n", (1052, 1088), False, 'from utils import myMakeDir, tryToRemove\n'), ((1654, 1715), 'os.path.join', 'os.path.join', (['self.al2coOutPath', "(prefixExtended + '.al2co.gz')"], {}), "(self.al2coOutPath, prefixExtended + '.al2co.gz')\n", (1666, 1715), False, 'import os\n'), ((3104, 3166), 'os.path.join', 'os.path.join', (['self.al2coOutPath', "(prefixExtended + '.fasta.csv')"], {}), "(self.al2coOutPath, prefixExtended + '.fasta.csv')\n", (3116, 3166), False, 'import os\n'), ((7860, 7891), 'os.path.join', 'os.path.join', (['self.tmp', 'tmpName'], {}), '(self.tmp, tmpName)\n', (7872, 7891), False, 'import os\n'), ((3183, 3214), 'os.path.isfile', 'os.path.isfile', (['psiblastOutName'], {}), '(psiblastOutName)\n', (3197, 3214), False, 'import os\n'), ((5638, 5657), 'numpy.mean', 'np.mean', (['conserVals'], {}), '(conserVals)\n', (5645, 5657), True, 'import numpy as np\n'), ((5673, 5691), 'numpy.std', 'np.std', (['conserVals'], {}), '(conserVals)\n', (5679, 5691), True, 'import numpy as np\n'), ((8689, 8742), 'subprocess.Popen', 'Popen', (['cdhitCmd'], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cdhitCmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n', (8694, 8742), False, 'from subprocess import Popen, PIPE\n'), ((9388, 9412), 'utils.tryToRemove', 'tryToRemove', (['cdhitInName'], {}), '(cdhitInName)\n', (9399, 9412), False, 'from utils import myMakeDir, tryToRemove\n'), ((9821, 9880), 'subprocess.Popen', 'Popen', (['clustalCommand'], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(clustalCommand, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n', (9826, 9880), False, 'from subprocess import Popen, PIPE\n'), ((10331, 10361), 'utils.tryToRemove', 'tryToRemove', (['filteredSeqsFname'], {}), '(filteredSeqsFname)\n', (10342, 10361), False, 'from utils import myMakeDir, tryToRemove\n'), ((10368, 10409), 'utils.tryToRemove', 'tryToRemove', (["(filteredSeqsFname + '.clstr')"], {}), "(filteredSeqsFname + '.clstr')\n", (10379, 10409), False, 'from utils import myMakeDir, tryToRemove\n'), ((10414, 10450), 'utils.tryToRemove', 'tryToRemove', (["(tmpFnameCommon + '.dnd')"], {}), "(tmpFnameCommon + '.dnd')\n", (10425, 10450), False, 'from utils import myMakeDir, tryToRemove\n'), ((3718, 3754), 'subprocess.Popen', 'Popen', (['cmd'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cmd, stdout=PIPE, stderr=PIPE)\n', (3723, 3754), False, 'from subprocess import Popen, PIPE\n'), ((4459, 4480), 'utils.tryToRemove', 'tryToRemove', (['msaFname'], {}), '(msaFname)\n', (4470, 4480), False, 'from utils import myMakeDir, tryToRemove\n'), ((7206, 7228), 'utils.tryToRemove', 'tryToRemove', (['al2coProc'], {}), '(al2coProc)\n', (7217, 7228), False, 'from utils import myMakeDir, tryToRemove\n'), ((7293, 7314), 'utils.tryToRemove', 'tryToRemove', (['al2coRaw'], {}), '(al2coRaw)\n', (7304, 7314), False, 'from utils import myMakeDir, tryToRemove\n'), ((9331, 9356), 'utils.tryToRemove', 'tryToRemove', (['cdhitOutName'], {}), '(cdhitOutName)\n', (9342, 9356), False, 'from utils import myMakeDir, tryToRemove\n'), ((10271, 10299), 'utils.tryToRemove', 'tryToRemove', (['clustalWOutName'], {}), '(clustalWOutName)\n', (10282, 10299), False, 'from utils import myMakeDir, tryToRemove\n'), ((7803, 7832), 'os.path.basename', 'os.path.basename', (['psiBlastOut'], {}), '(psiBlastOut)\n', (7819, 7832), False, 'import os\n')]
|
import os
import numpy as np
import cv2
from glob import glob
import tensorflow as tf
from sklearn.model_selection import train_test_split
def load_data(path, split=0.1):
images = sorted(glob(os.path.join(path, "images/*")))
masks = sorted(glob(os.path.join(path, "masks/*")))
total_size = len(images)
valid_size = int(split * total_size)
test_size = int(split * total_size)
train_x, valid_x = train_test_split(images, test_size=valid_size, random_state=42)
train_y, valid_y = train_test_split(masks, test_size=valid_size, random_state=42)
train_x, test_x = train_test_split(train_x, test_size=test_size, random_state=42)
train_y, test_y = train_test_split(train_y, test_size=test_size, random_state=42)
return (train_x, train_y), (valid_x, valid_y), (test_x, test_y)
def read_image(path):
path = path.decode()
x = cv2.imread(path, cv2.IMREAD_COLOR)
x = cv2.resize(x, (256, 256))
x = x/255.0
return x
def read_mask(path):
path = path.decode()
x = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
x = cv2.resize(x, (256, 256))
x = x/255.0
x = np.expand_dims(x, axis=-1)
return x
def tf_parse(x, y):
def _parse(x, y):
x = read_image(x)
y = read_mask(y)
return x, y
x, y = tf.numpy_function(_parse, [x, y], [tf.float64, tf.float64])
x.set_shape([256, 256, 3])
y.set_shape([256, 256, 1])
return x, y
def tf_dataset(x, y, batch=8):
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.map(tf_parse)
dataset = dataset.batch(batch)
dataset = dataset.repeat()
return dataset
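# A minimal usage sketch (an addition, not part of the original module). The
# directory layout "path/images/*" and "path/masks/*" matches load_data above;
# the "data" path is a placeholder.
if __name__ == "__main__":
    (train_x, train_y), (valid_x, valid_y), (test_x, test_y) = load_data("data")
    train_dataset = tf_dataset(train_x, train_y, batch=8)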
|
[
"sklearn.model_selection.train_test_split",
"numpy.expand_dims",
"tensorflow.data.Dataset.from_tensor_slices",
"cv2.imread",
"tensorflow.numpy_function",
"os.path.join",
"cv2.resize"
] |
[((422, 485), 'sklearn.model_selection.train_test_split', 'train_test_split', (['images'], {'test_size': 'valid_size', 'random_state': '(42)'}), '(images, test_size=valid_size, random_state=42)\n', (438, 485), False, 'from sklearn.model_selection import train_test_split\n'), ((509, 571), 'sklearn.model_selection.train_test_split', 'train_test_split', (['masks'], {'test_size': 'valid_size', 'random_state': '(42)'}), '(masks, test_size=valid_size, random_state=42)\n', (525, 571), False, 'from sklearn.model_selection import train_test_split\n'), ((595, 658), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_x'], {'test_size': 'test_size', 'random_state': '(42)'}), '(train_x, test_size=test_size, random_state=42)\n', (611, 658), False, 'from sklearn.model_selection import train_test_split\n'), ((681, 744), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_y'], {'test_size': 'test_size', 'random_state': '(42)'}), '(train_y, test_size=test_size, random_state=42)\n', (697, 744), False, 'from sklearn.model_selection import train_test_split\n'), ((870, 904), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_COLOR'], {}), '(path, cv2.IMREAD_COLOR)\n', (880, 904), False, 'import cv2\n'), ((913, 938), 'cv2.resize', 'cv2.resize', (['x', '(256, 256)'], {}), '(x, (256, 256))\n', (923, 938), False, 'import cv2\n'), ((1023, 1061), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_GRAYSCALE'], {}), '(path, cv2.IMREAD_GRAYSCALE)\n', (1033, 1061), False, 'import cv2\n'), ((1070, 1095), 'cv2.resize', 'cv2.resize', (['x', '(256, 256)'], {}), '(x, (256, 256))\n', (1080, 1095), False, 'import cv2\n'), ((1120, 1146), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (1134, 1146), True, 'import numpy as np\n'), ((1286, 1345), 'tensorflow.numpy_function', 'tf.numpy_function', (['_parse', '[x, y]', '[tf.float64, tf.float64]'], {}), '(_parse, [x, y], [tf.float64, tf.float64])\n', (1303, 1345), True, 'import tensorflow as tf\n'), ((1470, 1512), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x, y)'], {}), '((x, y))\n', (1504, 1512), True, 'import tensorflow as tf\n'), ((198, 228), 'os.path.join', 'os.path.join', (['path', '"""images/*"""'], {}), "(path, 'images/*')\n", (210, 228), False, 'import os\n'), ((255, 284), 'os.path.join', 'os.path.join', (['path', '"""masks/*"""'], {}), "(path, 'masks/*')\n", (267, 284), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
from scipy.integrate import quad
from pmutt import _ModelBase
from pmutt import constants as c
from pmutt.io.json import remove_class
class HarmonicVib(_ModelBase):
"""Vibrational modes using the harmonic approximation. Equations used
sourced from:
- <NAME>. An Introduction to Applied Statistical Thermodynamics;
<NAME> & Sons, 2010.
Attributes
----------
vib_wavenumbers : list of float
Vibrational wavenumbers (:math:`\\tilde{\\nu}`) in 1/cm
imaginary_substitute : float, optional
If this value is set, imaginary frequencies are substituted with
this value for calculations. Otherwise, imaginary frequencies are
ignored. Default is None
"""
def __init__(self, vib_wavenumbers=[], imaginary_substitute=None):
self.imaginary_substitute = imaginary_substitute
self.vib_wavenumbers = np.array(vib_wavenumbers)
@property
def vib_wavenumbers(self):
return self._vib_wavenumbers
@vib_wavenumbers.setter
def vib_wavenumbers(self, val):
self._vib_wavenumbers = val
self._valid_vib_wavenumbers = _get_valid_vib_wavenumbers(
wavenumbers=val, substitute=self.imaginary_substitute)
self._valid_vib_temperatures = c.wavenumber_to_temp(
self._valid_vib_wavenumbers)
def get_q(self, T, include_ZPE=True):
"""Calculates the partition function
:math:`q^{vib}=\\prod_i \\frac{\\exp({-\\frac{\\Theta_{V,i}}{2T}})}
{1-\\exp({-\\frac{\\Theta_{V,i}}{T}})}` if include_ZPE = True
:math:`q^{vib}=\\prod_i \\frac{1}
{1-\\exp({-\\frac{\\Theta_{V,i}}{T}})}` if include_ZPE = False
Parameters
----------
T : float
Temperature in K
include_ZPE : bool, optional
If True, includes the zero-point energy term
Returns
-------
q_vib : float
Vibrational partition function
"""
vib_dimless = self._valid_vib_temperatures / T
if include_ZPE:
qs = np.array(
np.exp(-vib_dimless / 2.) / (1. - np.exp(-vib_dimless)))
else:
qs = np.array(1. / (1. - np.exp(-vib_dimless)))
return np.prod(qs)
def get_CvoR(self, T):
"""Calculates the dimensionless heat capacity at constant volume
:math:`\\frac{C_V^{vib}}{R}=\\sum_i \\bigg(\\frac{\\Theta_{V,i}}{2T}
\\bigg)^2 \\frac{1}{\\big(\\sinh{\\frac{\\Theta_{V,i}}{2T}}\\big)^2}`
Parameters
----------
T : float
Temperature in K
Returns
-------
CvoR_vib : float
Vibrational dimensionless heat capacity at constant volume
"""
vib_dimless = self._valid_vib_temperatures / T
CvoRs = np.array([
(0.5 * vib_dimless)**2 * (1. / np.sinh(vib_dimless / 2.))**2
])
return np.sum(CvoRs)
def get_CpoR(self, T):
"""Calculates the dimensionless heat capacity at constant pressure
:math:`\\frac{C_P^{vib}}{R}=\\frac{C_V^{vib}}{R}=\\sum_i \\bigg(\\frac{
\\Theta_{V,i}}{2T}\\bigg)^2 \\frac{1}{\\big(\\sinh{\\frac{\\Theta_{V,i}}
{2T}}\\big)^2}`
Parameters
----------
T : float
Temperature in K
Returns
-------
CpoR_vib : float
Vibrational dimensionless heat capacity at constant pressure
"""
return self.get_CvoR(T=T)
def get_ZPE(self):
"""Calculates the zero point energy
:math:`ZPE=\\frac{1}{2}k_b\\sum_i \\Theta_{V,i}`
Returns
-------
zpe : float
Zero point energy in eV
"""
return 0.5 * c.kb('eV/K') * np.sum(self._valid_vib_temperatures)
def get_UoRT(self, T):
"""Calculates the dimensionless internal energy
:math:`\\frac{U^{vib}}{RT}=\\sum_i \\bigg(\\frac{\\Theta_{V,i}}{2T}+
\\frac{\\Theta_{V,i}}{T}\\frac{\\exp\\big(-\\frac{\\Theta_{V,i}}{T}
\\big)}{1-\\exp\\big(-\\frac{\\Theta_{V_i}}{T}\\big)}\\bigg)`
Parameters
----------
T : float
Temperature in K
Returns
-------
UoRT_vib : float
Vibrational dimensionless internal energy
"""
vib_dimless = self._valid_vib_temperatures / T
UoRT = np.array([
vib_dimless / 2. + vib_dimless * np.exp(-vib_dimless) /
(1. - np.exp(-vib_dimless))
])
return np.sum(UoRT)
def get_HoRT(self, T):
"""Calculates the dimensionless enthalpy
:math:`\\frac{H^{vib}}{RT}=\\frac{U^{vib}}{RT}=\\sum_i \\bigg(\\frac{
\\Theta_{V,i}}{2T}+\\frac{\\Theta_{V,i}}{T}\\frac{\\exp\\big(-\\frac{
\\Theta_{V,i}}{T}\\big)}{1-\\exp\\big(-\\frac{\\Theta_{V_i}}{T}\\big)}
\\bigg)`
Parameters
----------
T : float
Temperature in K
Returns
-------
HoRT_vib : float
Vibrational dimensionless enthalpy
"""
return self.get_UoRT(T=T)
def get_SoR(self, T):
"""Calculates the dimensionless entropy
:math:`\\frac{S^{vib}}{R}=\\sum_i \\frac{\\Theta_{V,i}}{T}\\frac{\\exp
\\big(-\\frac{\\Theta_{V,i}}{T}\\big)}{1-\\exp\\big(-\\frac{
\\Theta_{V,i}}{T}\\big)}-\\ln \\bigg(1-\\exp\\big(-\\frac{
\\Theta_{V,i}}{T}\\big)\\bigg)`
Parameters
----------
T : float
Temperature in K
Returns
-------
SoR_vib : float
Vibrational dimensionless entropy
"""
vib_dimless = self._valid_vib_temperatures / T
return np.sum([
vib_dimless * np.exp(-vib_dimless) / (1. - np.exp(-vib_dimless)) -
np.log(1. - np.exp(-vib_dimless))
])
def get_FoRT(self, T):
"""Calculates the dimensionless Helmholtz energy
:math:`\\frac{A^{vib}}{RT}=\\frac{U^{vib}}{RT}-\\frac{S^{vib}}{R}`
Parameters
----------
T : float
Temperature in K
Returns
-------
FoRT_vib : float
Vibrational dimensionless Helmholtz energy
"""
return self.get_UoRT(T=T) - self.get_SoR(T=T)
def get_GoRT(self, T):
"""Calculates the dimensionless Gibbs energy
:math:`\\frac{G^{vib}}{RT}=\\frac{H^{vib}}{RT}-\\frac{S^{vib}}{R}`
Parameters
----------
T : float
Temperature in K
Returns
-------
GoRT_vib : float
Vibrational dimensionless Gibbs energy
"""
return self.get_HoRT(T=T) - self.get_SoR(T=T)
def to_dict(self):
"""Represents object as dictionary with JSON-accepted datatypes
Returns
-------
obj_dict : dict
"""
return {
'class': str(self.__class__),
'vib_wavenumbers': list(self.vib_wavenumbers),
'imaginary_substitute': self.imaginary_substitute
}
@classmethod
def from_dict(cls, json_obj):
"""Recreate an object from the JSON representation.
Parameters
----------
json_obj : dict
JSON representation
Returns
-------
HarmonicVib : HarmonicVib object
"""
json_obj = remove_class(json_obj)
return cls(**json_obj)
def print_calc_wavenumbers(self):
"""Prints the wavenumbers that will be used in a thermodynamic
calculation. If ``self.imaginary_substitute`` is a float, then
imaginary frequencies are replaced with that value. Otherwise,
imaginary frequencies are ignored."""
print(self._valid_vib_wavenumbers)
class QRRHOVib(_ModelBase):
"""Vibrational modes using the Quasi Rigid Rotor Harmonic Oscillator
    approximation. Equations sourced from:
* <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.
Phys. Chem. C 2015, 119 (4), 1840–1850.
* <NAME>. - A Eur. J. 2012, 18 (32), 9955–9964.
Attributes
----------
    vib_wavenumbers : list of float
Vibrational wavenumbers (:math:`\\tilde{\\nu}`) in 1/cm
Bav : float, optional
Average molecular moment of inertia as a limiting value of small
wavenumbers. Default is 1.e-44 kg m2
v0 : float, optional
Wavenumber to scale vibrations. Default is 100 cm :sup:`-1`
alpha : int, optional
Power to raise ratio of wavenumbers. Default is 4
imaginary_substitute : float, optional
If this value is set, imaginary frequencies are substituted with
this value for calculations. Otherwise, imaginary frequencies are
ignored. Default is None
"""
def __init__(self,
vib_wavenumbers,
Bav=1.e-44,
v0=100.,
alpha=4,
imaginary_substitute=None):
self.Bav = Bav
self.v0 = v0
self.alpha = alpha
self.imaginary_substitute = imaginary_substitute
self.vib_wavenumbers = vib_wavenumbers
@property
def vib_wavenumbers(self):
return self._vib_wavenumbers
@vib_wavenumbers.setter
def vib_wavenumbers(self, val):
self._vib_wavenumbers = val
self._valid_vib_wavenumbers = _get_valid_vib_wavenumbers(
wavenumbers=val, substitute=self.imaginary_substitute)
self._valid_vib_temperatures = c.wavenumber_to_temp(
self._valid_vib_wavenumbers)
self._valid_scaled_wavenumbers = self._get_scaled_wavenumber()
self._valid_scaled_inertia = self._get_scaled_inertia()
def _get_scaled_wavenumber(self):
"""Calculates the scaled wavenumber determining mixture of RRHO to
add.
:math:`\\omega = \\frac {1}{1 + (\\frac{\\nu_0}{\\nu})^\\alpha}`
Returns
-------
scaled_wavenumber : float
Scaled wavenumber
"""
return 1. / (1. + (self.v0 / self._valid_vib_wavenumbers)**self.alpha)
def _get_scaled_inertia(self):
"""Calculates the scaled moment of inertia.
:math:`\\mu'=\\frac {\\mu B_{av}} {\\mu + B_{av}}`
Returns
-------
mu1 : float
Scaled moment of inertia in kg*m2
"""
mu = c.wavenumber_to_inertia(self._valid_vib_wavenumbers)
return mu * self.Bav / (mu + self.Bav)
def get_q(self):
"""Calculates the partition function
Returns
-------
q_vib : float
Vibrational partition function
"""
raise NotImplementedError()
def get_CvoR(self, T):
"""Calculates the dimensionless heat capacity at constant volume
:math:`\\frac {C_{v}^{qRRHO}}{R} = \\sum_{i}\\omega_i\\frac{C_{v,i}
^{RRHO}}{R} + \\frac{1}{2}(1-\\omega_i)`
:math:`\\frac{C_{v}^{RRHO}}{R} = \\sum_{i}\\exp \\bigg(-\\frac{
\\Theta_i}{T}\\bigg) \\bigg(\\frac{\\Theta_i}{T}\\frac{1}{1-\\exp(-
\\frac{\\Theta_i}{T})}\\bigg)^2`
Parameters
----------
T : float
Temperature in K
Returns
-------
CvoR_vib : float
Vibrational dimensionless heat capacity at constant volume
"""
CvoR = []
vib_dimless = self._valid_vib_temperatures / T
for vib_dimless_i, w_i in zip(vib_dimless,
self._valid_scaled_wavenumbers):
CvoR_RRHO = np.exp(-vib_dimless_i) \
* (vib_dimless_i/(1. - np.exp(-vib_dimless_i)))**2
CvoR.append(w_i * CvoR_RRHO + 0.5 * (1. - w_i))
return np.sum(CvoR)
def get_CpoR(self, T):
"""Calculates the dimensionless heat capacity at constant pressure
:math:`\\frac{C_{P}^{qRRHO}} {R} = \\frac{C_{V}^{qRRHO}} {R}`
Parameters
----------
T : float
Temperature in K
Returns
-------
CpoR_vib : float
Vibrational dimensionless heat capacity at constant pressure
"""
return self.get_CvoR(T=T)
def get_ZPE(self):
"""Calculates the zero point energy
:math:`ZPE=\\frac{1}{2}k_b\\sum_i \\omega_i\\Theta_{V,i}`
Returns
-------
zpe : float
Zero point energy in eV
"""
return 0.5 * c.kb('eV/K') * np.dot(self._valid_vib_temperatures,
self._valid_scaled_wavenumbers)
def _get_UoRT_RRHO(self, T, vib_temperature):
"""Calculates the dimensionless RRHO contribution to internal energy
Parameters
----------
T : float
Temperature in K
vib_temperature : float
Vibrational temperature in K
Returns
-------
UoRT_RRHO : float
Dimensionless internal energy of Rigid Rotor Harmonic Oscillator
"""
vib_dimless = vib_temperature / T
return vib_dimless * (0.5 + np.exp(-vib_dimless) /
(1. - np.exp(-vib_dimless)))
def get_UoRT(self, T):
"""Calculates the dimensionless internal energy
:math:`\\frac {U^{qRRHO}}{RT} = \\sum_{i}\\omega_i\\frac{U^{RRHO}}{RT}
+ \\frac{1}{2}(1-\\omega_i)`
:math:`\\frac {U^{RRHO}_{i}}{RT} = \\frac{\\Theta_i}{T} \\bigg(
\\frac{1}{2} + \\frac{\\exp(-\\frac{\\Theta_i}{T})}{1-\\exp(-\\frac{
\\Theta_i}{T})}\\bigg)`
Parameters
----------
T : float
Temperature in K
Returns
-------
UoRT_vib : float
Vibrational dimensionless internal energy
"""
UoRT_QRRHO = []
for theta_i, w_i in zip(self._valid_vib_temperatures,
self._valid_scaled_wavenumbers):
UoRT_RRHO = self._get_UoRT_RRHO(T=T, vib_temperature=theta_i)
UoRT_QRRHO.append(w_i * UoRT_RRHO + (1. - w_i) * 0.5)
return np.sum(UoRT_QRRHO)
def get_HoRT(self, T):
"""Calculates the dimensionless enthalpy
:math:`\\frac{H^{qRRHO}} {RT} = \\frac{U^{qRRHO}} {RT}`
Parameters
----------
T : float
Temperature in K
Returns
-------
HoRT_vib : float
Vibrational dimensionless enthalpy
"""
return self.get_UoRT(T=T)
def _get_SoR_H(self, T, vib_temperature):
"""Calculates the dimensionless harmonic osccilator contribution to
entropy
Parameters
----------
T : float
Temperature in K
vib_temperature : float
Vibrational temperature in K
Returns
-------
SoR_RHHO : float
Dimensionless entropy of Rigid Rotor Harmonic Oscillator
"""
return vib_temperature/T/(np.exp(vib_temperature/T)-1) \
- np.log(1-np.exp(-vib_temperature/T))
def _get_SoR_RRHO(self, T, vib_inertia):
"""Calculates the dimensionless RRHO contribution to entropy
Parameters
----------
T : float
Temperature in K
vib_inertia : float
Vibrational inertia in kg m2
Returns
-------
SoR_RHHO : float
Dimensionless entropy of Rigid Rotor Harmonic Oscillator
"""
return 0.5 + np.log(
(8. * np.pi**3 * vib_inertia * c.kb('J/K') * T / c.h('J s')**2)**
0.5)
def get_SoR(self, T):
"""Calculates the dimensionless entropy
:math:`\\frac{S^{qRRHO}}{R}=\\sum_i\\omega_i\\frac{S_i^{H}}{R}+(1-
\\omega_i)\\frac{S_i^{RRHO}}{R}`
:math:`\\frac {S^{RRHO}_i}{R} = \\frac{1}{2} + \\log \\bigg(\\bigg[
\\frac{8\\pi^3\\mu'_ik_BT}{h^2}\\bigg]^{\\frac{1}{2}}\\bigg)`
:math:`\\frac {S^{H}_i}{R}=\\bigg(\\frac{\\Theta_i}{T}\\bigg)\\frac{1}
{\\exp(\\frac{\\Theta_i}{T})-1}-\\log\\bigg(1-\\exp(\\frac{-\\Theta_i}
{T})\\bigg)`
Parameters
----------
T : float
Temperature in K
Returns
-------
SoR_vib : float
Vibrational dimensionless entropy
"""
SoR_QRRHO = []
for theta_i, mu_i, w_i in zip(self._valid_vib_temperatures,
self._valid_scaled_inertia,
self._valid_scaled_wavenumbers):
SoR_H = self._get_SoR_H(T=T, vib_temperature=theta_i)
SoR_RRHO = self._get_SoR_RRHO(T=T, vib_inertia=mu_i)
SoR_QRRHO.append(w_i * SoR_H + (1. - w_i) * SoR_RRHO)
return np.sum(SoR_QRRHO)
def get_FoRT(self, T):
"""Calculates the dimensionless Helmholtz energy
:math:`\\frac{A^{qRRHO}}{RT} = \\frac{U^{qRRHO}}{RT}-
\\frac{S^{qRRHO}}{R}`
Parameters
----------
T : float
Temperature in K
Returns
-------
FoRT_vib : float
Vibrational dimensionless Helmholtz energy
"""
return self.get_UoRT(T=T) - self.get_SoR(T=T)
def get_GoRT(self, T):
"""Calculates the dimensionless Gibbs energy
:math:`\\frac{G^{qRRHO}}{RT} = \\frac{H^{qRRHO}}{RT}-
\\frac{S^{qRRHO}}{R}`
Parameters
----------
T : float
Temperature in K
Returns
-------
GoRT_vib : float
Vibrational dimensionless Gibbs energy
"""
return self.get_HoRT(T=T) - self.get_SoR(T=T)
def to_dict(self):
"""Represents object as dictionary with JSON-accepted datatypes
Returns
-------
obj_dict : dict
"""
return {
'class': str(self.__class__),
'vib_wavenumbers': list(self.vib_wavenumbers),
'Bav': self.Bav,
'v0': self.v0,
'alpha': self.alpha,
'imaginary_substitute': self.imaginary_substitute
}
@classmethod
def from_dict(cls, json_obj):
"""Recreate an object from the JSON representation.
Parameters
----------
json_obj : dict
JSON representation
Returns
-------
QRRHOVib : QRRHOVib object
"""
json_obj = remove_class(json_obj)
return cls(**json_obj)
def print_calc_wavenumbers(self):
"""Prints the wavenumbers that will be used in a thermodynamic
calculation. If ``self.imaginary_substitute`` is a float, then
imaginary frequencies are replaced with that value. Otherwise,
imaginary frequencies are ignored."""
print(
_get_valid_vib_wavenumbers(wavenumbers=self.vib_wavenumbers,
substitute=self.imaginary_substitute))
class EinsteinVib(_ModelBase):
"""Einstein model of a crystal. Equations used sourced from
* <NAME>. An Introduction to Applied Statistical Thermodynamics;
<NAME> & Sons, 2010.
Attributes
----------
einstein_temperature : float
Einstein temperature (:math:`\\Theta_E`) in K
interaction_energy : float, optional
Interaction energy (:math:`u`) per atom in eV. Default is 0 eV
"""
def __init__(self, einstein_temperature, interaction_energy=0.):
self.einstein_temperature = einstein_temperature
self.interaction_energy = interaction_energy
def get_q(self, T):
"""Calculates the partition function
:math:`q^{vib}=\\exp\\bigg({\\frac{-u}{k_BT}}\\bigg)\\bigg(\\frac{
\\exp(-\\frac{\\Theta_E}{2T})}{1-\\exp(\\frac{-\\Theta_E}{T})}\\bigg)`
Parameters
----------
T : float
Temperature in K
Returns
-------
q_vib : float
Vibrational partition function
"""
u = self.interaction_energy
theta_E = self.einstein_temperature
return np.exp(-u/c.kb('eV/K')/T) \
* (np.exp(-theta_E/2./T)/(1. - np.exp(-theta_E/T)))
def get_CvoR(self, T):
"""Calculates the dimensionless heat capacity at constant volume
:math:`\\frac{C_V^{vib}}{R}=3\\bigg(\\frac{\\Theta_E}{T}\\bigg)^2
\\frac{\\exp(-\\frac{\\Theta_E}{T})}{\\big(1-\\exp(\\frac{-
\\Theta_E}{T})\\big)^2}`
Parameters
----------
T : float
Temperature in K
Returns
-------
CvoR_vib : float
Vibrational dimensionless heat capacity at constant volume
"""
theta_E = self.einstein_temperature
return 3. * (theta_E / T)**2 * np.exp(
-theta_E / T) / (1 - np.exp(-theta_E / T))**2
def get_CpoR(self, T):
"""Calculates the dimensionless heat capacity at constant pressure
:math:`\\frac{C_P^{vib}}{R}=\\frac{C_V^{vib}}{R}=3\\bigg(\\frac{
\\Theta_E}{T}\\bigg)^2\\frac{\\exp(-\\frac{\\Theta_E}{T})}{\\big(1-
\\exp(\\frac{-\\Theta_E}{T})\\big)^2}`
Parameters
----------
T : float
Temperature in K
Returns
-------
CpoR_vib : float
Vibrational dimensionless heat capacity at constant pressure
"""
return self.get_CvoR(T=T)
def get_ZPE(self):
"""Calculates the zero point energy
:math:`u^0_E=u+\\frac{3}{2}\\Theta_E k_B`
Returns
-------
zpe : float
Zero point energy in eV
"""
return self.interaction_energy \
+ 1.5*self.einstein_temperature*c.kb('eV/K')
def get_UoRT(self, T):
"""Calculates the dimensionless internal energy
:math:`\\frac{U^{vib}}{RT}=\\frac{u^0_E}{k_BT}+3\\frac{\\Theta_E}{T}
\\bigg(\\frac{\\exp(-\\frac{\\Theta_E}{T})}{1-\\exp(-\\frac{\\Theta_E}
{T})}\\bigg)`
Parameters
----------
T : float
Temperature in K
Returns
-------
UoRT_vib : float
Vibrational dimensionless internal energy
"""
theta_E = self.einstein_temperature
return self.get_ZPE()/c.kb('eV/K')/T \
+ 3.*theta_E/T*np.exp(-theta_E/T)/(1. - np.exp(-theta_E/T))
def get_HoRT(self, T):
"""Calculates the dimensionless enthalpy
:math:`\\frac{H^{vib}}{RT}=\\frac{U^{vib}}{RT}=\\frac{N_A u^0_E}{k_BT}
+3\\frac{\\Theta_E}{T}\\bigg(\\frac{\\exp(-\\frac{\\Theta_E}{T})}{1-
\\exp(-\\frac{\\Theta_E}{T})}\\bigg)`
Parameters
----------
T : float
Temperature in K
Returns
-------
HoRT_vib : float
Vibrational dimensionless enthalpy
"""
return self.get_UoRT(T=T)
def get_SoR(self, T):
"""Calculates the dimensionless entropy
:math:`\\frac{S^{vib}}{R}=3\\bigg(\\frac{\\Theta_E}{T}\\frac{\\exp\\big(
\\frac{-\\Theta_E}{T}\\big)}{1-\\exp\\big(-\\frac{\\Theta_E}{T}\\big)}
\\bigg)-\\ln\\bigg(1-\\exp\\big(\\frac{-\\Theta_E}{T}\\big)\\bigg)`
Parameters
----------
T : float
Temperature in K
Returns
-------
SoR_vib : float
Vibrational dimensionless entropy
"""
theta_E = self.einstein_temperature
exp_term = np.exp(-theta_E / T)
return 3. * (theta_E / T * exp_term /
(1. - exp_term) - np.log(1. - exp_term))
def get_FoRT(self, T):
"""Calculates the dimensionless Helmholtz energy
:math:`\\frac{A^{vib}}{RT}=\\frac{U^{vib}}{RT}-\\frac{S^{vib}}{R}`
Parameters
----------
T : float
Temperature in K
Returns
-------
FoRT_vib : float
Vibrational dimensionless Helmholtz energy
"""
return self.get_UoRT(T=T) - self.get_SoR(T=T)
def get_GoRT(self, T):
"""Calculates the dimensionless Gibbs energy
:math:`\\frac{G^{vib}}{RT}=\\frac{H^{vib}}{RT}-\\frac{S^{vib}}{R}`
Parameters
----------
T : float
Temperature in K
Returns
-------
GoRT_vib : float
Vibrational dimensionless Gibbs energy
"""
return self.get_HoRT(T=T) - self.get_SoR(T=T)
def to_dict(self):
"""Represents object as dictionary with JSON-accepted datatypes
Returns
-------
obj_dict : dict
"""
return {
'class': str(self.__class__),
'einstein_temperature': self.einstein_temperature,
'interaction_energy': self.interaction_energy
}
class DebyeVib(_ModelBase):
"""Debye model of a crystal. Equations sourced from:
* <NAME>. An Introduction to Applied Statistical Thermodynamics;
<NAME> & Sons, 2010.
Attributes
----------
debye_temperature : float
Debye temperature (:math:`\\Theta_D`) in K
interaction_energy : float, optional
Interaction energy (:math:`u`) per atom in eV. Default is 0 eV
"""
    def __init__(self, debye_temperature, interaction_energy=0.):
self.debye_temperature = debye_temperature
self.interaction_energy = interaction_energy
def get_q(self, T):
"""Calculate the partition function
:math:`q^{vib} = \\exp\\bigg(-\\frac{u}{3k_B T} - \\frac{3}{8}
\\frac{\\Theta_D}{T} - G\\big(\\frac{\\Theta_D}{T}\\big)\\bigg)`
:math:`G\\bigg(\\frac{\\Theta_D}{T}\\bigg) = 3\\bigg(\\frac{T}{
\\Theta_D}\\bigg)^3\\int_0^{\\frac{\\Theta_D}{T}}x^2 \\ln
\\bigg(1-e^{-x}\\bigg)dx`
Parameters
----------
T : float
Temperature in K
Returns
-------
q : float
Partition function
"""
G = self._get_intermediate_fn(T=T, fn=self._G_integrand)
return np.exp(-self.interaction_energy/3./c.kb('eV/K')/T \
-3./8.*self.debye_temperature/T - G)
def get_CvoR(self, T):
"""Calculates dimensionless heat capacity (constant V)
:math:`\\frac {C_V^{vib}}{R} = 3K\\bigg(\\frac{\\Theta_D}{T}\\bigg)`
:math:`K\\bigg(\\frac{\\Theta_D}{T}\\bigg)=3\\bigg(\\frac{T}{\\Theta_D}
\\bigg)^3 \\int_0^{\\frac{\\Theta_D}{T}}\\frac{x^4 e^x}{(e^x-1)^2}dx`
Parameters
----------
T : float
Temperature in K
Returns
-------
CvoR : float
Dimensionless heat capacity (constant V)
"""
K = self._get_intermediate_fn(T=T, fn=self._K_integrand)
return 3. * K
def get_CpoR(self, T):
"""Calculates dimensionless heat capacity (constant P)
:math:`\\frac {C_P^{vib}}{R} = 3K\\bigg(\\frac{\\Theta_D}{T}\\bigg)`
:math:`K\\bigg(\\frac{\\Theta_D}{T}\\bigg)=3\\bigg(\\frac{T}{\\Theta_D}
\\bigg)^3 \\int_0^{\\frac{\\Theta_D}{T}}\\frac{x^4 e^x}{(e^x-1)^2}dx`
Parameters
----------
T : float
Temperature in K
Returns
-------
CpoR : float
Dimensionless heat capacity (constant P)
"""
return self.get_CvoR(T=T)
def get_UoRT(self, T):
"""Calculates dimensionless internal energy
:math:`\\frac{U^{vib}}{RT} = \\frac{u_D^o}{RT} + 3F\\bigg(\\frac{
\\Theta_D}{T}\\bigg)`
:math:`F\\bigg(\\frac{\\Theta_D}{T}\\bigg) = 3\\bigg(\\frac{T}{
\\Theta_D}\\bigg)^3 \\int_0^{\\frac{\\Theta_D}{T}} \\frac{x^3 e^x}
{e^x-1} dx`
Parameters
----------
T : float
Temperature in K
Returns
-------
UoRT : float
Dimensionless internal energy
"""
return self.get_ZPE()/c.kb('eV/K')/T \
+ 3.*self._get_intermediate_fn(T=T, fn=self._F_integrand)
def get_HoRT(self, T):
"""Calculates dimensionless enthalpy
:math:`\\frac{H^{vib}}{RT} = \\frac{u_D^o}{RT} + 3F\\bigg(\\frac{
\\Theta_D}{T}\\bigg)`
:math:`F\\bigg(\\frac{\\Theta_D}{T}\\bigg) = 3\\bigg(\\frac{T}{
\\Theta_D}\\bigg)^3 \\int_0^{\\frac{\\Theta_D}{T}} \\frac{x^3 e^x}
{e^x-1} dx`
Parameters
----------
T : float
Temperature in K
Returns
-------
HoRT : float
Dimensionless enthalpy
"""
return self.get_UoRT(T=T)
def get_SoR(self, T):
"""Calculates dimensionless entropy
:math:`\\frac{S^{vib}}{R} = 3\\bigg[F\\bigg(\\frac{\\Theta_D}{T}\\bigg)
- G\\bigg(\\frac{\\Theta_D}{T}\\bigg)\\bigg]`
:math:`F\\bigg(\\frac{\\Theta_D}{T}\\bigg) = 3\\bigg(\\frac{T}{
\\Theta_D}\\bigg)^3 \\int_0^{\\frac{\\Theta_D}{T}} \\frac{x^3 e^x}
{e^x-1} dx`
:math:`G\\bigg(\\frac{\\Theta_D}{T}\\bigg) = 3\\bigg(\\frac{T}{
\\Theta_D}\\bigg)^3\\int_0^{\\frac{\\Theta_D}{T}}x^2 \\ln
\\bigg(1-e^{-x}\\bigg)dx`
Parameters
----------
T : float
Temperature in K
Returns
-------
SoR : float
Dimensionless entropy
"""
F = self._get_intermediate_fn(T=T, fn=self._F_integrand)
G = self._get_intermediate_fn(T=T, fn=self._G_integrand)
return 3. * (F - G)
def get_FoRT(self, T):
"""Calculates dimensionless Helmholtz energy
:math:`\\frac{F^{vib}}{RT}=\\frac{U^{vib}}{RT}-\\frac{S^{vib}}{R}`
Parameters
----------
T : float
Temperature in K
Returns
-------
FoRT : float
Dimensionless Helmholtz energy
"""
return self.get_UoRT(T=T) - self.get_SoR(T=T)
def get_GoRT(self, T):
"""Calculates dimensionless Gibbs energy
:math:`\\frac{G^{vib}}{RT}=\\frac{H^{vib}}{RT}-\\frac{S^{vib}}{R}`
Parameters
----------
T : float
Temperature in K
Returns
-------
GoRT : float
Dimensionless Gibbs energy
"""
return self.get_HoRT(T=T) - self.get_SoR(T=T)
def get_ZPE(self):
"""Calculate zero point energy
:math:`u^o_D = u^o +\\frac{9}{8}R\\Theta_D`
Returns
-------
zpe : float
Zero point energy in eV
"""
return self.interaction_energy \
+ 9./8.*c.R('eV/K')*self.debye_temperature
def _G_integrand(self, x):
"""Integrand when evaluating intermediate function G.
:math:`f(x) = x^2 \\ln \\bigg(1-e^{-x}\\bigg)`
Parameters
----------
x : float
Variable of integration. Represents
:math:`\\frac{\\Theta_D}{T}}`
Returns
-------
f(x) : float
Integrand evaluated at x
"""
return np.log(1. - np.exp(-x)) * (x**2)
def _K_integrand(self, x):
"""Integrand when evaluating intermediate function K.
:math:`f(x) = \\frac {x^4 e^x}{(e^x -1)^2}`
Parameters
----------
x : float
Variable of integration. Represents
            :math:`\\frac{\\Theta_D}{T}`
Returns
-------
f(x) : float
Integrand evaluated at x
"""
return (x**4) * np.exp(x) / (np.exp(x) - 1.)**2
def _F_integrand(self, x):
"""Integrand when evaluating intermediate function F.
:math:`f(x) = \\frac {x^3 e^x}{e^x -1}`
Parameters
----------
x : float
Variable of integration. Represents
            :math:`\\frac{\\Theta_D}{T}`
Returns
-------
f(x) : float
Integrand evaluated at x
"""
return (x**3) * np.exp(x) / (np.exp(x) - 1.)
def _get_intermediate_fn(self, T, fn):
"""Calculates the intermediate function (i.e. F, G, or K)
:math:`F(x) = 3\\bigg(\\frac{T}{\\Theta_D}\\bigg)^3\\int_0^{\\frac
{\\Theta_D}{T}} f(x) dx`
Parameters
----------
T : float
Temperature in K
fn : function
Integrand function, f(x)
Returns
-------
F : float
Intermediate function evaluated at T
"""
vib_dimless = self.debye_temperature / T
integral = quad(func=fn, a=0., b=vib_dimless)[0]
return 3. * integral / vib_dimless**3
def _get_valid_vib_wavenumbers(wavenumbers, substitute=None):
"""Returns wavenumbers to use for vibration calculations. Imaginary
frequencies are expected to be negative.
Parameters
----------
wavenumbers : list of float
Wavenumbers in 1/cm
substitute : float, optional
Value to use to replace imaginary frequencies. If not specified,
imaginary frequencies are ignored. Default is None
Returns
-------
wavenumbers_out : (N,) np.ndarray
Valid wavenumbers
"""
wavenumbers_out = []
for wavenumber in wavenumbers:
if wavenumber > 0.:
# Real wavenumbers always added
wavenumbers_out.append(wavenumber)
elif substitute is not None:
# Substitute added if imaginary frequency encountered
wavenumbers_out.append(substitute)
return np.array(wavenumbers_out)
def _get_vib_dimless(wavenumbers, T, substitute=None):
"""Calculates dimensionless temperatures for the wavenumbers and
temperature specified
Parameters
----------
wavenumbers : (N,) np.ndarray
Wavenumbers in 1/cm
T : float
Temperature in K
substitute : float, optional
Value to use to replace imaginary frequencies. If not specified,
imaginary frequencies are ignored. Default is None
Returns
-------
vib_dimless : (N,) np.ndarray
Vibrational temperatures normalized by T
"""
valid_wavenumbers = _get_valid_vib_wavenumbers(wavenumbers=wavenumbers,
substitute=substitute)
vib_dimless = c.wavenumber_to_temp(valid_wavenumbers) / T
return vib_dimless
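# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# A minimal standalone check of the Debye heat-capacity formula documented above:
# Cv/R = 3*K(Theta_D/T), where K(y) = (3/y**3) * integral_0^y x**4*e**x/(e**x - 1)**2 dx.
# It reuses the same `np` and `quad` imports this file relies on; the Debye
# temperature below is an assumed example value, not taken from the source.
def _example_debye_CvoR(T, debye_temperature=300.):
    y = debye_temperature / T
    integral = quad(func=lambda x: x**4 * np.exp(x) / (np.exp(x) - 1.)**2, a=0., b=y)[0]
    return 3. * (3. * integral / y**3)
# The high-temperature limit approaches the Dulong-Petit value of 3:
# _example_debye_CvoR(3000.)  # -> ~3.0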
|
[
"numpy.sinh",
"numpy.sum",
"pmutt.constants.wavenumber_to_temp",
"scipy.integrate.quad",
"numpy.log",
"pmutt.constants.h",
"pmutt.io.json.remove_class",
"numpy.array",
"numpy.exp",
"pmutt.constants.R",
"numpy.dot",
"pmutt.constants.kb",
"numpy.prod",
"pmutt.constants.wavenumber_to_inertia"
] |
[((34120, 34145), 'numpy.array', 'np.array', (['wavenumbers_out'], {}), '(wavenumbers_out)\n', (34128, 34145), True, 'import numpy as np\n'), ((950, 975), 'numpy.array', 'np.array', (['vib_wavenumbers'], {}), '(vib_wavenumbers)\n', (958, 975), True, 'import numpy as np\n'), ((1332, 1381), 'pmutt.constants.wavenumber_to_temp', 'c.wavenumber_to_temp', (['self._valid_vib_wavenumbers'], {}), '(self._valid_vib_wavenumbers)\n', (1352, 1381), True, 'from pmutt import constants as c\n'), ((2336, 2347), 'numpy.prod', 'np.prod', (['qs'], {}), '(qs)\n', (2343, 2347), True, 'import numpy as np\n'), ((3028, 3041), 'numpy.sum', 'np.sum', (['CvoRs'], {}), '(CvoRs)\n', (3034, 3041), True, 'import numpy as np\n'), ((4665, 4677), 'numpy.sum', 'np.sum', (['UoRT'], {}), '(UoRT)\n', (4671, 4677), True, 'import numpy as np\n'), ((7578, 7600), 'pmutt.io.json.remove_class', 'remove_class', (['json_obj'], {}), '(json_obj)\n', (7590, 7600), False, 'from pmutt.io.json import remove_class\n'), ((9715, 9764), 'pmutt.constants.wavenumber_to_temp', 'c.wavenumber_to_temp', (['self._valid_vib_wavenumbers'], {}), '(self._valid_vib_wavenumbers)\n', (9735, 9764), True, 'from pmutt import constants as c\n'), ((10590, 10642), 'pmutt.constants.wavenumber_to_inertia', 'c.wavenumber_to_inertia', (['self._valid_vib_wavenumbers'], {}), '(self._valid_vib_wavenumbers)\n', (10613, 10642), True, 'from pmutt import constants as c\n'), ((11964, 11976), 'numpy.sum', 'np.sum', (['CvoR'], {}), '(CvoR)\n', (11970, 11976), True, 'import numpy as np\n'), ((14352, 14370), 'numpy.sum', 'np.sum', (['UoRT_QRRHO'], {}), '(UoRT_QRRHO)\n', (14358, 14370), True, 'import numpy as np\n'), ((17072, 17089), 'numpy.sum', 'np.sum', (['SoR_QRRHO'], {}), '(SoR_QRRHO)\n', (17078, 17089), True, 'import numpy as np\n'), ((18763, 18785), 'pmutt.io.json.remove_class', 'remove_class', (['json_obj'], {}), '(json_obj)\n', (18775, 18785), False, 'from pmutt.io.json import remove_class\n'), ((23877, 23897), 'numpy.exp', 'np.exp', (['(-theta_E / T)'], {}), '(-theta_E / T)\n', (23883, 23897), True, 'import numpy as np\n'), ((34914, 34953), 'pmutt.constants.wavenumber_to_temp', 'c.wavenumber_to_temp', (['valid_wavenumbers'], {}), '(valid_wavenumbers)\n', (34934, 34953), True, 'from pmutt import constants as c\n'), ((3880, 3916), 'numpy.sum', 'np.sum', (['self._valid_vib_temperatures'], {}), '(self._valid_vib_temperatures)\n', (3886, 3916), True, 'import numpy as np\n'), ((12709, 12777), 'numpy.dot', 'np.dot', (['self._valid_vib_temperatures', 'self._valid_scaled_wavenumbers'], {}), '(self._valid_vib_temperatures, self._valid_scaled_wavenumbers)\n', (12715, 12777), True, 'import numpy as np\n'), ((33135, 33170), 'scipy.integrate.quad', 'quad', ([], {'func': 'fn', 'a': '(0.0)', 'b': 'vib_dimless'}), '(func=fn, a=0.0, b=vib_dimless)\n', (33139, 33170), False, 'from scipy.integrate import quad\n'), ((3865, 3877), 'pmutt.constants.kb', 'c.kb', (['"""eV/K"""'], {}), "('eV/K')\n", (3869, 3877), True, 'from pmutt import constants as c\n'), ((11789, 11811), 'numpy.exp', 'np.exp', (['(-vib_dimless_i)'], {}), '(-vib_dimless_i)\n', (11795, 11811), True, 'import numpy as np\n'), ((12694, 12706), 'pmutt.constants.kb', 'c.kb', (['"""eV/K"""'], {}), "('eV/K')\n", (12698, 12706), True, 'from pmutt import constants as c\n'), ((20482, 20508), 'numpy.exp', 'np.exp', (['(-theta_E / 2.0 / T)'], {}), '(-theta_E / 2.0 / T)\n', (20488, 20508), True, 'import numpy as np\n'), ((21133, 21153), 'numpy.exp', 'np.exp', (['(-theta_E / T)'], {}), '(-theta_E / T)\n', (21139, 21153), True, 'import numpy as np\n'),
((22090, 22102), 'pmutt.constants.kb', 'c.kb', (['"""eV/K"""'], {}), "('eV/K')\n", (22094, 22102), True, 'from pmutt import constants as c\n'), ((23983, 24005), 'numpy.log', 'np.log', (['(1.0 - exp_term)'], {}), '(1.0 - exp_term)\n', (23989, 24005), True, 'import numpy as np\n'), ((32071, 32080), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (32077, 32080), True, 'import numpy as np\n'), ((32539, 32548), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (32545, 32548), True, 'import numpy as np\n'), ((32552, 32561), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (32558, 32561), True, 'import numpy as np\n'), ((2190, 2216), 'numpy.exp', 'np.exp', (['(-vib_dimless / 2.0)'], {}), '(-vib_dimless / 2.0)\n', (2196, 2216), True, 'import numpy as np\n'), ((13356, 13376), 'numpy.exp', 'np.exp', (['(-vib_dimless)'], {}), '(-vib_dimless)\n', (13362, 13376), True, 'import numpy as np\n'), ((15259, 15286), 'numpy.exp', 'np.exp', (['(vib_temperature / T)'], {}), '(vib_temperature / T)\n', (15265, 15286), True, 'import numpy as np\n'), ((15313, 15341), 'numpy.exp', 'np.exp', (['(-vib_temperature / T)'], {}), '(-vib_temperature / T)\n', (15319, 15341), True, 'import numpy as np\n'), ((20510, 20530), 'numpy.exp', 'np.exp', (['(-theta_E / T)'], {}), '(-theta_E / T)\n', (20516, 20530), True, 'import numpy as np\n'), ((21174, 21194), 'numpy.exp', 'np.exp', (['(-theta_E / T)'], {}), '(-theta_E / T)\n', (21180, 21194), True, 'import numpy as np\n'), ((22665, 22677), 'pmutt.constants.kb', 'c.kb', (['"""eV/K"""'], {}), "('eV/K')\n", (22669, 22677), True, 'from pmutt import constants as c\n'), ((22709, 22729), 'numpy.exp', 'np.exp', (['(-theta_E / T)'], {}), '(-theta_E / T)\n', (22715, 22729), True, 'import numpy as np\n'), ((22734, 22754), 'numpy.exp', 'np.exp', (['(-theta_E / T)'], {}), '(-theta_E / T)\n', (22740, 22754), True, 'import numpy as np\n'), ((28428, 28440), 'pmutt.constants.kb', 'c.kb', (['"""eV/K"""'], {}), "('eV/K')\n", (28432, 28440), True, 'from pmutt import constants as c\n'), ((31129, 31140), 'pmutt.constants.R', 'c.R', (['"""eV/K"""'], {}), "('eV/K')\n", (31132, 31140), True, 'from pmutt import constants as c\n'), ((31610, 31620), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (31616, 31620), True, 'import numpy as np\n'), ((32084, 32093), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (32090, 32093), True, 'import numpy as np\n'), ((2224, 2244), 'numpy.exp', 'np.exp', (['(-vib_dimless)'], {}), '(-vib_dimless)\n', (2230, 2244), True, 'import numpy as np\n'), ((2298, 2318), 'numpy.exp', 'np.exp', (['(-vib_dimless)'], {}), '(-vib_dimless)\n', (2304, 2318), True, 'import numpy as np\n'), ((13415, 13435), 'numpy.exp', 'np.exp', (['(-vib_dimless)'], {}), '(-vib_dimless)\n', (13421, 13435), True, 'import numpy as np\n'), ((20449, 20461), 'pmutt.constants.kb', 'c.kb', (['"""eV/K"""'], {}), "('eV/K')\n", (20453, 20461), True, 'from pmutt import constants as c\n'), ((2972, 2998), 'numpy.sinh', 'np.sinh', (['(vib_dimless / 2.0)'], {}), '(vib_dimless / 2.0)\n', (2979, 2998), True, 'import numpy as np\n'), ((4576, 4596), 'numpy.exp', 'np.exp', (['(-vib_dimless)'], {}), '(-vib_dimless)\n', (4582, 4596), True, 'import numpy as np\n'), ((4617, 4637), 'numpy.exp', 'np.exp', (['(-vib_dimless)'], {}), '(-vib_dimless)\n', (4623, 4637), True, 'import numpy as np\n'), ((5912, 5932), 'numpy.exp', 'np.exp', (['(-vib_dimless)'], {}), '(-vib_dimless)\n', (5918, 5932), True, 'import numpy as np\n'), ((5941, 5961), 'numpy.exp', 'np.exp', (['(-vib_dimless)'], {}), '(-vib_dimless)\n', (5947, 5961), True, 'import numpy as np\n'),
((5989, 6009), 'numpy.exp', 'np.exp', (['(-vib_dimless)'], {}), '(-vib_dimless)\n', (5995, 6009), True, 'import numpy as np\n'), ((11861, 11883), 'numpy.exp', 'np.exp', (['(-vib_dimless_i)'], {}), '(-vib_dimless_i)\n', (11867, 11883), True, 'import numpy as np\n'), ((15863, 15873), 'pmutt.constants.h', 'c.h', (['"""J s"""'], {}), "('J s')\n", (15866, 15873), True, 'from pmutt import constants as c\n'), ((26543, 26555), 'pmutt.constants.kb', 'c.kb', (['"""eV/K"""'], {}), "('eV/K')\n", (26547, 26555), True, 'from pmutt import constants as c\n'), ((15845, 15856), 'pmutt.constants.kb', 'c.kb', (['"""J/K"""'], {}), "('J/K')\n", (15849, 15856), True, 'from pmutt import constants as c\n')]
|
import os
import torch
import random
import librosa
import torchaudio
import numpy as np
from glob import glob
import nlpaug.flow as naf
import nlpaug.augmenter.audio as naa
import nlpaug.augmenter.spectrogram as nas
from torchvision.transforms import Normalize
from torch.utils.data import Dataset
from nlpaug.augmenter.audio import AudioAugmenter
from src.datasets.librispeech import WavformAugmentation, SpectrumAugmentation
from src.datasets.root_paths import DATA_ROOTS
GOOGLESPEECH_MEAN = [-46.847]
GOOGLESPEECH_STDEV = [19.151]
GOOGLESPEECH_LABELS = ['eight', 'right', 'happy', 'three', 'yes', 'up', 'no', 'stop', 'on', 'four', 'nine',
'zero', 'down', 'go', 'six', 'two', 'left', 'five', 'off', 'seven', 'one',
'cat', 'bird', 'marvin', 'wow', 'tree', 'dog', 'sheila', 'bed', 'house']
class GoogleSpeechCommands(Dataset):
def __init__(
self,
root=DATA_ROOTS['google_speech'],
train=True,
spectral_transforms=False,
wavform_transforms=False,
max_length=150526,
input_size=224,
normalize_mean=GOOGLESPEECH_MEAN,
normalize_stdev=GOOGLESPEECH_STDEV,
):
super().__init__()
assert not (spectral_transforms and wavform_transforms)
if train:
train_paths = open(os.path.join(root, 'training_list.txt'), 'r').readlines()
val_paths = open(os.path.join(root, 'validation_list.txt'), 'r').readlines()
wav_paths = train_paths + val_paths
else:
test_paths = open(os.path.join(root, 'testing_list.txt'), 'r').readlines()
wav_paths = test_paths
wav_paths = [path.strip() for path in wav_paths]
self.root = root
self.num_labels = len(GOOGLESPEECH_LABELS)
self.wav_paths = wav_paths
self.spectral_transforms = spectral_transforms
self.wavform_transforms = wavform_transforms
self.max_length = max_length
self.train = train
self.input_size = input_size
self.FILTER_SIZE = input_size
self.normalize_mean = normalize_mean
self.normalize_stdev = normalize_stdev
def __getitem__(self, index):
wav_name = self.wav_paths[index]
label_name = wav_name.split('/')[0].lower()
label = GOOGLESPEECH_LABELS.index(label_name)
wav_path = os.path.join(self.root, wav_name)
wavform, sample_rate = torchaudio.load(wav_path)
wavform = wavform[0].numpy()
if self.wavform_transforms:
transforms = WavformAugmentation(sample_rate)
wavform = transforms(wavform)
# pad to 150k frames
if len(wavform) > self.max_length:
# randomly pick which side to chop off (fix if validation)
flip = (bool(random.getrandbits(1)) if self.train else True)
padded = (wavform[:self.max_length] if flip else
wavform[-self.max_length:])
else:
padded = np.zeros(self.max_length)
padded[:len(wavform)] = wavform # pad w/ silence
hop_length_dict = {224: 672, 112: 1344, 64: 2360, 32: 4800}
spectrum = librosa.feature.melspectrogram(
padded,
sample_rate,
hop_length=hop_length_dict[self.input_size],
n_mels=self.input_size,
)
if self.spectral_transforms: # apply time and frequency masks
transforms = SpectrumAugmentation()
spectrum = transforms(spectrum)
# log mel-spectrogram
spectrum = librosa.power_to_db(spectrum**2)
spectrum = torch.from_numpy(spectrum).float()
spectrum = spectrum.unsqueeze(0)
if self.spectral_transforms: # apply noise on spectral
noise_stdev = 0.25 * self.normalize_stdev[0]
noise = torch.randn_like(spectrum) * noise_stdev
spectrum = spectrum + noise
normalize = Normalize(self.normalize_mean, self.normalize_stdev)
spectrum = normalize(spectrum)
return index, spectrum, int(label)
def __len__(self):
return len(self.wav_paths)
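# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# Typical consumption of this Dataset with a PyTorch DataLoader; the batch size
# and worker count below are illustrative assumptions, not values from the source.
# from torch.utils.data import DataLoader
# dataset = GoogleSpeechCommands(train=True, spectral_transforms=True)
# loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)
# for index, spectrum, label in loader:
#     pass  # spectrum: (B, 1, input_size, n_frames) normalized log-mel tensor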
|
[
"torch.randn_like",
"numpy.zeros",
"librosa.feature.melspectrogram",
"src.datasets.librispeech.SpectrumAugmentation",
"librosa.power_to_db",
"random.getrandbits",
"torchaudio.load",
"torchvision.transforms.Normalize",
"os.path.join",
"src.datasets.librispeech.WavformAugmentation",
"torch.from_numpy"
] |
[((2416, 2449), 'os.path.join', 'os.path.join', (['self.root', 'wav_name'], {}), '(self.root, wav_name)\n', (2428, 2449), False, 'import os\n'), ((2482, 2507), 'torchaudio.load', 'torchaudio.load', (['wav_path'], {}), '(wav_path)\n', (2497, 2507), False, 'import torchaudio\n'), ((3222, 3347), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['padded', 'sample_rate'], {'hop_length': 'hop_length_dict[self.input_size]', 'n_mels': 'self.input_size'}), '(padded, sample_rate, hop_length=\n hop_length_dict[self.input_size], n_mels=self.input_size)\n', (3252, 3347), False, 'import librosa\n'), ((3616, 3650), 'librosa.power_to_db', 'librosa.power_to_db', (['(spectrum ** 2)'], {}), '(spectrum ** 2)\n', (3635, 3650), False, 'import librosa\n'), ((3988, 4040), 'torchvision.transforms.Normalize', 'Normalize', (['self.normalize_mean', 'self.normalize_stdev'], {}), '(self.normalize_mean, self.normalize_stdev)\n', (3997, 4040), False, 'from torchvision.transforms import Normalize\n'), ((2607, 2639), 'src.datasets.librispeech.WavformAugmentation', 'WavformAugmentation', (['sample_rate'], {}), '(sample_rate)\n', (2626, 2639), False, 'from src.datasets.librispeech import WavformAugmentation, SpectrumAugmentation\n'), ((3046, 3071), 'numpy.zeros', 'np.zeros', (['self.max_length'], {}), '(self.max_length)\n', (3054, 3071), True, 'import numpy as np\n'), ((3499, 3521), 'src.datasets.librispeech.SpectrumAugmentation', 'SpectrumAugmentation', ([], {}), '()\n', (3519, 3521), False, 'from src.datasets.librispeech import WavformAugmentation, SpectrumAugmentation\n'), ((3668, 3694), 'torch.from_numpy', 'torch.from_numpy', (['spectrum'], {}), '(spectrum)\n', (3684, 3694), False, 'import torch\n'), ((3886, 3912), 'torch.randn_like', 'torch.randn_like', (['spectrum'], {}), '(spectrum)\n', (3902, 3912), False, 'import torch\n'), ((2851, 2872), 'random.getrandbits', 'random.getrandbits', (['(1)'], {}), '(1)\n', (2869, 2872), False, 'import random\n'), ((1367, 1406), 'os.path.join', 'os.path.join', (['root', '"""training_list.txt"""'], {}), "(root, 'training_list.txt')\n", (1379, 1406), False, 'import os\n'), ((1454, 1495), 'os.path.join', 'os.path.join', (['root', '"""validation_list.txt"""'], {}), "(root, 'validation_list.txt')\n", (1466, 1495), False, 'import os\n'), ((1606, 1644), 'os.path.join', 'os.path.join', (['root', '"""testing_list.txt"""'], {}), "(root, 'testing_list.txt')\n", (1618, 1644), False, 'import os\n')]
|
import numpy as np
from numpy import exp, sqrt
from functools import partial
from scipy import optimize
from scipy.stats import norm
import scipy.integrate as integrate
from fox_toolbox.utils import rates
"""This module price swaption under Hull White model using Jamshidian method.
Usage example:
from hw import Jamshidian as jamsh
jamsh_price, debug = jamsh.hw_swo(swo, ref_mr, sigma_hw_jamsh, dsc_curve, estim_curve)
swo : rates.Swaption
ref_mr : float
sigma_hw_jamsh : rates.Curve
dsc_curve : rates.RateCurve
estim_curve : rates.RateCurve
"""
class Jamshidian():
def __init__(self, mr, sigma, dsc_curve, estim_curve):
assert isinstance(sigma, (float, rates.Curve)), f'sigma: float or rates.Curve, not {type(sigma)}'
self.mr = mr
self.sigma = sigma
self.dsc_curve = dsc_curve
self.estim_curve = estim_curve
@staticmethod
def sign_changes(array):
"""return number of times the sign is changed in array"""
return np.where(np.diff(np.sign(array)))[0]
@staticmethod
def _B(t, T, a):
return (1 - exp(-a * (T - t))) / a
@staticmethod
def _v(t, T, u, a):
p1 = (T - t)
p2 = - (2 / a) * exp(-a * u) * (exp(a * T) - exp(a * t))
p3 = exp(-2 * a *u) * (exp(2 * a *T) - exp(2 * a *t)) / (2 * a)
return (p1 + p2 + p3) / (a**2)
@staticmethod
def _V(t, T, u, a, sigma):
if isinstance(sigma, float):
            return sigma**2 * Jamshidian._v(t, T, u, a)
elif isinstance(sigma, rates.Curve):
total_var = 0.
expiry = T
previous_expiries = [t_exp for t_exp in sigma.buckets if t_exp <= expiry]
previous_sigmas = list(sigma.values[:len(previous_expiries)])
if previous_expiries[-1] < expiry:
previous_sigmas.append(sigma.values[len(previous_expiries)])
previous_expiries.append(expiry)
for i in range(len(previous_expiries) - 1):
                total_var += (previous_sigmas[i+1] ** 2) * Jamshidian._v(t, previous_expiries[i+1], u, a)
return total_var
@staticmethod
def _A(t, T, a, sigma, dsc_curve):
assert isinstance(sigma, (float, rates.Curve)), f'sigma: float or rates.Curve, not {type(sigma)}'
fwd_dsc = dsc_curve.get_fwd_dsc(t, T)
        return fwd_dsc * exp(0.5*(Jamshidian._V(0, t, t, a, sigma) - Jamshidian._V(0, t, T, a, sigma)))
def get_coef(self, swo):
""" Coefficients for Put swaption from calibration basket. Jamishidian """
flt_adjs = swo.get_flt_adjustments(self.dsc_curve, self.estim_curve)
        c0 = -self._A(swo.expiry, swo.start_date, self.mr, self.sigma, self.dsc_curve)
        c = list(map(lambda dcf, pdate, fadj: dcf * (swo.strike - fadj) * self._A(swo.expiry, pdate, self.mr, self.sigma, self.dsc_curve),
                   swo.day_count_fractions, swo.payment_dates, flt_adjs))
        c[-1] += self._A(swo.expiry, swo.maturity, self.mr, self.sigma, self.dsc_curve)
c.insert(0, c0)
return np.array(c)
def get_var_x(self, expiry):
        if isinstance(self.sigma, float):
            return 1 / (2 * self.mr) * (1 - exp(-2 * self.mr * expiry)) * self.sigma ** 2
        elif isinstance(self.sigma, rates.Curve):
total_var = 0.
previous_expiries = [t_exp for t_exp in self.sigma.buckets if t_exp <= expiry]
previous_sigmas = list(self.sigma.values[:len(previous_expiries)])
if previous_expiries[-1] < expiry:
previous_sigmas.append(self.sigma.values[len(previous_expiries)])
previous_expiries.append(expiry)
for i in range(len(previous_expiries) - 1):
total_var += 1 / (2 * self.mr) * (previous_sigmas[i+1] ** 2) * (exp(-2 * self.mr * (expiry - previous_expiries[i+1])) - exp(-2 * self.mr * (expiry - previous_expiries[i])))
return total_var
def get_b_i(self, swo):
""" array of B_i for by each payment date """
        b0 = self._B(swo.expiry, swo.start_date, self.mr)
        b = list(map(lambda pdate: self._B(swo.expiry, pdate, self.mr), swo.payment_dates))
b.insert(0, b0)
return np.array(b)
@staticmethod
def swap_value(coef, b_i, varx, x):
""" Swap function for finding x_star """
exp_b_var = exp(- b_i * sqrt(varx) * x)
return coef.dot(exp_b_var)
@staticmethod
def get_x_star(coef, b_i, varx):
x0 = .0
        func = partial(Jamshidian.swap_value, coef, b_i, varx)
# optimum = optimize.newton(func, x0=x0)
optimum = optimize.bisect(func, -6, 6)
return optimum
### TODO: continue adapting the remaining module-level helpers to the class interface
def hw_swo_analytic(coef, b_i, varx, x_star, IsCall):
""" analytic """
sign = -1 if IsCall else 1
if IsCall: coef = np.negative(coef)
val_arr = exp(0.5 * b_i ** 2 * varx) * norm.cdf(sign*(x_star + b_i * sqrt(varx)))
return coef.dot(val_arr)
def hw_swo_numeric(coef, b_i, varx, IsCall):
if IsCall: coef = np.negative(coef)
swaption_numeric = integrate.quad(lambda x: swo_payoff(coef, b_i, varx, x) * norm.pdf(x), -10, 10)[0]
degen_swo_analytic, degen_swo_numeric = 0, 0
control_variable = degen_swo_analytic - degen_swo_numeric
return swaption_numeric + control_variable
def swo_payoff(coef, b_i, varx, x):
"""Call/Put is hidden in coef"""
    swap = Jamshidian.swap_value(coef, b_i, varx, x)
return swap if swap > 0 else 0
def hw_swo(swo, a, sigma, dsc_curve, estim_curve):
""" Main Hull White swaption function """
IsCall = False if swo.pay_rec == 'Receiver' else True
    model = Jamshidian(a, sigma, dsc_curve, estim_curve)
    coef = model.get_coef(swo)
    b_i = model.get_b_i(swo)
    varx = model.get_var_x(swo.expiry)
    sgn_changes = Jamshidian.sign_changes(coef)
change_once = len(sgn_changes) == 1
if change_once:
        x_star = Jamshidian.get_x_star(coef, b_i, varx)
debug_dict = {}
return hw_swo_analytic(coef, b_i, varx, x_star, IsCall), debug_dict
else:
debug_dict = {}
return hw_swo_numeric(coef, b_i, varx, IsCall), debug_dict
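# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# Mirrors the usage shown in the module docstring; `swo`, `ref_mr`,
# `sigma_hw_jamsh`, `dsc_curve` and `estim_curve` must be constructed by the
# caller from fox_toolbox rates objects.
# jamsh_price, debug = hw_swo(swo, ref_mr, sigma_hw_jamsh, dsc_curve, estim_curve)
# In the analytic (single sign change) branch, x_star is a root of the swap
# value, so Jamshidian.swap_value(coef, b_i, varx, x_star) should be ~0.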
|
[
"functools.partial",
"numpy.negative",
"scipy.stats.norm.pdf",
"numpy.array",
"numpy.exp",
"numpy.sign",
"scipy.optimize.bisect",
"numpy.sqrt"
] |
[((3058, 3069), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (3066, 3069), True, 'import numpy as np\n'), ((4178, 4189), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (4186, 4189), True, 'import numpy as np\n'), ((4468, 4504), 'functools.partial', 'partial', (['swap_value', 'coef', 'b_i', 'varx'], {}), '(swap_value, coef, b_i, varx)\n', (4475, 4504), False, 'from functools import partial\n'), ((4572, 4600), 'scipy.optimize.bisect', 'optimize.bisect', (['func', '(-6)', '(6)'], {}), '(func, -6, 6)\n', (4587, 4600), False, 'from scipy import optimize\n'), ((4803, 4820), 'numpy.negative', 'np.negative', (['coef'], {}), '(coef)\n', (4814, 4820), True, 'import numpy as np\n'), ((4840, 4866), 'numpy.exp', 'exp', (['(0.5 * b_i ** 2 * varx)'], {}), '(0.5 * b_i ** 2 * varx)\n', (4843, 4866), False, 'from numpy import exp, sqrt\n'), ((5024, 5041), 'numpy.negative', 'np.negative', (['coef'], {}), '(coef)\n', (5035, 5041), True, 'import numpy as np\n'), ((1144, 1161), 'numpy.exp', 'exp', (['(-a * (T - t))'], {}), '(-a * (T - t))\n', (1147, 1161), False, 'from numpy import exp, sqrt\n'), ((1260, 1271), 'numpy.exp', 'exp', (['(-a * u)'], {}), '(-a * u)\n', (1263, 1271), False, 'from numpy import exp, sqrt\n'), ((1275, 1285), 'numpy.exp', 'exp', (['(a * T)'], {}), '(a * T)\n', (1278, 1285), False, 'from numpy import exp, sqrt\n'), ((1288, 1298), 'numpy.exp', 'exp', (['(a * t)'], {}), '(a * t)\n', (1291, 1298), False, 'from numpy import exp, sqrt\n'), ((1313, 1328), 'numpy.exp', 'exp', (['(-2 * a * u)'], {}), '(-2 * a * u)\n', (1316, 1328), False, 'from numpy import exp, sqrt\n'), ((1060, 1074), 'numpy.sign', 'np.sign', (['array'], {}), '(array)\n', (1067, 1074), True, 'import numpy as np\n'), ((1331, 1345), 'numpy.exp', 'exp', (['(2 * a * T)'], {}), '(2 * a * T)\n', (1334, 1345), False, 'from numpy import exp, sqrt\n'), ((1347, 1361), 'numpy.exp', 'exp', (['(2 * a * t)'], {}), '(2 * a * t)\n', (1350, 1361), False, 'from numpy import exp, sqrt\n'), ((4330, 4340), 'numpy.sqrt', 'sqrt', (['varx'], {}), '(varx)\n', (4334, 4340), False, 'from numpy import exp, sqrt\n'), ((3181, 3201), 'numpy.exp', 'exp', (['(-2 * a * expiry)'], {}), '(-2 * a * expiry)\n', (3184, 3201), False, 'from numpy import exp, sqrt\n'), ((5128, 5139), 'scipy.stats.norm.pdf', 'norm.pdf', (['x'], {}), '(x)\n', (5136, 5139), False, 'from scipy.stats import norm\n'), ((3776, 3831), 'numpy.exp', 'exp', (['(-2 * self.mr * (expiry - previous_expiries[i + 1]))'], {}), '(-2 * self.mr * (expiry - previous_expiries[i + 1]))\n', (3779, 3831), False, 'from numpy import exp, sqrt\n'), ((3832, 3883), 'numpy.exp', 'exp', (['(-2 * self.mr * (expiry - previous_expiries[i]))'], {}), '(-2 * self.mr * (expiry - previous_expiries[i]))\n', (3835, 3883), False, 'from numpy import exp, sqrt\n'), ((4899, 4909), 'numpy.sqrt', 'sqrt', (['varx'], {}), '(varx)\n', (4903, 4909), False, 'from numpy import exp, sqrt\n')]
|
from typing import Optional, List
import torch
import torchvision
import numpy as np
from ..basic_typing import Datasets
from ..train import SequenceArray
from ..train import SamplerRandom, SamplerSequential
import functools
import collections
import os
from ..transforms import Transform
from typing_extensions import Literal
def image_to_torch(i):
return torch.from_numpy(np.array(i).transpose((2, 0, 1))).unsqueeze(0)
def segmentation_to_torch(i):
return torch.from_numpy(np.array(i)).type(torch.int64).unsqueeze(0).unsqueeze(0)
def load_case(batch, dataset, transform):
case_ids = batch['case_id']
images = []
segmentations = []
for case_id in case_ids:
image, segmentation = dataset[case_id]
images.append(image_to_torch(image))
segmentations.append(segmentation_to_torch(segmentation))
data_batch = {
'case_id': case_ids,
'image': torch.cat(images),
'segmentation': torch.cat(segmentations)
}
if transform is not None:
data_batch = transform(data_batch)
return data_batch
def create_cityscapes_dataset(
batch_size: int = 32,
root: Optional[str] = None,
transform_train: Optional[List[Transform]] = None,
transform_valid: Optional[List[Transform]] = None,
nb_workers: int = 4,
target_type: Literal['semantic'] = 'semantic') -> Datasets:
"""
    Load the cityscapes dataset. This requires registering on the website https://www.cityscapes-dataset.com/
    and manually downloading the dataset.
    The dataset is composed of 3 parts: gtCoarse, gtFine, leftImg8bit. Download each package and unzip it into a
    folder (e.g., `cityscapes`)
Args:
batch_size:
root: the folder containing the 3 unzipped cityscapes data `gtCoarse`, `gtFine`, `leftImg8bit`
transform_train: the transform to apply on the training batches
transform_valid: the transform to apply on the validation batches
nb_workers: the number of workers for each split allocated to the data loading and processing
target_type: the segmentation task
Returns:
a dict of splits. Each split is a :class:`trw.train.Sequence`
"""
if root is None:
# first, check if we have some environment variables configured
root = os.environ.get('TRW_DATA_ROOT')
if root is None:
# else default a standard folder
root = './data'
cityscapes_path = os.path.join(root, 'cityscapes')
train_dataset = torchvision.datasets.cityscapes.Cityscapes(cityscapes_path, mode='fine', split='train', target_type=target_type)
valid_dataset = torchvision.datasets.cityscapes.Cityscapes(cityscapes_path, mode='fine', split='val', target_type=target_type)
train_sampler = SamplerRandom(batch_size=batch_size)
train_sequence = SequenceArray({'case_id': np.arange(len(train_dataset))}, sampler=train_sampler)
train_sequence = train_sequence.map(
functools.partial(load_case, dataset=train_dataset, transform=transform_train), nb_workers=nb_workers)
valid_sampler = SamplerSequential(batch_size=batch_size)
valid_sequence = SequenceArray({'case_id': np.arange(len(valid_dataset))}, sampler=valid_sampler)
valid_sequence = valid_sequence.map(
functools.partial(load_case, dataset=valid_dataset, transform=transform_valid), nb_workers=nb_workers)
dataset = collections.OrderedDict([
('train', train_sequence),
('valid', valid_sequence)
])
return collections.OrderedDict([
('cityscapes', dataset)
])
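# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# The root path below is an assumption; it must contain the unzipped gtCoarse,
# gtFine and leftImg8bit packages under a `cityscapes` subdirectory.
# datasets = create_cityscapes_dataset(batch_size=8, root='/data')
# for batch in datasets['cityscapes']['train']:
#     images = batch['image']                # (B, 3, H, W) tensor
#     segmentations = batch['segmentation']  # (B, 1, H, W) int64 tensor
#     break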
|
[
"functools.partial",
"torch.cat",
"torchvision.datasets.cityscapes.Cityscapes",
"os.environ.get",
"numpy.array",
"collections.OrderedDict",
"os.path.join"
] |
[((2469, 2501), 'os.path.join', 'os.path.join', (['root', '"""cityscapes"""'], {}), "(root, 'cityscapes')\n", (2481, 2501), False, 'import os\n'), ((2522, 2638), 'torchvision.datasets.cityscapes.Cityscapes', 'torchvision.datasets.cityscapes.Cityscapes', (['cityscapes_path'], {'mode': '"""fine"""', 'split': '"""train"""', 'target_type': 'target_type'}), "(cityscapes_path, mode='fine',\n split='train', target_type=target_type)\n", (2564, 2638), False, 'import torchvision\n'), ((2655, 2769), 'torchvision.datasets.cityscapes.Cityscapes', 'torchvision.datasets.cityscapes.Cityscapes', (['cityscapes_path'], {'mode': '"""fine"""', 'split': '"""val"""', 'target_type': 'target_type'}), "(cityscapes_path, mode='fine',\n split='val', target_type=target_type)\n", (2697, 2769), False, 'import torchvision\n'), ((3409, 3488), 'collections.OrderedDict', 'collections.OrderedDict', (["[('train', train_sequence), ('valid', valid_sequence)]"], {}), "([('train', train_sequence), ('valid', valid_sequence)])\n", (3432, 3488), False, 'import collections\n'), ((3523, 3573), 'collections.OrderedDict', 'collections.OrderedDict', (["[('cityscapes', dataset)]"], {}), "([('cityscapes', dataset)])\n", (3546, 3573), False, 'import collections\n'), ((916, 933), 'torch.cat', 'torch.cat', (['images'], {}), '(images)\n', (925, 933), False, 'import torch\n'), ((959, 983), 'torch.cat', 'torch.cat', (['segmentations'], {}), '(segmentations)\n', (968, 983), False, 'import torch\n'), ((2327, 2358), 'os.environ.get', 'os.environ.get', (['"""TRW_DATA_ROOT"""'], {}), "('TRW_DATA_ROOT')\n", (2341, 2358), False, 'import os\n'), ((2975, 3053), 'functools.partial', 'functools.partial', (['load_case'], {'dataset': 'train_dataset', 'transform': 'transform_train'}), '(load_case, dataset=train_dataset, transform=transform_train)\n', (2992, 3053), False, 'import functools\n'), ((3291, 3369), 'functools.partial', 'functools.partial', (['load_case'], {'dataset': 'valid_dataset', 'transform': 'transform_valid'}), '(load_case, dataset=valid_dataset, transform=transform_valid)\n', (3308, 3369), False, 'import functools\n'), ((382, 393), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (390, 393), True, 'import numpy as np\n'), ((489, 500), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (497, 500), True, 'import numpy as np\n')]
|
"""
This file contains code that will kick off training and testing processes
"""
import os, sys
import argparse
import json
import numpy as np
from experiments.UNetExperiment import UNetExperiment
from data_prep.HippocampusDatasetLoader import LoadHippocampusData
from torch.utils.data import random_split
class Config:
"""
Holds configuration parameters
"""
def __init__(self):
self.name = "Basic_unet"
self.root_dir = r"data/"
self.n_epochs = 10
self.learning_rate = 0.0002
self.batch_size = 8
self.patch_size = 64
self.test_results_dir = "out/results"
self.model_name = "" # the command line provided model name to save network weights in
self.weights_name = "" # the command line provided weights file name to load network weights from
self.test = False
def set_model_name(self, m):
self.model_name = m
def set_weights_name(self, w):
self.weights_name = w
def set_test(self, t):
self.test = t
if __name__ == "__main__":
# Get configuration
# TASK: Fill in parameters of the Config class and specify directory where the data is stored and
# directory where results will go
c = Config()
parser = argparse.ArgumentParser()
parser.add_argument("--weights", "-w", help="file name for saved model weights", action="store")
parser.add_argument("--modelname", "-m", help="model weights filename used for saving this model", action="store")
parser.add_argument("--testonly", "-t", help="test only, no training", action="store_true")
args = parser.parse_args()
if args.weights:
print("Will load model weights from", args.weights)
c.set_weights_name(args.weights)
else:
print("No pretrained model weights given. Will train a new model.")
if args.modelname:
print("Will store model weights in", args.modelname)
c.set_model_name(args.modelname)
if args.testonly:
# need to also provide a weights filename if we're only testing
print("Testing mode.")
c.set_test(True)
if not args.weights:
print("Please also provide a weights filename through -w")
sys.exit()
# Load data
print("Loading data...")
# TASK: LoadHippocampusData is not complete. Go to the implementation and complete it.
data = LoadHippocampusData(c.root_dir + "TrainingSet/", y_shape = c.patch_size, z_shape = c.patch_size)
# Create test-train-val split
# In a real world scenario you would probably do multiple splits for
# multi-fold training to improve your model quality
data_len = len(data)
keys = range(data_len)
# Here, random permutation of keys array would be useful in case if we do something like
# a k-fold training and combining the results.
# TASK: create three keys in the dictionary: "train", "val" and "test". In each key, store
# the array with indices of training volumes to be used for training, validation
# and testing respectively.
train_proportion = 0.7
val_proportion = 0.2
test_proportion = 0.1
    splits = [int(np.floor(train_proportion * data_len)),
              int(np.floor(val_proportion * data_len)),
              int(np.floor(test_proportion * data_len))]
    # random_split requires the lengths to sum to len(keys) exactly, so absorb
    # any rounding remainder into the test split
    splits[-1] = data_len - sum(splits[:-1])
train, val, test = random_split(keys, splits)
split = {"train": train,
"val": val,
"test": test}
# Set up and run experiment
# TASK: Class UNetExperiment has missing pieces. Go to the file and fill them in
exp = UNetExperiment(c, split, data)
# You could free up memory by deleting the dataset
# as it has been copied into loaders
del data
if not args.testonly:
# run training and validation
exp.run()
# prep and run testing
# TASK: Test method is not complete. Go to the method and complete it
results_json = exp.run_test()
results_json["config"] = vars(c)
with open(os.path.join(exp.out_dir, "results.json"), 'w') as out_file:
json.dump(results_json, out_file, indent=2, separators=(',', ': '))
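# --- Hedged usage notes (added for illustration; not part of the original source) ---
# Typical invocations, assuming this script is saved as run_ml_pipeline.py
# (the file name is an assumption):
#   python run_ml_pipeline.py -m my_unet          # train a new model, storing weights as my_unet
#   python run_ml_pipeline.py -t -w weights.pth   # test only, loading the given weights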
|
[
"experiments.UNetExperiment.UNetExperiment",
"data_prep.HippocampusDatasetLoader.LoadHippocampusData",
"json.dump",
"argparse.ArgumentParser",
"numpy.floor",
"torch.utils.data.random_split",
"os.path.join",
"sys.exit"
] |
[((1293, 1318), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1316, 1318), False, 'import argparse\n'), ((2445, 2541), 'data_prep.HippocampusDatasetLoader.LoadHippocampusData', 'LoadHippocampusData', (["(c.root_dir + 'TrainingSet/')"], {'y_shape': 'c.patch_size', 'z_shape': 'c.patch_size'}), "(c.root_dir + 'TrainingSet/', y_shape=c.patch_size,\n z_shape=c.patch_size)\n", (2464, 2541), False, 'from data_prep.HippocampusDatasetLoader import LoadHippocampusData\n'), ((3404, 3430), 'torch.utils.data.random_split', 'random_split', (['keys', 'splits'], {}), '(keys, splits)\n', (3416, 3430), False, 'from torch.utils.data import random_split\n'), ((3655, 3685), 'experiments.UNetExperiment.UNetExperiment', 'UNetExperiment', (['c', 'split', 'data'], {}), '(c, split, data)\n', (3669, 3685), False, 'from experiments.UNetExperiment import UNetExperiment\n'), ((4138, 4205), 'json.dump', 'json.dump', (['results_json', 'out_file'], {'indent': '(2)', 'separators': "(',', ': ')"}), "(results_json, out_file, indent=2, separators=(',', ': '))\n", (4147, 4205), False, 'import json\n'), ((2280, 2290), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2288, 2290), False, 'import os, sys\n'), ((3226, 3263), 'numpy.floor', 'np.floor', (['(train_proportion * data_len)'], {}), '(train_proportion * data_len)\n', (3234, 3263), True, 'import numpy as np\n'), ((3285, 3320), 'numpy.floor', 'np.floor', (['(val_proportion * data_len)'], {}), '(val_proportion * data_len)\n', (3293, 3320), True, 'import numpy as np\n'), ((3342, 3378), 'numpy.floor', 'np.floor', (['(test_proportion * data_len)'], {}), '(test_proportion * data_len)\n', (3350, 3378), True, 'import numpy as np\n'), ((4069, 4110), 'os.path.join', 'os.path.join', (['exp.out_dir', '"""results.json"""'], {}), "(exp.out_dir, 'results.json')\n", (4081, 4110), False, 'import os, sys\n')]
|
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras.backend as K
from models import load_model, load_adfmodel
import instances
# GENERAL PARAMETERS
MODE = 'joint_untargeted'
IMG_SHAPE = [28, 28]
# LOAD MODEL
model = load_model()
generator = instances.load_generator()
def get_data_sample(index):
return (
generator[index],
os.path.splitext(os.path.split(generator.filenames[index])[1])[0],
)
def store_single_result(mapping, name, fname, rate, d, subdir):
savedir = os.path.join('results', subdir, fname)
os.makedirs(savedir, exist_ok=True)
# print(mapping.shape)
mapping = np.reshape(mapping, IMG_SHAPE)
# for line in mapping:
# print(line)
# raise Exception
# for row in mapping:
# print(row)
# np.save(f'/home/Morgan/fw-rde/mnist/results/{name}.npy', mapping)
# print(np.max(mapping))
# print(np.min(mapping))
# mapping = mapping - np.min(mapping)
# mapping = mapping / np.max(mapping)
# for row in mapping:
# print(row)
plt.imsave(
os.path.join(
savedir,
f'{name}_rate-{rate}_d-{d}.png'
),
mapping.squeeze(),
cmap='Greys',
vmin=np.min(mapping),
vmax=np.max(mapping),
format='png',
)
def store_pert_img(x, s, p, name, fname, rate, d, subdir):
savedir = os.path.join('results', subdir, fname)
os.makedirs(savedir, exist_ok=True)
# print(mapping.shape)
x = np.reshape(x, IMG_SHAPE)
s = np.reshape(s, IMG_SHAPE)
p = np.reshape(p, IMG_SHAPE)
x = x + s*p
# for line in mapping:
# print(line)
# raise Exception
# np.save(f'/home/Morgan/fw-rde/mnist/results/{name}.npy', x)
plt.imsave(
os.path.join(
savedir,
f'{name}_rate-{rate}_d-{d}.png'
),
x.squeeze(),
cmap='Greys',
vmin=np.min(x),
vmax=np.max(x),
        format='png',
)
def get_distortion(x, model=model, mode=MODE):
x_tensor = tf.constant(x, dtype=tf.float32)
s_flat = tf.placeholder(tf.float32, (np.prod(x_tensor.shape),))
s_tensor = tf.reshape(s_flat, x.shape)
p_flat = tf.placeholder(tf.float32, (np.prod(x_tensor.shape),))
p_tensor = tf.reshape(p_flat, x.shape)
pred = model.predict(x)
node = np.argpartition(pred[0, ...], -2)[-1]
# target = pred[0, node]
unprocessed = x + s_tensor * p_tensor
# network_input = (tf.tanh((unprocessed + 37.96046)/255 * 2 - 1) + 1) / 2 * 255 - 37
network_input = tf.clip_by_value(unprocessed, clip_value_min=np.min(x), clip_value_max=np.max(x))
out = model(network_input)
if mode == 'joint_untargeted':
loss = tf.squeeze(out[..., node])
gradient = K.gradients(loss, [s_flat, p_flat])
f_out = K.function([s_flat, p_flat], [loss])
f_gradient = K.function([s_flat, p_flat], [gradient])
# a = tf.random.uniform(shape=s_flat.shape)
# b = tf.random.uniform(shape=s_flat.shape)
#
# c = f_out([a, b])
# d = f_gradient([a, b])
return lambda s, p: f_out([s, p])[0], lambda s, p: f_gradient([s, p])[0][0], lambda s, p: f_gradient([s, p])[0][1], node, pred
def print_model_prediction(x, s, p):
print('\n------------------------\n')
print(np.max(x))
print(np.min(x))
print('\n------------------------\n')
print(np.max(s))
print(np.min(s))
print('\n------------------------\n')
print(np.max(p))
print(np.min(p))
print('\n------------------------\n')
s = np.reshape(s, x.shape)
p = np.reshape(p, x.shape)
pert_input = x + s * p
print(np.max(pert_input))
print(np.min(pert_input))
print('\n------------------------\n')
# for t in [x, pert_input]:
# print('\n\n\n\n')
# for row in t:
# print(row)
# raise(Exception)
# s = tf.reshape(s, x.shape)
# p = tf.reshape(p, x.shape)
# pert_input = x+s*p
pert_input = tf.convert_to_tensor(pert_input)
# pert_input = (tf.tanh((pert_input + 37.96046) / 255 * 2 - 1) + 1) / 2 * 255 - 37
pert_input = tf.clip_by_value(pert_input, clip_value_min=np.min(x), clip_value_max=np.max(x))
sess = tf.Session()
with sess.as_default():
pert_input = pert_input.eval()
print('\n------------------------\n')
print(pert_input.shape)
print(np.max(pert_input))
print(np.min(pert_input))
print('\n------------------------\n')
# pert_input[pert_input < -37.96046] = -37.96046
# pert_input[pert_input > 255-37.96046] = 255-37.96046
pred0 = model.predict(x, steps=1)
pred1 = model.predict(pert_input, steps=1)
print(f'orig pred: {pred0}')
print(f'pert pred: {pred1}')
# x, fname = get_data_sample(0)
#
# f, gs, gp, n, p = get_distortion(x)
#
# a = tf.random.uniform(shape=[28*28])
# b = tf.random.uniform(shape=[28*28])
#
# out = f(a,b)
#
#
# _=0
|
[
"instances.load_generator",
"os.makedirs",
"tensorflow.convert_to_tensor",
"tensorflow.reshape",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.keras.backend.function",
"models.load_model",
"tensorflow.keras.backend.gradients",
"numpy.argpartition",
"numpy.max",
"numpy.reshape",
"numpy.min",
"tensorflow.squeeze",
"os.path.split",
"os.path.join",
"numpy.prod"
] |
[((276, 288), 'models.load_model', 'load_model', ([], {}), '()\n', (286, 288), False, 'from models import load_model, load_adfmodel\n'), ((302, 328), 'instances.load_generator', 'instances.load_generator', ([], {}), '()\n', (326, 328), False, 'import instances\n'), ((559, 597), 'os.path.join', 'os.path.join', (['"""results"""', 'subdir', 'fname'], {}), "('results', subdir, fname)\n", (571, 597), False, 'import os\n'), ((602, 637), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (613, 637), False, 'import os\n'), ((679, 709), 'numpy.reshape', 'np.reshape', (['mapping', 'IMG_SHAPE'], {}), '(mapping, IMG_SHAPE)\n', (689, 709), True, 'import numpy as np\n'), ((1421, 1459), 'os.path.join', 'os.path.join', (['"""results"""', 'subdir', 'fname'], {}), "('results', subdir, fname)\n", (1433, 1459), False, 'import os\n'), ((1464, 1499), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (1475, 1499), False, 'import os\n'), ((1535, 1559), 'numpy.reshape', 'np.reshape', (['x', 'IMG_SHAPE'], {}), '(x, IMG_SHAPE)\n', (1545, 1559), True, 'import numpy as np\n'), ((1568, 1592), 'numpy.reshape', 'np.reshape', (['s', 'IMG_SHAPE'], {}), '(s, IMG_SHAPE)\n', (1578, 1592), True, 'import numpy as np\n'), ((1601, 1625), 'numpy.reshape', 'np.reshape', (['p', 'IMG_SHAPE'], {}), '(p, IMG_SHAPE)\n', (1611, 1625), True, 'import numpy as np\n'), ((2080, 2112), 'tensorflow.constant', 'tf.constant', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (2091, 2112), True, 'import tensorflow as tf\n'), ((2196, 2223), 'tensorflow.reshape', 'tf.reshape', (['s_flat', 'x.shape'], {}), '(s_flat, x.shape)\n', (2206, 2223), True, 'import tensorflow as tf\n'), ((2308, 2335), 'tensorflow.reshape', 'tf.reshape', (['p_flat', 'x.shape'], {}), '(p_flat, x.shape)\n', (2318, 2335), True, 'import tensorflow as tf\n'), ((2801, 2836), 'tensorflow.keras.backend.gradients', 'K.gradients', (['loss', '[s_flat, p_flat]'], {}), '(loss, [s_flat, p_flat])\n', (2812, 2836), True, 'import tensorflow.keras.backend as K\n'), ((2849, 2885), 'tensorflow.keras.backend.function', 'K.function', (['[s_flat, p_flat]', '[loss]'], {}), '([s_flat, p_flat], [loss])\n', (2859, 2885), True, 'import tensorflow.keras.backend as K\n'), ((2903, 2943), 'tensorflow.keras.backend.function', 'K.function', (['[s_flat, p_flat]', '[gradient]'], {}), '([s_flat, p_flat], [gradient])\n', (2913, 2943), True, 'import tensorflow.keras.backend as K\n'), ((3573, 3595), 'numpy.reshape', 'np.reshape', (['s', 'x.shape'], {}), '(s, x.shape)\n', (3583, 3595), True, 'import numpy as np\n'), ((3604, 3626), 'numpy.reshape', 'np.reshape', (['p', 'x.shape'], {}), '(p, x.shape)\n', (3614, 3626), True, 'import numpy as np\n'), ((4001, 4033), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['pert_input'], {}), '(pert_input)\n', (4021, 4033), True, 'import tensorflow as tf\n'), ((4231, 4243), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4241, 4243), True, 'import tensorflow as tf\n'), ((1119, 1173), 'os.path.join', 'os.path.join', (['savedir', 'f"""{name}_rate-{rate}_d-{d}.png"""'], {}), "(savedir, f'{name}_rate-{rate}_d-{d}.png')\n", (1131, 1173), False, 'import os\n'), ((1806, 1860), 'os.path.join', 'os.path.join', (['savedir', 'f"""{name}_rate-{rate}_d-{d}.png"""'], {}), "(savedir, f'{name}_rate-{rate}_d-{d}.png')\n", (1818, 1860), False, 'import os\n'), ((2376, 2409), 'numpy.argpartition', 'np.argpartition', (['pred[0, ...]', '(-2)'], {}), '(pred[0, ...], -2)\n', (2391, 2409), True, 'import numpy as np\n'),
((2758, 2784), 'tensorflow.squeeze', 'tf.squeeze', (['out[..., node]'], {}), '(out[..., node])\n', (2768, 2784), True, 'import tensorflow as tf\n'), ((3323, 3332), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3329, 3332), True, 'import numpy as np\n'), ((3344, 3353), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (3350, 3353), True, 'import numpy as np\n'), ((3407, 3416), 'numpy.max', 'np.max', (['s'], {}), '(s)\n', (3413, 3416), True, 'import numpy as np\n'), ((3428, 3437), 'numpy.min', 'np.min', (['s'], {}), '(s)\n', (3434, 3437), True, 'import numpy as np\n'), ((3491, 3500), 'numpy.max', 'np.max', (['p'], {}), '(p)\n', (3497, 3500), True, 'import numpy as np\n'), ((3512, 3521), 'numpy.min', 'np.min', (['p'], {}), '(p)\n', (3518, 3521), True, 'import numpy as np\n'), ((3666, 3684), 'numpy.max', 'np.max', (['pert_input'], {}), '(pert_input)\n', (3672, 3684), True, 'import numpy as np\n'), ((3696, 3714), 'numpy.min', 'np.min', (['pert_input'], {}), '(pert_input)\n', (3702, 3714), True, 'import numpy as np\n'), ((4392, 4410), 'numpy.max', 'np.max', (['pert_input'], {}), '(pert_input)\n', (4398, 4410), True, 'import numpy as np\n'), ((4422, 4440), 'numpy.min', 'np.min', (['pert_input'], {}), '(pert_input)\n', (4428, 4440), True, 'import numpy as np\n'), ((1271, 1286), 'numpy.min', 'np.min', (['mapping'], {}), '(mapping)\n', (1277, 1286), True, 'import numpy as np\n'), ((1301, 1316), 'numpy.max', 'np.max', (['mapping'], {}), '(mapping)\n', (1307, 1316), True, 'import numpy as np\n'), ((1952, 1961), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1958, 1961), True, 'import numpy as np\n'), ((1976, 1985), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1982, 1985), True, 'import numpy as np\n'), ((2154, 2177), 'numpy.prod', 'np.prod', (['x_tensor.shape'], {}), '(x_tensor.shape)\n', (2161, 2177), True, 'import numpy as np\n'), ((2266, 2289), 'numpy.prod', 'np.prod', (['x_tensor.shape'], {}), '(x_tensor.shape)\n', (2273, 2289), True, 'import numpy as np\n'), ((2640, 2649), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (2646, 2649), True, 'import numpy as np\n'), ((2666, 2675), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (2672, 2675), True, 'import numpy as np\n'), ((4182, 4191), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (4188, 4191), True, 'import numpy as np\n'), ((4208, 4217), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (4214, 4217), True, 'import numpy as np\n'), ((423, 464), 'os.path.split', 'os.path.split', (['generator.filenames[index]'], {}), '(generator.filenames[index])\n', (436, 464), False, 'import os\n')]
|
from abc import ABC, abstractmethod
import gym
import numpy as np
from minerl.herobraine.hero import AgentHandler
from minerl.herobraine.hero import KEYMAP
from minerl.herobraine.hero import spaces
from minerl.herobraine.hero.spaces import DiscreteRange
class CommandAction(AgentHandler):
"""
An action handler based on commands
    # Todo: support blacklisting commands. (note this has to work with merging somehow)
"""
def __init__(self, command: str, space: gym.Space):
"""
Initializes the space of the handler with a gym.spaces.Dict
of all of the spaces for each individual command.
"""
self._command = command
super().__init__(space)
@property
def command(self):
return self._command
def to_string(self):
return self._command
def to_hero(self, x):
"""
Returns a command string for the multi command action.
:param x:
:return:
"""
cmd = ""
verb = self.command
if isinstance(x, np.ndarray):
flat = x.flatten().tolist()
flat = [str(y) for y in flat]
adjective = " ".join(flat)
elif isinstance(x, list):
adjective = " ".join([str(y) for y in x])
else:
adjective = str(x)
cmd += "{} {}".format(
verb, adjective)
return cmd
def __or__(self, other):
if not self.command == other.command:
raise ValueError("Command must be the same between {} and {}".format(self.command, other.command))
return self
class ItemListCommandAction(CommandAction):
"""
An action handler based on a list of items
    The action space is determined by the length of the list plus one
"""
def __init__(self, command: str, items: list):
"""
Initializes the space of the handler with a gym.spaces.Dict
of all of the spaces for each individual command.
"""
# TODO must check that the first element is 'none' and last elem is 'other'
self._command = command
self._items = items
self._univ_items = ['minecraft:' + item for item in items]
assert 'none' in self._items
self._default = 'none'
super().__init__(self._command, spaces.Enum(*self._items, default=self._default))
@property
def items(self):
return self._items
@property
def universal_items(self):
return self._univ_items
@property
def default(self):
return self._default
def to_hero(self, x):
"""
Returns a command string for the multi command action.
:param x:
:return:
"""
cmd = ""
verb = self._command
if isinstance(x, np.ndarray):
raise NotImplementedError
elif isinstance(x, list):
raise NotImplementedError
elif 0 < x < len(self._items):
adjective = self._items[x]
cmd += "{} {}".format(
verb, adjective)
else:
cmd += "{} NONE".format(
verb)
return cmd
def from_universal(self, x):
raise NotImplementedError()
def __or__(self, other):
"""
Merges two ItemListCommandActions into one by unioning their items.
Assert that the commands are the same.
"""
if not isinstance(other, self.__class__):
raise TypeError("other must be an instance of ItemListCommandAction")
if self._command != other._command:
raise ValueError("Command must be the same for merging")
new_items = list(set(self._items) | set(other._items))
return self.__class__(new_items)
def __eq__(self, other):
"""
Asserts equality betwen item list command actions.
"""
if not isinstance(other, ItemListCommandAction):
return False
if self._command != other._command:
return False
# Check that all items are in self._items
if not all(x in self._items for x in other._items):
return False
# Check that all items are in other._items
if not all(x in other._items for x in self._items):
return False
return True
class CraftItem(ItemListCommandAction):
"""
An action handler for crafting items
    Note when used alongside Craft Item Nearby, block lists must be disjoint or from_universal will fire multiple
times
"""
_command = "craft"
def to_string(self):
return "craft"
def __init__(self, items: list):
"""
Initializes the space of the handler to be one for each item in the list plus one for the
default no-craft action (command 0)
Items are minecraft resource ID's
"""
super().__init__(self._command, items)
def from_universal(self, obs):
if 'diff' in obs and 'crafted' in obs['diff'] and len(obs['diff']['crafted']) > 0:
try:
x = self._univ_items.index(obs['diff']['crafted'][0]['item'])
return obs['diff']['crafted'][0]['item'].split('minecraft:')[-1]
except ValueError:
return self._default
# return self._items.index('other')
else:
return self._default
class CraftItemNearby(CraftItem):
"""
An action handler for crafting items when agent is in view of a crafting table
    Note when used alongside Craft Item, item lists must be disjoint or from_universal will fire multiple times
"""
_command = "craftNearby"
def to_string(self):
return 'nearbyCraft'
class SmeltItem(CraftItem):
def from_universal(self, obs):
if 'diff' in obs and 'smelted' in obs['diff'] and len(obs['diff']['smelted']) > 0:
try:
x = self._univ_items.index(obs['diff']['smelted'][0]['item'])
return obs['diff']['smelted'][0]['item'].split('minecraft:')[-1]
except ValueError:
return self._default
# return self._items.index('other')
else:
return self._default
class SmeltItemNearby(SmeltItem):
"""
An action handler for crafting items when agent is in view of a crafting table
    Note when used alongside Craft Item, block lists must be disjoint or from_universal will fire multiple times
"""
_command = 'smeltNearby'
def to_string(self):
return 'nearbySmelt'
class PlaceBlock(ItemListCommandAction):
"""
An action handler for placing a specific block
"""
def to_string(self):
return 'place'
def __init__(self, blocks: list):
"""
Initializes the space of the handler to be one for each item in the list
        Requires 0th item to be 'none' and last item to be 'other' corresponding to
no-op and non-listed item respectively
"""
self._items = blocks
self._command = 'place'
super().__init__(self._command, self._items)
self._prev_inv = None
# print(self._items)
# print(self._univ_items)
def from_universal(self, obs):
try:
for action in obs['custom_action']['actions'].keys():
try:
if int(action) == -99 and self._prev_inv is not None:
item_name = self._prev_inv[int(-10 + obs['hotbar'])]['name'].split("minecraft:")[-1]
if item_name not in self._items:
raise ValueError()
else:
return item_name
except ValueError:
return self._default
except TypeError:
print('Saw a type error in PlaceBlock')
raise TypeError
except KeyError:
return self._default
finally:
try:
self._prev_inv = obs['slots']['gui']['slots']
except KeyError:
self._prev_inv = None
return self._default
class EquipItem(ItemListCommandAction):
"""
    An action handler for equipping an item from a list of items
"""
def to_string(self):
return 'equip'
def __init__(self, items: list):
"""
Initializes the space of the handler to be one for each item in the list plus one for the
default no-craft action
"""
self._items = items
self._command = 'equip'
super().__init__(self._command, self._items)
self.previous = self._default
# print(self._items)
# print(self._univ_items)
def from_universal(self, obs):
try:
if obs['slots']['gui']['type'] == 'class net.minecraft.inventory.ContainerPlayer':
hotbar_index = int(obs['hotbar'])
item = self._univ_items.index(obs['slots']['gui']['slots'][-10 + hotbar_index]['name'])
if item != self.previous:
self.previous = item
return obs['slots']['gui']['slots'][-10 + hotbar_index]['name'].split('minecraft:')[-1]
except KeyError:
return self._default
except ValueError:
return self._default
# return self._items.index('other')
return self._default
def reset(self):
self.previous = self._default
class ContinuousMovementAction(CommandAction, ABC):
"""
Handles player control actions
"""
def add_to_mission_spec(self, mission_spec):
mission_spec.allowAllContinuousMovementCommands()
pass
class Camera(ContinuousMovementAction):
"""
Uses <delta_pitch, delta_yaw> vector in degrees to rotate the camera. pitch range [-180, 180], yaw range [-180, 180]
"""
def to_string(self):
return 'camera'
def __init__(self):
self._command = 'camera'
super().__init__(self.command, spaces.Box(low=-180, high=180, shape=[2], dtype=np.float32))
def from_universal(self, x):
if 'custom_action' in x and 'cameraYaw' in x['custom_action'] and 'cameraPitch' in x['custom_action']:
delta_pitch = x['custom_action']['cameraPitch']
delta_yaw = x['custom_action']['cameraYaw']
assert not np.isnan(np.sum(x['custom_action']['cameraYaw'])), "NAN in action!"
assert not np.isnan(np.sum(x['custom_action']['cameraPitch'])), "NAN in action!"
return np.array([-delta_pitch, -delta_yaw], dtype=np.float32)
else:
return np.array([0.0, 0.0], dtype=np.float32)
class KeyboardAction(ContinuousMovementAction):
"""
Handles keyboard actions.
"""
def to_string(self):
return self.command
def __init__(self, command, *keys):
if len(keys) == 2:
# Like move or strafe. Example: -1 for left, 1 for right
super().__init__(command, DiscreteRange(-1, 2))
else:
# Its a n-key action with discrete items.
# Eg hotbar actions
super().__init__(command, spaces.Discrete(len(keys) + 1))
self.keys = keys
def from_universal(self, x):
actions_mapped = list(x['custom_action']['actions'].keys())
# actions_mapped is just the raw key codes.
# for action in x['custom_action']['actions'].keys():
# try:
# actions_mapped += [KEYMAP[action]]
# except KeyError:
# pass
offset = self.space.begin if isinstance(self.space, DiscreteRange) else 0
default = 0
for i, key in enumerate(self.keys):
if key in actions_mapped:
if isinstance(self.space, DiscreteRange):
return i * 2 + offset
else:
return i + 1 + offset
# if "BUTTON1" in actions_mapped:
# print("BUTTON1")
        # If no key was pressed.
return default
class SingleKeyboardAction(ContinuousMovementAction):
"""
Handles keyboard actions.
"""
def to_string(self):
return self.command
def __init__(self, command, key):
super().__init__(command, spaces.Discrete(2))
self.key = key
def from_universal(self, x):
if 'custom_action' in x and 'actions' in x['custom_action']:
if self.key in x['custom_action']['actions'].keys():
return 1
else:
return 0
def __or__(self, other):
"""
Combines two keyboard actions into one by unioning their keys.
"""
if not isinstance(other, KeyboardAction):
raise TypeError("other must be an instance of KeyboardAction")
new_keys = list(set(self.keys + other.keys))
        return KeyboardAction(self._command, *new_keys)  # unpack: __init__ expects the keys as separate positional args
def __eq__(self, other):
"""
Tests for equality between two keyboard actions.
"""
if not isinstance(other, KeyboardAction):
return False
return self._command == other._command and self.keys == other.keys
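# --- Editor's sketch (illustrative, not part of the original module) ---
# How a two-key KeyboardAction resolves a recorded universal observation:
#   move = KeyboardAction('move', 'S', 'W')       # space is DiscreteRange(-1, 2)
#   x = {'custom_action': {'actions': {'W': 1}}}
#   move.from_universal(x)                        # 'W' is keys[1] -> 1*2 + offset(-1) = 1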
|
[
"numpy.sum",
"minerl.herobraine.hero.spaces.DiscreteRange",
"numpy.array",
"minerl.herobraine.hero.spaces.Box",
"minerl.herobraine.hero.spaces.Enum",
"minerl.herobraine.hero.spaces.Discrete"
] |
[((2307, 2355), 'minerl.herobraine.hero.spaces.Enum', 'spaces.Enum', (['*self._items'], {'default': 'self._default'}), '(*self._items, default=self._default)\n', (2318, 2355), False, 'from minerl.herobraine.hero import spaces\n'), ((9995, 10054), 'minerl.herobraine.hero.spaces.Box', 'spaces.Box', ([], {'low': '(-180)', 'high': '(180)', 'shape': '[2]', 'dtype': 'np.float32'}), '(low=-180, high=180, shape=[2], dtype=np.float32)\n', (10005, 10054), False, 'from minerl.herobraine.hero import spaces\n'), ((10520, 10574), 'numpy.array', 'np.array', (['[-delta_pitch, -delta_yaw]'], {'dtype': 'np.float32'}), '([-delta_pitch, -delta_yaw], dtype=np.float32)\n', (10528, 10574), True, 'import numpy as np\n'), ((10608, 10646), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'np.float32'}), '([0.0, 0.0], dtype=np.float32)\n', (10616, 10646), True, 'import numpy as np\n'), ((12241, 12259), 'minerl.herobraine.hero.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (12256, 12259), False, 'from minerl.herobraine.hero import spaces\n'), ((10973, 10993), 'minerl.herobraine.hero.spaces.DiscreteRange', 'DiscreteRange', (['(-1)', '(2)'], {}), '(-1, 2)\n', (10986, 10993), False, 'from minerl.herobraine.hero.spaces import DiscreteRange\n'), ((10349, 10388), 'numpy.sum', 'np.sum', (["x['custom_action']['cameraYaw']"], {}), "(x['custom_action']['cameraYaw'])\n", (10355, 10388), True, 'import numpy as np\n'), ((10440, 10481), 'numpy.sum', 'np.sum', (["x['custom_action']['cameraPitch']"], {}), "(x['custom_action']['cameraPitch'])\n", (10446, 10481), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import fmm3dpy as fmm
import numpy as np
#
# This is a sample code to demonstrate how to use
# the fmm libraries
#
# sample with one density, sources to sources,
# charge interactions, and potential only
#
n = 200000
nd = 1
sources = np.random.uniform(0,1,(3,n))
eps = 10**(-5)
charges = np.random.uniform(0,1,n)
out = fmm.lfmm3d(eps=eps,sources=sources,charges=charges,pg=1)
# sample with a vector of densities, sources to
# sources and targets, dipole interactions,
# potential and gradients
nd = 3
nt = 1870
targ = np.random.uniform(0,1,(3,nt))
dipvecs = np.random.uniform(0,1,(nd,3,n))
out = fmm.lfmm3d(eps=eps,sources=sources,dipvec=dipvecs,\
targets=targ,nd=nd,pg=2,pgt=2)
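# Editor's note (hedged): the returned object carries the requested outputs --
# out.pot holds the potential at the sources, and for the pgt=2 call above
# out.pottarg / out.gradtarg hold the potential and gradient at the targets
# (attribute names assumed from fmm3dpy's conventions).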
|
[
"numpy.random.uniform",
"fmm3dpy.lfmm3d"
] |
[((262, 293), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(3, n)'], {}), '(0, 1, (3, n))\n', (279, 293), True, 'import numpy as np\n'), ((317, 343), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (334, 343), True, 'import numpy as np\n'), ((349, 408), 'fmm3dpy.lfmm3d', 'fmm.lfmm3d', ([], {'eps': 'eps', 'sources': 'sources', 'charges': 'charges', 'pg': '(1)'}), '(eps=eps, sources=sources, charges=charges, pg=1)\n', (359, 408), True, 'import fmm3dpy as fmm\n'), ((553, 585), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(3, nt)'], {}), '(0, 1, (3, nt))\n', (570, 585), True, 'import numpy as np\n'), ((593, 628), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(nd, 3, n)'], {}), '(0, 1, (nd, 3, n))\n', (610, 628), True, 'import numpy as np\n'), ((632, 722), 'fmm3dpy.lfmm3d', 'fmm.lfmm3d', ([], {'eps': 'eps', 'sources': 'sources', 'dipvec': 'dipvecs', 'targets': 'targ', 'nd': 'nd', 'pg': '(2)', 'pgt': '(2)'}), '(eps=eps, sources=sources, dipvec=dipvecs, targets=targ, nd=nd,\n pg=2, pgt=2)\n', (642, 722), True, 'import fmm3dpy as fmm\n')]
|
#!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numbers
import numpy
import uproot.source.source
import uproot.source.cursor
from uproot.interp.interp import Interpretation
from uproot.interp.numerical import asdtype
from uproot.interp.numerical import _dimsprod
def sizes2offsets(sizes):
out = numpy.empty(len(sizes) + 1, dtype=sizes.dtype)
out[0] = 0
sizes.cumsum(out=out[1:])
return out
def _compactify(fromdata, fromstarts, fromstops, todata, tostarts, tostops):
for i in range(len(fromstarts)):
todata[tostarts[i]:tostops[i]] = fromdata[fromstarts[i]:fromstops[i]]
try:
import numba
except ImportError:
pass
else:
_compactify = numba.njit(_compactify)
class asjagged(Interpretation):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (Interpretation.__metaclass__,), {})
def __init__(self, asdtype, skip_bytes=0):
self.asdtype = asdtype
self.skip_bytes = skip_bytes
def __repr__(self):
if self.skip_bytes == 0:
return "asjagged({0})".format(repr(self.asdtype))
else:
return "asjagged({0}, skip_bytes={1})".format(repr(self.asdtype), self.skip_bytes)
def to(self, todtype=None, todims=None, skip_bytes=None):
if skip_bytes is None:
skip_bytes = self.skip_bytes
return asjagged(self.asdtype.to(todtype, todims), skip_bytes)
@property
def identifier(self):
if self.skip_bytes == 0:
return "asjagged({0})".format(self.asdtype.identifier)
else:
return "asjagged({0}, {1})".format(self.asdtype.identifier, self.skip_bytes)
@property
def dtype(self):
subshape = self.asdtype.dtype.shape
sub = self.asdtype.dtype.subdtype
if sub is None:
tpe = self.asdtype.dtype
else:
tpe = sub[0]
return numpy.dtype((tpe, (0,) + subshape))
def empty(self):
return JaggedArray(self.asdtype.empty(), numpy.empty(0, dtype=numpy.int64), numpy.empty(0, dtype=numpy.int64))
def compatible(self, other):
return isinstance(other, asjagged) and self.asdtype.compatible(other.asdtype)
def numitems(self, numbytes, numentries):
return self.asdtype.numitems(numbytes - numentries*self.skip_bytes, numentries)
def source_numitems(self, source):
return self.asdtype.source_numitems(source.content)
def fromroot(self, data, offsets, local_entrystart, local_entrystop):
if local_entrystart == local_entrystop:
content = self.asdtype.fromroot(data, None, 0, 0)
else:
itemsize = self.asdtype.fromdtype.itemsize * _dimsprod(self.asdtype.fromdims)
if self.skip_bytes == 0:
numpy.floor_divide(offsets, itemsize, offsets)
starts = offsets[local_entrystart : local_entrystop ]
stops = offsets[local_entrystart + 1 : local_entrystop + 1]
content = self.asdtype.fromroot(data, None, starts[0], stops[-1])
else:
fromstarts = offsets[local_entrystart : local_entrystop ] + self.skip_bytes
fromstops = offsets[local_entrystart + 1 : local_entrystop + 1]
newoffsets = numpy.empty(1 + local_entrystop - local_entrystart, dtype=offsets.dtype)
newoffsets[0] = 0
numpy.cumsum(fromstops - fromstarts, out=newoffsets[1:])
newdata = numpy.empty(newoffsets[-1], dtype=data.dtype)
_compactify(data, fromstarts, fromstops, newdata, newoffsets[:-1], newoffsets[1:])
numpy.floor_divide(newoffsets, itemsize, newoffsets)
starts = newoffsets[:-1]
stops = newoffsets[1:]
content = self.asdtype.fromroot(newdata, None, 0, stops[-1])
return JaggedArray(content, starts, stops)
def destination(self, numitems, numentries):
content = self.asdtype.destination(numitems, numentries)
sizes = numpy.empty(numentries, dtype=numpy.int64)
return JaggedArray._Prep(content, sizes)
def fill(self, source, destination, itemstart, itemstop, entrystart, entrystop):
destination.sizes[entrystart:entrystop] = source.stops - source.starts
self.asdtype.fill(source.content, destination.content, itemstart, itemstop, entrystart, entrystop)
def clip(self, destination, itemstart, itemstop, entrystart, entrystop):
destination.content = self.asdtype.clip(destination.content, itemstart, itemstop, entrystart, entrystop)
destination.sizes = destination.sizes[entrystart:entrystop]
return destination
def finalize(self, destination, branch):
offsets = sizes2offsets(destination.sizes)
starts = offsets[:-1]
stops = offsets[1:]
content = self.asdtype.finalize(destination.content, branch)
leafcount = None
if len(branch.fLeaves) == 1:
leafcount = branch.fLeaves[0].fLeafCount
return JaggedArray(content, starts, stops, leafcount=leafcount)
def asstlvector(asdtype):
return asjagged(asdtype, skip_bytes=10)
def _jaggedarray_getitem(jaggedarray, index):
stopslen = len(jaggedarray.stops)
if index < 0:
index += stopslen
if 0 <= index < stopslen:
start = jaggedarray.starts[index]
stop = jaggedarray.stops[index]
return jaggedarray.content[start:stop]
else:
raise IndexError("index out of range for JaggedArray")
class JaggedArray(object):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (type,), {})
class _Prep(object):
def __init__(self, content, sizes):
self.content = content
self.sizes = sizes
@staticmethod
def fromlists(lists):
offsets = numpy.empty(len(lists) + 1, dtype=numpy.int64)
offsets[0] = 0
stop = 0
anybool = False
anyint = False
anyfloat = False
anycomplex = False
for i, x in enumerate(lists):
offsets[i + 1] = stop = stop + len(x)
if isinstance(x, numpy.ndarray):
if issubclass(x.dtype.type, (numpy.bool, numpy.bool_)):
anybool = True
elif issubclass(x.dtype.type, numpy.integer):
anyint = True
elif issubclass(x.dtype.type, numpy.floating):
anyfloat = True
elif issubclass(x.dtype.type, numpy.complexfloating):
anycomplex = True
else:
if not anybool and not anyint and not anyfloat and not anycomplex and any(isinstance(y, bool) for y in x):
anybool = True
if not anyint and not anyfloat and not anycomplex and any(isinstance(y, int) for y in x):
anyint = True
if not anyfloat and not anycomplex and any(isinstance(y, float) for y in x):
anyfloat = True
if not anycomplex and any(isinstance(y, complex) for y in x):
anycomplex = True
if anycomplex:
dtype = numpy.dtype(numpy.complex)
elif anyfloat:
dtype = numpy.dtype(numpy.float64)
elif anyint:
dtype = numpy.dtype(numpy.int64)
elif anybool:
dtype = numpy.dtype(numpy.bool)
else:
raise TypeError("no numerical types found in lists")
starts = offsets[:-1]
stops = offsets[1:]
content = numpy.empty(offsets[-1], dtype=dtype)
for i, x in enumerate(lists):
content[starts[i]:stops[i]] = x
return JaggedArray(content, starts, stops)
def __init__(self, content, starts, stops, leafcount=None):
assert isinstance(content, numpy.ndarray)
assert isinstance(starts, numpy.ndarray) and issubclass(starts.dtype.type, numpy.integer)
assert isinstance(stops, numpy.ndarray) and issubclass(stops.dtype.type, numpy.integer)
assert len(stops.shape) == 1
assert starts.shape == stops.shape
self.content = content
self.starts = starts
self.stops = stops
self.leafcount = leafcount
def __getstate__(self):
state = self.__dict__.copy()
state["leafcount"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
def __eq__(self, other):
return isinstance(other, JaggedArray) and numpy.array_equal(self.content, other.content) and self.aligned(other)
def __ne__(self, other):
return not self.__eq__(other)
@property
def offsets(self):
if self.starts.base is not None and self.stops.base is not None and self.starts.base is self.stops.base and \
self.starts.ctypes.data == self.starts.base.ctypes.data and \
self.stops.ctypes.data == self.stops.base.ctypes.data + self.starts.dtype.itemsize and \
len(self.starts) == len(self.starts.base) - 1 and \
len(self.stops) == len(self.starts.base) - 1:
return self.starts.base
elif numpy.array_equal(self.starts[1:], self.stops[:-1]):
return numpy.append(self.starts, self.stops[-1])
else:
raise ValueError("starts and stops are not compatible; cannot express as offsets")
def aligned(self, other):
if self.leafcount is not None and other.leafcount is not None and self.leafcount is other.leafcount:
return True
else:
return numpy.array_equal(self.starts, other.starts) and numpy.array_equal(self.stops, other.stops)
def __len__(self):
return len(self.stops)
def __getitem__(self, index):
if isinstance(index, numbers.Integral):
return _jaggedarray_getitem(self, index)
elif isinstance(index, slice):
if index.step is not None and index.step != 1:
raise NotImplementedError("cannot yet slice a JaggedArray with step != 1 (FIXME: this is possible, should be implemented)")
else:
return JaggedArray(self.content, self.starts[index], self.stops[index])
else:
raise TypeError("JaggedArray index must be an integer or a slice")
def __iter__(self):
content = self.content
starts = self.starts
stops = self.stops
for i in range(len(stops)):
yield content[starts[i]:stops[i]]
def __repr__(self, indent="", linewidth=None):
if linewidth is None:
linewidth = numpy.get_printoptions()["linewidth"]
dtypestr = repr(self.content.dtype).replace("(", "=").rstrip(")")
linewidth = linewidth - 12 - 2 - len(dtypestr)
return "jaggedarray({0})".format(self.__str__(indent=" " * 12, linewidth=linewidth))
def __str__(self, indent="", linewidth=None):
if linewidth is None:
linewidth = numpy.get_printoptions()["linewidth"]
def single(a):
if len(a) > 6:
return numpy.array_str(a[:3], max_line_width=numpy.inf).rstrip("]") + " ... " + numpy.array_str(a[-3:], max_line_width=numpy.inf).lstrip("[")
else:
return numpy.array_str(a, max_line_width=numpy.inf)
if len(self) > 10:
content = [single(self[i]) for i in range(3)] + ["..."] + [single(self[i]) for i in range(-3, 0)]
else:
content = [single(x) for x in self]
if sum(len(x) for x in content) + 2*(len(content) - 1) + 2 <= linewidth:
return "[" + ", ".join(content) + "]"
else:
return "[" + (",\n " + indent).join(content) + "]"
def tolist(self):
return [x.tolist() for x in self]
def __array__(self, dtype=None, copy=False, order="K", subok=False, ndmin=0):
if dtype is None:
dtype = self.content.dtype
elif not isinstance(dtype, numpy.dtype):
dtype = numpy.dtype(dtype)
if dtype == self.content.dtype and not copy and not subok and ndmin == 0:
return self.content
else:
return numpy.array(self.content, dtype=dtype, copy=copy, order=order, subok=subok, ndmin=ndmin)
class asvar(asjagged):
def __init__(self, genclass, skip_bytes=0, args=()):
self.genclass = genclass
super(asvar, self).__init__(asdtype(numpy.dtype(numpy.uint8)), skip_bytes=skip_bytes)
self.args = args
def __repr__(self):
return self.identifier
@property
def identifier(self):
args = []
if self.skip_bytes != 0:
args.append(", skip_bytes={0}".format(self.skip_bytes))
return "asvar({0}{1})".format(self.genclass.__name__, "".join(args))
@property
def dtype(self):
return self.genclass._dtype(self.args)
def empty(self):
return self.genclass(*((super(asvar, self).empty(),) + self.args))
def compatible(self, other):
return isinstance(other, asvar) and self.genclass is other.genclass and super(asvar, self).compatible(other) and self.args == other.args
def source_numitems(self, source):
return super(asvar, self).source_numitems(source.jaggedarray)
def fromroot(self, data, offsets, local_entrystart, local_entrystop):
return self.genclass(*((super(asvar, self).fromroot(data, offsets, local_entrystart, local_entrystop),) + self.args))
def fill(self, source, destination, itemstart, itemstop, entrystart, entrystop):
return super(asvar, self).fill(source.jaggedarray, destination, itemstart, itemstop, entrystart, entrystop)
def finalize(self, destination, branch):
return self.genclass(*((super(asvar, self).finalize(destination, branch),) + self.args))
class VariableLength(object):
def __init__(self, *args):
self.jaggedarray = args[0]
assert self.jaggedarray.content.dtype.itemsize == 1
assert len(self.jaggedarray.content.shape) == 1
self.args = args[1:]
def __len__(self):
return len(self.jaggedarray)
def __getitem__(self, index):
if isinstance(index, numbers.Integral):
return self.interpret(self.jaggedarray[index])
elif isinstance(index, slice):
return self.__class__(*((self.jaggedarray[index],) + self.args))
else:
raise TypeError("{0} index must be an integer or a slice".format(self.__class__.__name__))
def __iter__(self):
for x in self.jaggedarray:
yield self.interpret(x)
def __str__(self):
if len(self) > 6:
return "[{0} ... {1}]".format(" ".join(repr(self[i]) for i in range(3)), " ".join(repr(self[i]) for i in range(-3, 0)))
else:
return "[{0}]".format(" ".join(repr(x) for x in self))
def tolist(self):
return list(self)
@staticmethod
def interpret(item):
raise NotImplementedError
class asobjs(asvar):
def __init__(self, cls, context=None):
super(asobjs, self).__init__(JaggedObjects, skip_bytes=0, args=(cls, context))
self.cls = cls
self.context = context
@property
def identifier(self):
return "asobjs({0})".format(self.cls.__name__)
@property
def dtype(self):
return numpy.dtype((object, (0,)))
def asjaggedobjects(cls, context=None):
return asobjs(cls, context=context)
class JaggedObjects(VariableLength):
indexdtype = numpy.dtype(">i4")
def __init__(self, jaggedarray, cls, context):
super(JaggedObjects, self).__init__(jaggedarray, cls)
self._class = cls
self._context = context
def interpret(self, item):
size, = item[6:10].view(JaggedObjects.indexdtype)
source = uproot.source.source.Source(item)
cursor = uproot.source.cursor.Cursor(10)
out = [None] * size
for i in range(size):
out[i] = self._class.read(source, cursor, self._context, None)
return out
def __str__(self):
if len(self) > 6:
return "[{0}\n ...\n{1}]".format(",\n".join(("" if i == 0 else " ") + repr(self[i]) for i in range(3)), ",\n".join(" " + repr(self[i]) for i in range(-3, 0)))
else:
return "[{0}]".format(", ".join(repr(x) for x in self))
def __repr__(self):
return "<JaggedObjects of {0} at {1:012x}>".format(self._class.__name__, id(self))
def __getitem__(self, index):
if isinstance(index, numbers.Integral):
return self.interpret(self.jaggedarray[index])
elif isinstance(index, slice):
return JaggedObjects(self.jaggedarray[index], self._class, self._context)
else:
raise TypeError("{0} index must be an integer or a slice".format(self.__class__.__name__))
def asstlvectorvector(fromdtype):
return asvar(JaggedJaggedArray, skip_bytes=6, args=(numpy.dtype(fromdtype),))
class JaggedJaggedArray(VariableLength):
def __init__(self, jaggedarray, fromdtype):
super(JaggedJaggedArray, self).__init__(jaggedarray, fromdtype)
self.fromdtype = fromdtype
@classmethod
def _dtype(cls, args):
dtype, = args
return numpy.dtype((dtype, (0, 0)))
indexdtype = numpy.dtype(">i4")
def interpret(self, item):
i = 0
size, = item[i : i + 4].view(JaggedJaggedArray.indexdtype)
i += 4
out = []
while i < len(item):
size, = item[i : i + 4].view(JaggedJaggedArray.indexdtype)
i += 4
numbytes = size * self.fromdtype.itemsize
out.append(item[i : i + numbytes].view(self.fromdtype))
i += numbytes
return out
def __str__(self):
if len(self) > 6:
return "[{0} ... {1}]".format(", ".join(repr([y.tolist() for y in self[i]]) for i in range(3)), ", ".join(repr([y.tolist() for y in self[i]]) for i in range(-3, 0)))
else:
return "[{0}]".format(", ".join(repr([y.tolist() for y in x]) for x in self))
def __repr__(self):
return "jaggedjaggedarray({0})".format(str(self))
def tolist(self):
return [[y.tolist() for y in x] for x in self]
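# --- Editor's sketch (illustrative, not part of the original module) ---
# Building a JaggedArray directly from Python lists:
#   j = JaggedArray.fromlists([[1.1, 2.2], [], [3.3]])
#   j.starts, j.stops    # -> array([0, 2, 2]), array([2, 2, 3])
#   j[0]                 # -> array([1.1, 2.2])
#   j.tolist()           # -> [[1.1, 2.2], [], [3.3]]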
|
[
"numpy.array_str",
"numpy.empty",
"uproot.interp.numerical._dimsprod",
"numba.njit",
"numpy.dtype",
"numpy.floor_divide",
"numpy.cumsum",
"numpy.append",
"numpy.array",
"numpy.array_equal",
"numpy.get_printoptions"
] |
[((2202, 2225), 'numba.njit', 'numba.njit', (['_compactify'], {}), '(_compactify)\n', (2212, 2225), False, 'import numba\n'), ((17065, 17083), 'numpy.dtype', 'numpy.dtype', (['""">i4"""'], {}), "('>i4')\n", (17076, 17083), False, 'import numpy\n'), ((18846, 18864), 'numpy.dtype', 'numpy.dtype', (['""">i4"""'], {}), "('>i4')\n", (18857, 18864), False, 'import numpy\n'), ((3430, 3465), 'numpy.dtype', 'numpy.dtype', (['(tpe, (0,) + subshape)'], {}), '((tpe, (0,) + subshape))\n', (3441, 3465), False, 'import numpy\n'), ((5577, 5619), 'numpy.empty', 'numpy.empty', (['numentries'], {'dtype': 'numpy.int64'}), '(numentries, dtype=numpy.int64)\n', (5588, 5619), False, 'import numpy\n'), ((9156, 9193), 'numpy.empty', 'numpy.empty', (['offsets[-1]'], {'dtype': 'dtype'}), '(offsets[-1], dtype=dtype)\n', (9167, 9193), False, 'import numpy\n'), ((16901, 16928), 'numpy.dtype', 'numpy.dtype', (['(object, (0,))'], {}), '((object, (0,)))\n', (16912, 16928), False, 'import numpy\n'), ((18799, 18827), 'numpy.dtype', 'numpy.dtype', (['(dtype, (0, 0))'], {}), '((dtype, (0, 0)))\n', (18810, 18827), False, 'import numpy\n'), ((3537, 3570), 'numpy.empty', 'numpy.empty', (['(0)'], {'dtype': 'numpy.int64'}), '(0, dtype=numpy.int64)\n', (3548, 3570), False, 'import numpy\n'), ((3572, 3605), 'numpy.empty', 'numpy.empty', (['(0)'], {'dtype': 'numpy.int64'}), '(0, dtype=numpy.int64)\n', (3583, 3605), False, 'import numpy\n'), ((8770, 8796), 'numpy.dtype', 'numpy.dtype', (['numpy.complex'], {}), '(numpy.complex)\n', (8781, 8796), False, 'import numpy\n'), ((10113, 10159), 'numpy.array_equal', 'numpy.array_equal', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (10130, 10159), False, 'import numpy\n'), ((10750, 10801), 'numpy.array_equal', 'numpy.array_equal', (['self.starts[1:]', 'self.stops[:-1]'], {}), '(self.starts[1:], self.stops[:-1])\n', (10767, 10801), False, 'import numpy\n'), ((13748, 13840), 'numpy.array', 'numpy.array', (['self.content'], {'dtype': 'dtype', 'copy': 'copy', 'order': 'order', 'subok': 'subok', 'ndmin': 'ndmin'}), '(self.content, dtype=dtype, copy=copy, order=order, subok=subok,\n ndmin=ndmin)\n', (13759, 13840), False, 'import numpy\n'), ((4218, 4250), 'uproot.interp.numerical._dimsprod', '_dimsprod', (['self.asdtype.fromdims'], {}), '(self.asdtype.fromdims)\n', (4227, 4250), False, 'from uproot.interp.numerical import _dimsprod\n'), ((4304, 4350), 'numpy.floor_divide', 'numpy.floor_divide', (['offsets', 'itemsize', 'offsets'], {}), '(offsets, itemsize, offsets)\n', (4322, 4350), False, 'import numpy\n'), ((4814, 4886), 'numpy.empty', 'numpy.empty', (['(1 + local_entrystop - local_entrystart)'], {'dtype': 'offsets.dtype'}), '(1 + local_entrystop - local_entrystart, dtype=offsets.dtype)\n', (4825, 4886), False, 'import numpy\n'), ((4937, 4993), 'numpy.cumsum', 'numpy.cumsum', (['(fromstops - fromstarts)'], {'out': 'newoffsets[1:]'}), '(fromstops - fromstarts, out=newoffsets[1:])\n', (4949, 4993), False, 'import numpy\n'), ((5020, 5065), 'numpy.empty', 'numpy.empty', (['newoffsets[-1]'], {'dtype': 'data.dtype'}), '(newoffsets[-1], dtype=data.dtype)\n', (5031, 5065), False, 'import numpy\n'), ((5181, 5233), 'numpy.floor_divide', 'numpy.floor_divide', (['newoffsets', 'itemsize', 'newoffsets'], {}), '(newoffsets, itemsize, newoffsets)\n', (5199, 5233), False, 'import numpy\n'), ((8840, 8866), 'numpy.dtype', 'numpy.dtype', (['numpy.float64'], {}), '(numpy.float64)\n', (8851, 8866), False, 'import numpy\n'), ((10822, 10863), 'numpy.append', 'numpy.append', (['self.starts', 'self.stops[-1]'], {}), '(self.starts, self.stops[-1])\n', (10834, 10863), False, 'import numpy\n'), ((11170, 11214), 'numpy.array_equal', 'numpy.array_equal', (['self.starts', 'other.starts'], {}), '(self.starts, other.starts)\n', (11187, 11214), False, 'import numpy\n'), ((11219, 11261), 'numpy.array_equal', 'numpy.array_equal', (['self.stops', 'other.stops'], {}), '(self.stops, other.stops)\n', (11236, 11261), False, 'import numpy\n'), ((12192, 12216), 'numpy.get_printoptions', 'numpy.get_printoptions', ([], {}), '()\n', (12214, 12216), False, 'import numpy\n'), ((12557, 12581), 'numpy.get_printoptions', 'numpy.get_printoptions', ([], {}), '()\n', (12579, 12581), False, 'import numpy\n'), ((12845, 12889), 'numpy.array_str', 'numpy.array_str', (['a'], {'max_line_width': 'numpy.inf'}), '(a, max_line_width=numpy.inf)\n', (12860, 12889), False, 'import numpy\n'), ((13581, 13599), 'numpy.dtype', 'numpy.dtype', (['dtype'], {}), '(dtype)\n', (13592, 13599), False, 'import numpy\n'), ((13995, 14019), 'numpy.dtype', 'numpy.dtype', (['numpy.uint8'], {}), '(numpy.uint8)\n', (14006, 14019), False, 'import numpy\n'), ((18494, 18516), 'numpy.dtype', 'numpy.dtype', (['fromdtype'], {}), '(fromdtype)\n', (18505, 18516), False, 'import numpy\n'), ((8908, 8932), 'numpy.dtype', 'numpy.dtype', (['numpy.int64'], {}), '(numpy.int64)\n', (8919, 8932), False, 'import numpy\n'), ((8975, 8998), 'numpy.dtype', 'numpy.dtype', (['numpy.bool'], {}), '(numpy.bool)\n', (8986, 8998), False, 'import numpy\n'), ((12742, 12791), 'numpy.array_str', 'numpy.array_str', (['a[-3:]'], {'max_line_width': 'numpy.inf'}), '(a[-3:], max_line_width=numpy.inf)\n', (12757, 12791), False, 'import numpy\n'), ((12669, 12717), 'numpy.array_str', 'numpy.array_str', (['a[:3]'], {'max_line_width': 'numpy.inf'}), '(a[:3], max_line_width=numpy.inf)\n', (12684, 12717), False, 'import numpy\n')]
|
import numpy as np
import torch
import torch.nn.functional as F
import dataset_creator as DC
from torch import nn
from torch import optim
# import keras
def createNN(_inputSize):
input_size = _inputSize
    hidden_sizes = [15, 10]  # 15 nodes in the first hidden layer, 10 in the second
output_size = 29 # Number of possible outputs
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
nn.ReLU(),
#nn.Dropout(0.2),
#nn.Linear(hidden_sizes[0], hidden_sizes[1]),
#nn.ReLU(),
#nn.Dropout(0.3),
nn.Linear(hidden_sizes[0], output_size))
return model
def convert2tensor(x):
x = torch.FloatTensor(x)
return x
def convert2long(x):
x = torch.LongTensor(x)
return x
def switchLoader(e,it1,it2,it3,it4,it5):
switcher={
0:it1,
1:it2,
2:it3,
3:it4,
4:it5
}
return switcher.get(e,"Invalid Iterator")
def TrainNN(model,t1,t2,t3,t4,t5):
criterion = nn.CrossEntropyLoss()
#criterion = nn.CTCLoss()
#optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer = optim.Adam(model.parameters(), lr=1e-5)
epochs = 5
print_every = 1000
steps = 0
correct_train = 0
for e in range(epochs):
running_loss = 0
loaderForData = switchLoader(e,t1,t2,t3,t4,t5)
for images, labels in iter(loaderForData):
steps += 1
images = convert2tensor(images)
actual_label = labels
labels = [labels,]
labels = convert2long(labels)
labels = torch.LongTensor(labels)
optimizer.zero_grad() # Clear the gradients as gradients are accumulated
# Forward and backward passes
output = model.forward(images)
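            # NOTE (editor): nn.CrossEntropyLoss already applies log-softmax
            # internally, so the extra F.softmax below is redundant and tends to
            # flatten gradients; it is kept here to match the original script.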
output = F.softmax(output, dim=0)
output = output.unsqueeze(dim=0)
loss = criterion(output, labels) # Calculate the loss
loss.backward() # backpropagate to get values of the new weights
optimizer.step() # Take a step to update the newly calculated weights
_, predicted = torch.max(output.data, 1)
correct_train += predicted.eq(labels.data).sum().item()
running_loss += loss.item()
if steps % print_every == 0:
print(predicted)
print(labels.data)
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
print("Ended Epoch.",str(e+1))
    # Saving the model after training:
    train_accuracy = 100 * correct_train / 5000
    print("Train Accuracy on 5000 elements: {}%".format(train_accuracy))
PATH = 'trained_model.pth'
torch.save(model.state_dict(), PATH)
def TestNN(model,testloader):
images = torch.FloatTensor(testloader[:17])
logits = model.forward(images)
ps = F.softmax(logits, dim=0)
ps = ps.data.numpy().squeeze()
prediction = np.argmax(ps)
print(ps)
D = DC.returnToArabicDictionary()
return D[prediction]
# def PrepareLabels():
def load_checkpoint(filepath):
model = torch.load('trained_model.pth')
return model
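# Editor's sketch of the intended pipeline (the loader objects t1..t5 and the
# input size are assumptions; each loader must yield (features, label) pairs):
#   model = createNN(64)
#   TrainNN(model, t1, t2, t3, t4, t5)   # one loader per epoch via switchLoader
#   print(TestNN(load_checkpoint('trained_model.pth'), sample_row))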
|
[
"dataset_creator.returnToArabicDictionary",
"torch.nn.ReLU",
"numpy.argmax",
"torch.LongTensor",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.FloatTensor",
"torch.nn.functional.softmax",
"torch.max",
"torch.nn.Linear"
] |
[((737, 757), 'torch.FloatTensor', 'torch.FloatTensor', (['x'], {}), '(x)\n', (754, 757), False, 'import torch\n'), ((801, 820), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (817, 820), False, 'import torch\n'), ((1078, 1099), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1097, 1099), False, 'from torch import nn\n'), ((3005, 3039), 'torch.FloatTensor', 'torch.FloatTensor', (['testloader[:17]'], {}), '(testloader[:17])\n', (3022, 3039), False, 'import torch\n'), ((3085, 3109), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(0)'}), '(logits, dim=0)\n', (3094, 3109), True, 'import torch.nn.functional as F\n'), ((3162, 3175), 'numpy.argmax', 'np.argmax', (['ps'], {}), '(ps)\n', (3171, 3175), True, 'import numpy as np\n'), ((3198, 3227), 'dataset_creator.returnToArabicDictionary', 'DC.returnToArabicDictionary', ([], {}), '()\n', (3225, 3227), True, 'import dataset_creator as DC\n'), ((3321, 3352), 'torch.load', 'torch.load', (['"""trained_model.pth"""'], {}), "('trained_model.pth')\n", (3331, 3352), False, 'import torch\n'), ((346, 384), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'hidden_sizes[0]'], {}), '(input_size, hidden_sizes[0])\n', (355, 384), False, 'from torch import nn\n'), ((412, 421), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (419, 421), False, 'from torch import nn\n'), ((647, 686), 'torch.nn.Linear', 'nn.Linear', (['hidden_sizes[0]', 'output_size'], {}), '(hidden_sizes[0], output_size)\n', (656, 686), False, 'from torch import nn\n'), ((1706, 1730), 'torch.LongTensor', 'torch.LongTensor', (['labels'], {}), '(labels)\n', (1722, 1730), False, 'import torch\n'), ((1944, 1968), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(0)'}), '(output, dim=0)\n', (1953, 1968), True, 'import torch.nn.functional as F\n'), ((2279, 2304), 'torch.max', 'torch.max', (['output.data', '(1)'], {}), '(output.data, 1)\n', (2288, 2304), False, 'import torch\n')]
|
# coding: utf-8
# In[1]:
import nengo
import nengo_spa as spa
import numpy as np
# In[2]:
from matplotlib import pyplot as plt
# In[3]:
#create semantic pointers
words = [ 'CAT', 'BLUE', 'RED']
colors = ['RED', 'BLUE']
fingers = ['INDEX', 'MIDDLE']
D = 16 #we reduced it from 32 cause of capacity of our computers
vocab = spa.Vocabulary(D)
vocab.populate(';'.join(words))
vocab.populate('COLOR; WORD')
vocab.populate(';'.join(fingers))
stimuli = []
for i in range(10):
w = np.random.choice(colors)
c = np.random.choice(colors)
stimuli.append((w,c))
# # No recurrent connections
# In[4]:
model = spa.Network()
with model:
t_stim = 0.5
t_isi = 0.5
def word_func(t):
index = int (t / (t_stim + t_isi))
t = t % (t_stim + t_isi)
if t < t_isi:
return '0'
else:
return stimuli[index%len(stimuli)][0]
def color_func(t):
index = int (t / (t_stim + t_isi))
t = t % (t_stim + t_isi)
if t < t_isi:
return '0'
else:
return stimuli[index%len(stimuli)][1]
stim_w = spa.Transcode(word_func, output_vocab=vocab)
#create node for pre processing color to mimic delay
pre_stim_c = spa.Transcode(color_func, output_vocab=vocab)
stim_c = spa.State(vocab)
    # reduced the number of neurons to increase the volatility of attention
attention = spa.State(vocab, neurons_per_dimension=10)
spa.sym.WORD * 0.45 + spa.sym.COLOR * 0.55 >> attention
wm = spa.State(vocab)
nengo.Connection(pre_stim_c.output, stim_c.input, synapse=0.3)
#added gain for action selection to be triggered
(spa.sym.COLOR*stim_c+spa.sym.WORD*stim_w)*~attention*2 >> wm
finger = spa.State(vocab)
with spa.ActionSelection():
spa.ifmax( spa.dot(wm, spa.sym.BLUE),
spa.sym.INDEX >> finger)
spa.ifmax(spa.dot(wm, spa.sym.RED),
spa.sym.MIDDLE >> finger)
spa.ifmax(0.5,
spa.semantic_pointer.Zero(D) >> finger)
# In[5]:
with model:
p_input_word = nengo.Probe(stim_w.output)
p_input_color = nengo.Probe(pre_stim_c.output)
p_wm = nengo.Probe(wm.output)
p_finger = nengo.Probe(finger.output)
# In[6]:
with nengo.Simulator(model) as sim:
sim.run(5)
# In[7]:
figure, axs = plt.subplots(ncols=1, nrows=4, figsize=(10, 10))
axs[0].plot(sim.trange(), spa.similarity(sim.data[p_input_word], vocab))
axs[0].legend(vocab.keys(), loc='right')
axs[1].plot(sim.trange(), spa.similarity(sim.data[p_input_color], vocab))
axs[1].legend(vocab.keys(), loc='right')
axs[2].plot(sim.trange(), spa.similarity(sim.data[p_wm], vocab))
axs[2].legend(vocab.keys(), loc='right')
axs[3].plot(sim.trange(), spa.similarity(sim.data[p_finger], vocab))
axs[3].legend(vocab.keys(), loc='right')
# The delay in processing 'color' vs 'word' was successful. However, the model without recurrent wm always responds incorrectly (to 'word'), because it responds to the first input that reaches wm. Thus we decided to add recurrent feedback to the wm nodes, to achieve accumulation of evidence.
# # Yes recurrent connections
# In[4]:
model_rec = spa.Network()
with model_rec:
    # we changed the durations to avoid intertrial effects (wm overlap)
t_stim = 0.3
t_isi = 0.7
def word_func(t):
index = int (t / (t_stim + t_isi))
t = t % (t_stim + t_isi)
if t < t_isi:
return '0'
else:
return stimuli[index%len(stimuli)][0]
def color_func(t):
        # instead of achieving the delay via an additional node, we present 'color' later than 'word' for better control
t -= 0.1
index = int (t / (t_stim + t_isi))
t = t % (t_stim + t_isi)
if t < t_isi:
return '0'
else:
return stimuli[index%len(stimuli)][1]
stim_w = spa.Transcode(word_func, output_vocab=vocab)
stim_c = spa.Transcode(color_func, output_vocab=vocab)
rec_weight_input = 1
rec_weight_feedback = 0.5
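    # (Editor's note) With these weights each wm State behaves roughly like a
    # leaky integrator, wm[t+dt] ~ feedback * wm[t] + input * stim[t], so
    # evidence accumulates over the stimulus window instead of being
    # overwritten by whichever input arrives first.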
wm_w = spa.State(vocab, feedback=rec_weight_feedback)
wm_c = spa.State(vocab, feedback=rec_weight_feedback)
stim_w * rec_weight_input >> wm_w
stim_c * rec_weight_input >> wm_c
attention = spa.State(vocab, neurons_per_dimension=10)
    # we reduced the attentional difference to give a higher chance to 'word'
spa.sym.WORD * 0.48 + spa.sym.COLOR * 0.52 >> attention
wm = spa.State(vocab, feedback=rec_weight_feedback)
(spa.sym.COLOR * wm_c + spa.sym.WORD * wm_w) * ~attention * rec_weight_input * 2 >> wm
finger = spa.State(vocab)
with spa.ActionSelection():
spa.ifmax( spa.dot(wm, spa.sym.BLUE),
spa.sym.INDEX >> finger)
spa.ifmax(spa.dot(wm, spa.sym.RED),
spa.sym.MIDDLE >> finger)
spa.ifmax(0.5,
spa.semantic_pointer.Zero(D) >> finger)
# In[5]:
with model_rec:
p_input_word = nengo.Probe(stim_w.output)
p_input_color = nengo.Probe(stim_c.output)
p_wm_word = nengo.Probe(wm_w.output)
p_wm_color = nengo.Probe(wm_c.output)
p_wm = nengo.Probe(wm.output)
p_finger = nengo.Probe(finger.output)
# In[11]:
stimuli = []
for i in range(10):
w = np.random.choice(colors)
c = np.random.choice(colors)
stimuli.append((w,c))
# In[12]:
with nengo.Simulator(model_rec) as sim_rec:
sim_rec.run(10)
# In[13]:
figure, axs = plt.subplots(ncols=1, nrows=6, figsize=(10, 10))
axs[0].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_input_word], vocab))
axs[0].legend(vocab.keys(), loc='right')
axs[1].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_input_color], vocab))
axs[1].legend(vocab.keys(), loc='right')
axs[2].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_wm_word], vocab))
axs[2].legend(vocab.keys(), loc='right')
axs[3].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_wm_color], vocab))
axs[3].legend(vocab.keys(), loc='right')
axs[4].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_wm], vocab))
axs[4].legend(vocab.keys(), loc='right')
axs[5].plot(sim_rec.trange(), spa.similarity(sim_rec.data[p_finger], vocab))
axs[5].legend(vocab.keys(), loc='right')
# This is the closest result that shows mistakes (at least we can interpret it that way): at timepoints 4, 5, 7 & 8 both fingers are selected, with the wrong finger chosen first.
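# Editor's sketch (an addition, not part of the original analysis): decode the
# model's response per 1 s trial as the vocabulary key with the highest mean
# similarity to the finger state in the second half of each trial window.
sim_f = spa.similarity(sim_rec.data[p_finger], vocab)
keys = list(vocab.keys())
dt = sim_rec.dt  # nengo's default simulation step is 0.001 s
for trial in range(10):
    lo, hi = int((trial + 0.5) / dt), int((trial + 1.0) / dt)
    print(trial, keys[int(np.argmax(sim_f[lo:hi].mean(axis=0)))])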
|
[
"nengo_spa.Vocabulary",
"nengo_spa.State",
"nengo_spa.Transcode",
"nengo.Probe",
"nengo_spa.Network",
"nengo.Simulator",
"nengo_spa.semantic_pointer.Zero",
"nengo_spa.similarity",
"nengo_spa.ActionSelection",
"numpy.random.choice",
"nengo_spa.dot",
"nengo.Connection",
"matplotlib.pyplot.subplots"
] |
[((335, 352), 'nengo_spa.Vocabulary', 'spa.Vocabulary', (['D'], {}), '(D)\n', (349, 352), True, 'import nengo_spa as spa\n'), ((626, 639), 'nengo_spa.Network', 'spa.Network', ([], {}), '()\n', (637, 639), True, 'import nengo_spa as spa\n'), ((2326, 2374), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(4)', 'figsize': '(10, 10)'}), '(ncols=1, nrows=4, figsize=(10, 10))\n', (2338, 2374), True, 'from matplotlib import pyplot as plt\n'), ((3157, 3170), 'nengo_spa.Network', 'spa.Network', ([], {}), '()\n', (3168, 3170), True, 'import nengo_spa as spa\n'), ((5410, 5458), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(6)', 'figsize': '(10, 10)'}), '(ncols=1, nrows=6, figsize=(10, 10))\n', (5422, 5458), True, 'from matplotlib import pyplot as plt\n'), ((491, 515), 'numpy.random.choice', 'np.random.choice', (['colors'], {}), '(colors)\n', (507, 515), True, 'import numpy as np\n'), ((524, 548), 'numpy.random.choice', 'np.random.choice', (['colors'], {}), '(colors)\n', (540, 548), True, 'import numpy as np\n'), ((1117, 1161), 'nengo_spa.Transcode', 'spa.Transcode', (['word_func'], {'output_vocab': 'vocab'}), '(word_func, output_vocab=vocab)\n', (1130, 1161), True, 'import nengo_spa as spa\n'), ((1237, 1282), 'nengo_spa.Transcode', 'spa.Transcode', (['color_func'], {'output_vocab': 'vocab'}), '(color_func, output_vocab=vocab)\n', (1250, 1282), True, 'import nengo_spa as spa\n'), ((1300, 1316), 'nengo_spa.State', 'spa.State', (['vocab'], {}), '(vocab)\n', (1309, 1316), True, 'import nengo_spa as spa\n'), ((1404, 1446), 'nengo_spa.State', 'spa.State', (['vocab'], {'neurons_per_dimension': '(10)'}), '(vocab, neurons_per_dimension=10)\n', (1413, 1446), True, 'import nengo_spa as spa\n'), ((1520, 1536), 'nengo_spa.State', 'spa.State', (['vocab'], {}), '(vocab)\n', (1529, 1536), True, 'import nengo_spa as spa\n'), ((1545, 1607), 'nengo.Connection', 'nengo.Connection', (['pre_stim_c.output', 'stim_c.input'], {'synapse': '(0.3)'}), '(pre_stim_c.output, stim_c.input, synapse=0.3)\n', (1561, 1607), False, 'import nengo\n'), ((1744, 1760), 'nengo_spa.State', 'spa.State', (['vocab'], {}), '(vocab)\n', (1753, 1760), True, 'import nengo_spa as spa\n'), ((2081, 2107), 'nengo.Probe', 'nengo.Probe', (['stim_w.output'], {}), '(stim_w.output)\n', (2092, 2107), False, 'import nengo\n'), ((2128, 2158), 'nengo.Probe', 'nengo.Probe', (['pre_stim_c.output'], {}), '(pre_stim_c.output)\n', (2139, 2158), False, 'import nengo\n'), ((2170, 2192), 'nengo.Probe', 'nengo.Probe', (['wm.output'], {}), '(wm.output)\n', (2181, 2192), False, 'import nengo\n'), ((2208, 2234), 'nengo.Probe', 'nengo.Probe', (['finger.output'], {}), '(finger.output)\n', (2219, 2234), False, 'import nengo\n'), ((2253, 2275), 'nengo.Simulator', 'nengo.Simulator', (['model'], {}), '(model)\n', (2268, 2275), False, 'import nengo\n'), ((2402, 2447), 'nengo_spa.similarity', 'spa.similarity', (['sim.data[p_input_word]', 'vocab'], {}), '(sim.data[p_input_word], vocab)\n', (2416, 2447), True, 'import nengo_spa as spa\n'), ((2517, 2563), 'nengo_spa.similarity', 'spa.similarity', (['sim.data[p_input_color]', 'vocab'], {}), '(sim.data[p_input_color], vocab)\n', (2531, 2563), True, 'import nengo_spa as spa\n'), ((2633, 2670), 'nengo_spa.similarity', 'spa.similarity', (['sim.data[p_wm]', 'vocab'], {}), '(sim.data[p_wm], vocab)\n', (2647, 2670), True, 'import nengo_spa as spa\n'), ((2740, 2781), 'nengo_spa.similarity', 'spa.similarity', (['sim.data[p_finger]', 'vocab'], {}), '(sim.data[p_finger], vocab)\n', (2754, 2781), True, 'import nengo_spa as spa\n'), ((3850, 3894), 'nengo_spa.Transcode', 'spa.Transcode', (['word_func'], {'output_vocab': 'vocab'}), '(word_func, output_vocab=vocab)\n', (3863, 3894), True, 'import nengo_spa as spa\n'), ((3908, 3953), 'nengo_spa.Transcode', 'spa.Transcode', (['color_func'], {'output_vocab': 'vocab'}), '(color_func, output_vocab=vocab)\n', (3921, 3953), True, 'import nengo_spa as spa\n'), ((4034, 4080), 'nengo_spa.State', 'spa.State', (['vocab'], {'feedback': 'rec_weight_feedback'}), '(vocab, feedback=rec_weight_feedback)\n', (4043, 4080), True, 'import nengo_spa as spa\n'), ((4092, 4138), 'nengo_spa.State', 'spa.State', (['vocab'], {'feedback': 'rec_weight_feedback'}), '(vocab, feedback=rec_weight_feedback)\n', (4101, 4138), True, 'import nengo_spa as spa\n'), ((4245, 4287), 'nengo_spa.State', 'spa.State', (['vocab'], {'neurons_per_dimension': '(10)'}), '(vocab, neurons_per_dimension=10)\n', (4254, 4287), True, 'import nengo_spa as spa\n'), ((4431, 4477), 'nengo_spa.State', 'spa.State', (['vocab'], {'feedback': 'rec_weight_feedback'}), '(vocab, feedback=rec_weight_feedback)\n', (4440, 4477), True, 'import nengo_spa as spa\n'), ((4591, 4607), 'nengo_spa.State', 'spa.State', (['vocab'], {}), '(vocab)\n', (4600, 4607), True, 'import nengo_spa as spa\n'), ((4932, 4958), 'nengo.Probe', 'nengo.Probe', (['stim_w.output'], {}), '(stim_w.output)\n', (4943, 4958), False, 'import nengo\n'), ((4979, 5005), 'nengo.Probe', 'nengo.Probe', (['stim_c.output'], {}), '(stim_c.output)\n', (4990, 5005), False, 'import nengo\n'), ((5022, 5046), 'nengo.Probe', 'nengo.Probe', (['wm_w.output'], {}), '(wm_w.output)\n', (5033, 5046), False, 'import nengo\n'), ((5064, 5088), 'nengo.Probe', 'nengo.Probe', (['wm_c.output'], {}), '(wm_c.output)\n', (5075, 5088), False, 'import nengo\n'), ((5100, 5122), 'nengo.Probe', 'nengo.Probe', (['wm.output'], {}), '(wm.output)\n', (5111, 5122), False, 'import nengo\n'), ((5138, 5164), 'nengo.Probe', 'nengo.Probe', (['finger.output'], {}), '(finger.output)\n', (5149, 5164), False, 'import nengo\n'), ((5220, 5244), 'numpy.random.choice', 'np.random.choice', (['colors'], {}), '(colors)\n', (5236, 5244), True, 'import numpy as np\n'), ((5253, 5277), 'numpy.random.choice', 'np.random.choice', (['colors'], {}), '(colors)\n', (5269, 5277), True, 'import numpy as np\n'), ((5323, 5349), 'nengo.Simulator', 'nengo.Simulator', (['model_rec'], {}), '(model_rec)\n', (5338, 5349), False, 'import nengo\n'), ((5490, 5539), 'nengo_spa.similarity', 'spa.similarity', (['sim_rec.data[p_input_word]', 'vocab'], {}), '(sim_rec.data[p_input_word], vocab)\n', (5504, 5539), True, 'import nengo_spa as spa\n'), ((5613, 5663), 'nengo_spa.similarity', 'spa.similarity', (['sim_rec.data[p_input_color]', 'vocab'], {}), '(sim_rec.data[p_input_color], vocab)\n', (5627, 5663), True, 'import nengo_spa as spa\n'), ((5737, 5783), 'nengo_spa.similarity', 'spa.similarity', (['sim_rec.data[p_wm_word]', 'vocab'], {}), '(sim_rec.data[p_wm_word], vocab)\n', (5751, 5783), True, 'import nengo_spa as spa\n'), ((5857, 5904), 'nengo_spa.similarity', 'spa.similarity', (['sim_rec.data[p_wm_color]', 'vocab'], {}), '(sim_rec.data[p_wm_color], vocab)\n', (5871, 5904), True, 'import nengo_spa as spa\n'), ((5978, 6019), 'nengo_spa.similarity', 'spa.similarity', (['sim_rec.data[p_wm]', 'vocab'], {}), '(sim_rec.data[p_wm], vocab)\n', (5992, 6019), True, 'import nengo_spa as spa\n'), ((6093, 6138), 'nengo_spa.similarity', 'spa.similarity', (['sim_rec.data[p_finger]', 'vocab'], {}), '(sim_rec.data[p_finger], vocab)\n', (6107, 6138), True, 'import nengo_spa as spa\n'), ((1774, 1795), 'nengo_spa.ActionSelection', 'spa.ActionSelection', ([], {}), '()\n', (1793, 1795), True, 'import nengo_spa as spa\n'), ((4621, 4642), 'nengo_spa.ActionSelection', 'spa.ActionSelection', ([], {}), '()\n', (4640, 4642), True, 'import nengo_spa as spa\n'), ((1816, 1841), 'nengo_spa.dot', 'spa.dot', (['wm', 'spa.sym.BLUE'], {}), '(wm, spa.sym.BLUE)\n', (1823, 1841), True, 'import nengo_spa as spa\n'), ((1898, 1922), 'nengo_spa.dot', 'spa.dot', (['wm', 'spa.sym.RED'], {}), '(wm, spa.sym.RED)\n', (1905, 1922), True, 'import nengo_spa as spa\n'), ((4663, 4688), 'nengo_spa.dot', 'spa.dot', (['wm', 'spa.sym.BLUE'], {}), '(wm, spa.sym.BLUE)\n', (4670, 4688), True, 'import nengo_spa as spa\n'), ((4745, 4769), 'nengo_spa.dot', 'spa.dot', (['wm', 'spa.sym.RED'], {}), '(wm, spa.sym.RED)\n', (4752, 4769), True, 'import nengo_spa as spa\n'), ((1997, 2025), 'nengo_spa.semantic_pointer.Zero', 'spa.semantic_pointer.Zero', (['D'], {}), '(D)\n', (2022, 2025), True, 'import nengo_spa as spa\n'), ((4844, 4872), 'nengo_spa.semantic_pointer.Zero', 'spa.semantic_pointer.Zero', (['D'], {}), '(D)\n', (4869, 4872), True, 'import nengo_spa as spa\n')]
|
import tensorflow as tf
from attention import AttentionLayer
from tensorflow.keras.models import load_model
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from text_cleaner import text_cleaner,rareword_coverage
import pickle
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
def decode_sequence(input_seq,encoder_model,decoder_model,target_word_index,reverse_target_word_index,max_summary_len):
e_out,e_h,e_c=encoder_model.predict(input_seq)
target_seq=np.zeros((1,1))
target_seq[0,0]=target_word_index['sostok']
stop_condition=False
decoded_sentence=''
while not stop_condition:
output_tokens,h,c=decoder_model.predict([target_seq]+[e_out,e_h,e_c])
sampled_token_index=np.argmax(output_tokens[0,-1,:])
sampled_token=reverse_target_word_index[sampled_token_index]
if(sampled_token!='eostok'):
decoded_sentence+=' '+sampled_token
if (sampled_token=='eostok') or len(decoded_sentence.split())>=(max_summary_len-1):
stop_condition=True
target_seq=np.zeros((1,1))
target_seq[0,0]=sampled_token_index
e_h,e_c=h,c
return decoded_sentence
def predict(test_value):
max_text_len=30
max_summary_len=8
#test_value="Gave me such a caffeine overdose I had the shakes, a racing heart and an anxiety attack. Plus it tastes unbelievably bad. I'll stick with coffee, tea and soda, thanks."
cleaned_text=[]
cleaned_text.append(text_cleaner(test_value,0))
cleaned_text=np.array(cleaned_text)
short_text=[]
for i in range(len(cleaned_text)):
if len(cleaned_text[i].split())<=max_text_len:
short_text.append(cleaned_text[i])
x_tr_test=short_text
file=open('X_training_value.pkl','rb')
x_trained_text=pickle.load(file)
file.close()
#x_trained_text=np.append(x_trained_text,x_tr_test)
x_tokenizer=Tokenizer()
x_tokenizer.fit_on_texts(x_trained_text)
cnt,tot_cnt,freq,tot_freq=rareword_coverage(4,x_tokenizer)
x_tokenizer=Tokenizer(num_words=tot_cnt-cnt)
x_tokenizer.fit_on_texts(list(x_trained_text))
x_tr_seq=x_tokenizer.texts_to_sequences(x_tr_test)
x_tr=pad_sequences(x_tr_seq,maxlen=max_text_len,padding='post')
y_tokenizer=Tokenizer()
reverse_target_word_index=dict(map(reversed, y_tokenizer.word_index.items()))
file=open('reverse_target_word_index.pkl','rb')
reverse_target_word_index=pickle.load(file)
file.close()
file=open('reverse_source_word_index.pkl','rb')
reverse_source_word_index=pickle.load(file)
file.close()
file=open('target_word_index.pkl','rb')
target_word_index=pickle.load(file)
file.close()
max_summary_len=8
#target_word_index=y_tokenizer.word_index
encoder_model=load_model('encoder_model.h5',custom_objects={'AttentionLayer' : AttentionLayer})
decoder_model=load_model('decoder_model.h5',custom_objects={'AttentionLayer' : AttentionLayer})
return decode_sequence(x_tr.reshape(1,max_text_len),encoder_model,decoder_model,target_word_index,reverse_target_word_index,max_summary_len)
#print(predict("Gave me such a caffeine overdose I had the shakes, a racing heart and an anxiety attack. Plus it tastes unbelievably bad. I'll stick with coffee, tea and soda, thanks."))
|
[
"text_cleaner.text_cleaner",
"tensorflow.keras.preprocessing.text.Tokenizer",
"tensorflow.keras.models.load_model",
"numpy.argmax",
"keras.preprocessing.sequence.pad_sequences",
"numpy.zeros",
"pickle.load",
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.array",
"text_cleaner.rareword_coverage"
] |
[((311, 373), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (345, 373), True, 'import tensorflow as tf\n'), ((561, 577), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (569, 577), True, 'import numpy as np\n'), ((1610, 1632), 'numpy.array', 'np.array', (['cleaned_text'], {}), '(cleaned_text)\n', (1618, 1632), True, 'import numpy as np\n'), ((1884, 1901), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1895, 1901), False, 'import pickle\n'), ((1993, 2004), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (2002, 2004), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((2081, 2114), 'text_cleaner.rareword_coverage', 'rareword_coverage', (['(4)', 'x_tokenizer'], {}), '(4, x_tokenizer)\n', (2098, 2114), False, 'from text_cleaner import text_cleaner, rareword_coverage\n'), ((2132, 2166), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': '(tot_cnt - cnt)'}), '(num_words=tot_cnt - cnt)\n', (2141, 2166), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((2281, 2341), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_tr_seq'], {'maxlen': 'max_text_len', 'padding': '"""post"""'}), "(x_tr_seq, maxlen=max_text_len, padding='post')\n", (2294, 2341), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2357, 2368), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (2366, 2368), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((2533, 2550), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2544, 2550), False, 'import pickle\n'), ((2650, 2667), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2661, 2667), False, 'import pickle\n'), ((2751, 2768), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (2762, 2768), False, 'import pickle\n'), ((2873, 2958), 'tensorflow.keras.models.load_model', 'load_model', (['"""encoder_model.h5"""'], {'custom_objects': "{'AttentionLayer': AttentionLayer}"}), "('encoder_model.h5', custom_objects={'AttentionLayer':\n AttentionLayer})\n", (2883, 2958), False, 'from tensorflow.keras.models import load_model\n'), ((2973, 3058), 'tensorflow.keras.models.load_model', 'load_model', (['"""decoder_model.h5"""'], {'custom_objects': "{'AttentionLayer': AttentionLayer}"}), "('decoder_model.h5', custom_objects={'AttentionLayer':\n AttentionLayer})\n", (2983, 3058), False, 'from tensorflow.keras.models import load_model\n'), ((811, 845), 'numpy.argmax', 'np.argmax', (['output_tokens[0, -1, :]'], {}), '(output_tokens[0, -1, :])\n', (820, 845), True, 'import numpy as np\n'), ((1151, 1167), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (1159, 1167), True, 'import numpy as np\n'), ((1565, 1592), 'text_cleaner.text_cleaner', 'text_cleaner', (['test_value', '(0)'], {}), '(test_value, 0)\n', (1577, 1592), False, 'from text_cleaner import text_cleaner, rareword_coverage\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 6 20:17:09 2018
@author: tyler
"""
import numpy as np
import sys
#%%
def karger(G,vertex_label,vertex_degree,size_V):
size_V = len(vertex_label)
#N = int(size_V*(1-1/np.sqrt(2)))
iteration_schedule = [size_V-2]
for N in iteration_schedule:
for n in range(N):
# if n%1000==0: print('iteration:',n)
# uniformly at random pick e = (v0,v1)
cs0 = np.cumsum(vertex_degree)
rand_idx0 = np.random.randint(cs0[-1])
e0 = np.searchsorted(cs0,rand_idx0,side='right')
#cs1 = np.cumsum(np.append(G[e0,e0:],G[:e0,e0]))
cs1 = np.cumsum(G[e0])
rand_idx1 = np.random.randint(vertex_degree[e0])
e1 = np.searchsorted(cs1,rand_idx1,side='right')
if(G[e0,e1] == 0):
print('picked empty edge')
v0 = e0
v1 = e1
# bring edges from v1 into v0
# add new edges to v0
G[v0] += G[v1]
G[:,v0] += G[v1]
new_edge_count = vertex_degree[v1] - G[v0,v0] #- G[v1,v1]
# delete old edges from v1
G[v1] = 0
G[:,v1] = 0
# delete any created loops
G[v0,v0] = 0
np.putmask(vertex_label,vertex_label==v1,v0)
vertex_degree[v0] += new_edge_count
vertex_degree[v1] = 0
nz = np.nonzero(vertex_degree)[0]
if(len(nz) != 2):
print('did not find well defined cut')
SN0 = np.where(vertex_label == nz[0])[0]
SN1 = np.where(vertex_label == nz[1])[0]
if len(SN0) + len(SN1) != size_V:
print('lost nodes')
if len(SN0) < len(SN1):
cut = SN0
else:
cut = SN1
return cut,vertex_degree[nz[0]]
#%%
#python p1.py z N ID
z = sys.argv[1] # 0,1,2,3
N = int(sys.argv[2]) # integer number of runs
ID = sys.argv[3] # output file id
#%%
E_raw = np.loadtxt('b'+str(z)+'.in',dtype='int')
min_E = np.min(E_raw)
E = E_raw - min_E
size_V = np.max(E)+1
G = np.zeros((size_V,size_V),dtype='int64')
vertex_degree = np.zeros(size_V,dtype='int64')
for e0,e1 in E:
vertex_degree[e0] += 1;
vertex_degree[e1] += 1;
G[min(e0,e1),max(e0,e1)] += 1;
G[max(e0,e1),min(e0,e1)] += 1;
vertex_label = np.arange(size_V,dtype='int64') # gives index of supervertex containg vertex
#%%
f=open('b'+z+'/cuts_'+ID+'.dat','ab')
g=open('b'+z+'/cut_sizes_'+ID+'.dat','ab')
#
for n in range(N):
if n%500 == 0:
print(ID+'_trial :', n,' of ',N)
vl,cut_size = karger(np.copy(G),np.copy(vertex_label),np.copy(vertex_degree),size_V)
np.savetxt(f,[vl],fmt='%d',delimiter=',')
np.savetxt(g,[cut_size],fmt='%d',delimiter=',')
f.close()
g.close()
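# (Editor's note) A single contraction run returns any fixed minimum cut with
# probability at least 2/(n*(n-1)), so on the order of n^2 * ln(n) independent
# trials -- the N runs above -- are needed to find it with high probability.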
|
[
"numpy.copy",
"numpy.savetxt",
"numpy.zeros",
"numpy.searchsorted",
"numpy.nonzero",
"numpy.cumsum",
"numpy.min",
"numpy.max",
"numpy.arange",
"numpy.where",
"numpy.random.randint",
"numpy.putmask"
] |
[((2209, 2222), 'numpy.min', 'np.min', (['E_raw'], {}), '(E_raw)\n', (2215, 2222), True, 'import numpy as np\n'), ((2267, 2308), 'numpy.zeros', 'np.zeros', (['(size_V, size_V)'], {'dtype': '"""int64"""'}), "((size_V, size_V), dtype='int64')\n", (2275, 2308), True, 'import numpy as np\n'), ((2323, 2354), 'numpy.zeros', 'np.zeros', (['size_V'], {'dtype': '"""int64"""'}), "(size_V, dtype='int64')\n", (2331, 2354), True, 'import numpy as np\n'), ((2513, 2545), 'numpy.arange', 'np.arange', (['size_V'], {'dtype': '"""int64"""'}), "(size_V, dtype='int64')\n", (2522, 2545), True, 'import numpy as np\n'), ((2250, 2259), 'numpy.max', 'np.max', (['E'], {}), '(E)\n', (2256, 2259), True, 'import numpy as np\n'), ((2855, 2899), 'numpy.savetxt', 'np.savetxt', (['f', '[vl]'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "(f, [vl], fmt='%d', delimiter=',')\n", (2865, 2899), True, 'import numpy as np\n'), ((2901, 2951), 'numpy.savetxt', 'np.savetxt', (['g', '[cut_size]'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "(g, [cut_size], fmt='%d', delimiter=',')\n", (2911, 2951), True, 'import numpy as np\n'), ((1615, 1640), 'numpy.nonzero', 'np.nonzero', (['vertex_degree'], {}), '(vertex_degree)\n', (1625, 1640), True, 'import numpy as np\n'), ((1733, 1764), 'numpy.where', 'np.where', (['(vertex_label == nz[0])'], {}), '(vertex_label == nz[0])\n', (1741, 1764), True, 'import numpy as np\n'), ((1778, 1809), 'numpy.where', 'np.where', (['(vertex_label == nz[1])'], {}), '(vertex_label == nz[1])\n', (1786, 1809), True, 'import numpy as np\n'), ((2783, 2793), 'numpy.copy', 'np.copy', (['G'], {}), '(G)\n', (2790, 2793), True, 'import numpy as np\n'), ((2794, 2815), 'numpy.copy', 'np.copy', (['vertex_label'], {}), '(vertex_label)\n', (2801, 2815), True, 'import numpy as np\n'), ((2816, 2838), 'numpy.copy', 'np.copy', (['vertex_degree'], {}), '(vertex_degree)\n', (2823, 2838), True, 'import numpy as np\n'), ((498, 522), 'numpy.cumsum', 'np.cumsum', (['vertex_degree'], {}), '(vertex_degree)\n', (507, 522), True, 'import numpy as np\n'), ((547, 573), 'numpy.random.randint', 'np.random.randint', (['cs0[-1]'], {}), '(cs0[-1])\n', (564, 573), True, 'import numpy as np\n'), ((591, 636), 'numpy.searchsorted', 'np.searchsorted', (['cs0', 'rand_idx0'], {'side': '"""right"""'}), "(cs0, rand_idx0, side='right')\n", (606, 636), True, 'import numpy as np\n'), ((727, 743), 'numpy.cumsum', 'np.cumsum', (['G[e0]'], {}), '(G[e0])\n', (736, 743), True, 'import numpy as np\n'), ((768, 804), 'numpy.random.randint', 'np.random.randint', (['vertex_degree[e0]'], {}), '(vertex_degree[e0])\n', (785, 804), True, 'import numpy as np\n'), ((822, 867), 'numpy.searchsorted', 'np.searchsorted', (['cs1', 'rand_idx1'], {'side': '"""right"""'}), "(cs1, rand_idx1, side='right')\n", (837, 867), True, 'import numpy as np\n'), ((1435, 1483), 'numpy.putmask', 'np.putmask', (['vertex_label', '(vertex_label == v1)', 'v0'], {}), '(vertex_label, vertex_label == v1, v0)\n', (1445, 1483), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
import rospy
import actionlib
import dynamic_reconfigure.client
from riptide_msgs.msg import AttitudeCommand, LinearCommand, Imu
from std_msgs.msg import Float32, Float64, Int32
import riptide_controllers.msg
import time
import math
import numpy as np
def angleDiff(a, b):
return ((a-b+180) % 360)-180
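# e.g. angleDiff(350, 10) == -20: the signed difference a-b wrapped to [-180, 180)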
class GateManeuver(object):
ROLL_P = 2
CRUISE_VELOCITY = 45
DRIVE_FORCE = 30
def __init__(self):
self.rollPub = rospy.Publisher(
"/command/roll", AttitudeCommand, queue_size=5)
self.yawPub = rospy.Publisher(
"/command/yaw", AttitudeCommand, queue_size=5)
self.XPub = rospy.Publisher(
"/command/x", LinearCommand, queue_size=5)
self.YPub = rospy.Publisher(
"/command/y", LinearCommand, queue_size=5)
self.ZPub = rospy.Publisher(
"/command/force_z", Float64, queue_size=5)
self._as = actionlib.SimpleActionServer(
"gate_maneuver", riptide_controllers.msg.GateManeuverAction, execute_cb=self.execute_cb, auto_start=False)
self._as.start()
def execute_cb(self, goal):
rospy.loginfo("Starting gate maneuver")
self.startAngle = rospy.wait_for_message("/state/imu", Imu).rpy_deg.z
self.angleTraveled = 0
self.pastHalf = False
self.yawPub.publish(self.CRUISE_VELOCITY, AttitudeCommand.VELOCITY)
self.rollPub.publish(self.CRUISE_VELOCITY, AttitudeCommand.VELOCITY)
self.imuSub = rospy.Subscriber("/state/imu", Imu, self.imuCb)
while self.angleTraveled < 330 and not rospy.is_shutdown():
rospy.sleep(0.05)
if self._as.is_preempt_requested():
rospy.loginfo('Preempted Gate Maneuver')
self.cleanup()
self._as.set_preempted()
return
rospy.loginfo("Leveling")
self.cleanup()
while abs(rospy.wait_for_message("/state/imu", Imu).rpy_deg.x) > 5 and not rospy.is_shutdown():
rospy.sleep(0.05)
rospy.loginfo("Done")
self._as.set_succeeded()
def cleanup(self):
self.yawPub.publish(0, AttitudeCommand.POSITION)
self.rollPub.publish(0, AttitudeCommand.POSITION)
self.imuSub.unregister()
self.XPub.publish(0, LinearCommand.FORCE)
self.YPub.publish(0, LinearCommand.FORCE)
self.ZPub.publish(0)
def imuCb(self, msg):
self.angleTraveled = angleDiff(msg.rpy_deg.z, self.startAngle)
roll = msg.rpy_deg.x
if self.angleTraveled < -90:
self.pastHalf = True
if self.pastHalf and self.angleTraveled < 0:
self.angleTraveled += 360
if roll < 0:
roll += 360
self.rollPub.publish(self.CRUISE_VELOCITY + self.ROLL_P * (self.angleTraveled - roll), AttitudeCommand.VELOCITY)
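        # Build roll and yaw rotation matrices below, then apply the inverse of
        # the combined rotation to map the constant world-frame drive force
        # into body-frame X/Y/Z commands.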
sr = math.sin(roll * math.pi / 180)
cr = math.cos(roll * math.pi / 180)
sy = math.sin(self.angleTraveled * math.pi / 180)
cy = math.cos(self.angleTraveled * math.pi / 180)
rRotMat = np.matrix([[1,0,0],[0,cr,-sr],[0,sr,cr]])
yRotMat = np.matrix([[cy,-sy,0],[sy,cy,0],[0,0,1]])
outVector = np.dot(np.linalg.inv(np.dot(yRotMat, rRotMat)), np.matrix([[self.DRIVE_FORCE],[0],[0]]))
self.XPub.publish(outVector.item(0), LinearCommand.FORCE)
self.YPub.publish(outVector.item(1), LinearCommand.FORCE)
self.ZPub.publish(outVector.item(2))
if __name__ == '__main__':
rospy.init_node('gate_maneuver')
server = GateManeuver()
rospy.spin()
|
[
"numpy.matrix",
"rospy.Subscriber",
"rospy.wait_for_message",
"rospy.Publisher",
"math.sin",
"rospy.sleep",
"rospy.loginfo",
"rospy.is_shutdown",
"actionlib.SimpleActionServer",
"rospy.init_node",
"math.cos",
"numpy.dot",
"rospy.spin"
] |
[((3524, 3556), 'rospy.init_node', 'rospy.init_node', (['"""gate_maneuver"""'], {}), "('gate_maneuver')\n", (3539, 3556), False, 'import rospy\n'), ((3589, 3601), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3599, 3601), False, 'import rospy\n'), ((473, 536), 'rospy.Publisher', 'rospy.Publisher', (['"""/command/roll"""', 'AttitudeCommand'], {'queue_size': '(5)'}), "('/command/roll', AttitudeCommand, queue_size=5)\n", (488, 536), False, 'import rospy\n'), ((572, 634), 'rospy.Publisher', 'rospy.Publisher', (['"""/command/yaw"""', 'AttitudeCommand'], {'queue_size': '(5)'}), "('/command/yaw', AttitudeCommand, queue_size=5)\n", (587, 634), False, 'import rospy\n'), ((668, 726), 'rospy.Publisher', 'rospy.Publisher', (['"""/command/x"""', 'LinearCommand'], {'queue_size': '(5)'}), "('/command/x', LinearCommand, queue_size=5)\n", (683, 726), False, 'import rospy\n'), ((760, 818), 'rospy.Publisher', 'rospy.Publisher', (['"""/command/y"""', 'LinearCommand'], {'queue_size': '(5)'}), "('/command/y', LinearCommand, queue_size=5)\n", (775, 818), False, 'import rospy\n'), ((852, 910), 'rospy.Publisher', 'rospy.Publisher', (['"""/command/force_z"""', 'Float64'], {'queue_size': '(5)'}), "('/command/force_z', Float64, queue_size=5)\n", (867, 910), False, 'import rospy\n'), ((944, 1084), 'actionlib.SimpleActionServer', 'actionlib.SimpleActionServer', (['"""gate_maneuver"""', 'riptide_controllers.msg.GateManeuverAction'], {'execute_cb': 'self.execute_cb', 'auto_start': '(False)'}), "('gate_maneuver', riptide_controllers.msg.\n GateManeuverAction, execute_cb=self.execute_cb, auto_start=False)\n", (972, 1084), False, 'import actionlib\n'), ((1159, 1198), 'rospy.loginfo', 'rospy.loginfo', (['"""Starting gate maneuver"""'], {}), "('Starting gate maneuver')\n", (1172, 1198), False, 'import rospy\n'), ((1515, 1562), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/state/imu"""', 'Imu', 'self.imuCb'], {}), "('/state/imu', Imu, self.imuCb)\n", (1531, 1562), False, 'import rospy\n'), ((1872, 1897), 'rospy.loginfo', 'rospy.loginfo', (['"""Leveling"""'], {}), "('Leveling')\n", (1885, 1897), False, 'import rospy\n'), ((2066, 2087), 'rospy.loginfo', 'rospy.loginfo', (['"""Done"""'], {}), "('Done')\n", (2079, 2087), False, 'import rospy\n'), ((2892, 2922), 'math.sin', 'math.sin', (['(roll * math.pi / 180)'], {}), '(roll * math.pi / 180)\n', (2900, 2922), False, 'import math\n'), ((2936, 2966), 'math.cos', 'math.cos', (['(roll * math.pi / 180)'], {}), '(roll * math.pi / 180)\n', (2944, 2966), False, 'import math\n'), ((2980, 3024), 'math.sin', 'math.sin', (['(self.angleTraveled * math.pi / 180)'], {}), '(self.angleTraveled * math.pi / 180)\n', (2988, 3024), False, 'import math\n'), ((3038, 3082), 'math.cos', 'math.cos', (['(self.angleTraveled * math.pi / 180)'], {}), '(self.angleTraveled * math.pi / 180)\n', (3046, 3082), False, 'import math\n'), ((3102, 3151), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0], [0, cr, -sr], [0, sr, cr]]'], {}), '([[1, 0, 0], [0, cr, -sr], [0, sr, cr]])\n', (3111, 3151), True, 'import numpy as np\n'), ((3162, 3211), 'numpy.matrix', 'np.matrix', (['[[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]]'], {}), '([[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]])\n', (3171, 3211), True, 'import numpy as np\n'), ((1644, 1661), 'rospy.sleep', 'rospy.sleep', (['(0.05)'], {}), '(0.05)\n', (1655, 1661), False, 'import rospy\n'), ((2039, 2056), 'rospy.sleep', 'rospy.sleep', (['(0.05)'], {}), '(0.05)\n', (2050, 2056), False, 'import rospy\n'), ((3272, 3313), 'numpy.matrix', 'np.matrix', (['[[self.DRIVE_FORCE], [0], [0]]'], {}), 
'([[self.DRIVE_FORCE], [0], [0]])\n', (3281, 3313), True, 'import numpy as np\n'), ((1225, 1266), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/state/imu"""', 'Imu'], {}), "('/state/imu', Imu)\n", (1247, 1266), False, 'import rospy\n'), ((1611, 1630), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1628, 1630), False, 'import rospy\n'), ((1727, 1767), 'rospy.loginfo', 'rospy.loginfo', (['"""Preempted Gate Maneuver"""'], {}), "('Preempted Gate Maneuver')\n", (1740, 1767), False, 'import rospy\n'), ((2006, 2025), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2023, 2025), False, 'import rospy\n'), ((3245, 3269), 'numpy.dot', 'np.dot', (['yRotMat', 'rRotMat'], {}), '(yRotMat, rRotMat)\n', (3251, 3269), True, 'import numpy as np\n'), ((1941, 1982), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/state/imu"""', 'Imu'], {}), "('/state/imu', Imu)\n", (1963, 1982), False, 'import rospy\n')]
|
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from PIL import Image
def img_show(img):
pil_img = Image.fromarray(np.uint8(img))
pil_img.show()
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize = False)
img = x_train[0]
label = t_train[0]
print(label)
print(img.shape)
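# load_mnist(flatten=True) returns each image as a 784-element vector,
# so reshape back to 28x28 before display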
img = img.reshape(28, 28)
print(img.shape)
img_show(img)
|
[
"sys.path.append",
"dataset.mnist.load_mnist",
"numpy.uint8"
] |
[((16, 42), 'sys.path.append', 'sys.path.append', (['os.pardir'], {}), '(os.pardir)\n', (31, 42), False, 'import sys, os\n'), ((254, 295), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'flatten': '(True)', 'normalize': '(False)'}), '(flatten=True, normalize=False)\n', (264, 295), False, 'from dataset.mnist import load_mnist\n'), ((177, 190), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (185, 190), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import math
# Function to compute the Euclidean distance between two points:
def euc_dst(x1, y1, x2, y2):
pt_a = (x1 - x2)**2
pt_b = (y1 - y2)**2
return math.sqrt(pt_a + pt_b)
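# e.g. euc_dst(0, 0, 3, 4) == 5.0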
cap = cv2.VideoCapture(0)
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1,
minDist=10, param1=100, param2=50, minRadius=0, maxRadius=500)
if circles is not None:
circles = np.uint16(np.around(circles))
x_cord = []
y_cord = []
rad = []
# Converting parameters of circle (center coordinates:x,y & radius)
for pt in circles[0, :]:
x, y, r = pt[0], pt[1], pt[2]
# Storing centers & radius of all circles
x_cord.append(x)
y_cord.append(y)
rad.append(r)
# Drawing outer circle
cv2.circle(frame, (x, y), r, (0, 255, 0), 2)
# Drawing circle center
cv2.circle(frame, (x, y), 1, (0, 0, 255), 3)
if len(rad) > 1:
for i in range(0, len(rad)):
x1 = x_cord[i]
y1 = y_cord[i]
for j in range(i+1, len(rad)):
x2 = x_cord[j]
y2 = y_cord[j]
cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
mid_x = (x1+x2)/2
mid_y = (y1+y2)/2
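                    # assumes ~25 pixels per cm (a fixed, setup-specific calibration)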
dist = euc_dst(x1/25, y1/25, x2/25, y2/25)
cv2.putText(frame, "{:.1f}cm".format(dist), (int(mid_x), int(
mid_y - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
cv2.imshow('video', frame)
if cv2.waitKey(1) == 27: # esc Key
break
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.line",
"cv2.HoughCircles",
"cv2.circle",
"math.sqrt",
"cv2.medianBlur",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.around",
"cv2.destroyAllWindows"
] |
[((217, 236), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (233, 236), False, 'import cv2\n'), ((1899, 1922), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1920, 1922), False, 'import cv2\n'), ((183, 205), 'math.sqrt', 'math.sqrt', (['(pt_a + pt_b)'], {}), '(pt_a + pt_b)\n', (192, 205), False, 'import math\n'), ((294, 333), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (306, 333), False, 'import cv2\n'), ((346, 369), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(5)'], {}), '(gray, 5)\n', (360, 369), False, 'import cv2\n'), ((385, 500), 'cv2.HoughCircles', 'cv2.HoughCircles', (['gray', 'cv2.HOUGH_GRADIENT'], {'dp': '(1)', 'minDist': '(10)', 'param1': '(100)', 'param2': '(50)', 'minRadius': '(0)', 'maxRadius': '(500)'}), '(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=10, param1=100,\n param2=50, minRadius=0, maxRadius=500)\n', (401, 500), False, 'import cv2\n'), ((1798, 1824), 'cv2.imshow', 'cv2.imshow', (['"""video"""', 'frame'], {}), "('video', frame)\n", (1808, 1824), False, 'import cv2\n'), ((1833, 1847), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1844, 1847), False, 'import cv2\n'), ((589, 607), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (598, 607), True, 'import numpy as np\n'), ((1014, 1058), 'cv2.circle', 'cv2.circle', (['frame', '(x, y)', 'r', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), r, (0, 255, 0), 2)\n', (1024, 1058), False, 'import cv2\n'), ((1111, 1155), 'cv2.circle', 'cv2.circle', (['frame', '(x, y)', '(1)', '(0, 0, 255)', '(3)'], {}), '(frame, (x, y), 1, (0, 0, 255), 3)\n', (1121, 1155), False, 'import cv2\n'), ((1429, 1480), 'cv2.line', 'cv2.line', (['frame', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(2)'], {}), '(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)\n', (1437, 1480), False, 'import cv2\n')]
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Created on : Mon Jun 4 23:17:56 2018
@author : Sourabh
"""
# %%
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
import matplotlib.pyplot as plt
# ============================================================================ #
np.set_printoptions(threshold=np.inf)  # print arrays in full
# constant properties that need changes according to the actual problem
Data_File = 'Position_Salaries.csv'
Dependent_Variable_Column = 2
Test_Data_Size = 0.2
# import the dataset & extract the feature and the dependent variable vectors
dataset = pd.read_csv(Data_File)
X = dataset.iloc[:, 1:Dependent_Variable_Column].values
y = dataset.iloc[:, Dependent_Variable_Column].values
# feature scaling: SVR does not scale features automatically, so we do it here explicitly
sc_X = StandardScaler()
sc_y = StandardScaler()
X_scaled = sc_X.fit_transform(X.reshape(-1, 1))
y_scaled = sc_y.fit_transform(y.reshape(-1, 1))
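# StandardScaler expects 2-D input, hence the reshape(-1, 1) on X and y above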
# ============================================================================ #
# creating and fitting the SVR model to the dataset
# since the training data is clearly non-linear, a linear kernel would fit
# poorly; a polynomial or Gaussian (RBF) kernel is the better choice.
regressor = SVR(kernel='rbf')
regressor.fit(X_scaled, y_scaled)
# predicting a new result with SVR model
# the sample should also be a 1 x m matrix with m feature values
sampleValue = np.array([[6.5]])
y_pred = sc_y.inverse_transform(
regressor.predict(
sc_X.transform(sampleValue)
)
)
# ============================================================================ #
# visualising the SVR results
stepSize = 0.1
X_grid = np.arange(start=min(X), stop=max(X)+stepSize, step=stepSize)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red', marker='o', label='Samples')
plt.plot(X_grid,
sc_y.inverse_transform(regressor.predict(sc_X.transform(X_grid))),
color='blue',
label='SVR Model')
plt.title('Truth or Bluff (SVR)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.legend(loc='best')
plt.show()
|
[
"matplotlib.pyplot.title",
"sklearn.svm.SVR",
"numpy.set_printoptions",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((356, 393), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (375, 393), True, 'import numpy as np\n'), ((643, 665), 'pandas.read_csv', 'pd.read_csv', (['Data_File'], {}), '(Data_File)\n', (654, 665), True, 'import pandas as pd\n'), ((864, 880), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (878, 880), False, 'from sklearn.preprocessing import StandardScaler\n'), ((888, 904), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (902, 904), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1300, 1317), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (1303, 1317), False, 'from sklearn.svm import SVR\n'), ((1473, 1490), 'numpy.array', 'np.array', (['[[6.5]]'], {}), '([[6.5]])\n', (1481, 1490), True, 'import numpy as np\n'), ((1871, 1930), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {'color': '"""red"""', 'marker': '"""o"""', 'label': '"""Samples"""'}), "(X, y, color='red', marker='o', label='Samples')\n", (1882, 1930), True, 'import matplotlib.pyplot as plt\n'), ((2075, 2108), 'matplotlib.pyplot.title', 'plt.title', (['"""Truth or Bluff (SVR)"""'], {}), "('Truth or Bluff (SVR)')\n", (2084, 2108), True, 'import matplotlib.pyplot as plt\n'), ((2109, 2137), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position Level"""'], {}), "('Position Level')\n", (2119, 2137), True, 'import matplotlib.pyplot as plt\n'), ((2138, 2158), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Salary"""'], {}), "('Salary')\n", (2148, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2159, 2181), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2169, 2181), True, 'import matplotlib.pyplot as plt\n'), ((2182, 2192), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2190, 2192), True, 'import matplotlib.pyplot as plt\n')]
|
# Imports modules
import argparse
import torch
from torchvision import transforms,datasets,models
from PIL import Image
import numpy as np
def get_input_args_train():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type = str, default = 'flowers',
help='dataset directory')
parser.add_argument('--save_dir', type = str, default = '/home/workspace/ImageClassifier/',
help = 'path to the folder for saving checkpoints')
parser.add_argument('--arch',type = str, default = 'densenet',
help = 'NN Model Architecture vgg or densenet. default = densenet')
parser.add_argument('--learning_rate',type = float, default = 0.001,
help = 'value of learning rate')
parser.add_argument('--hidden_units',type = int, default = 512,
help = 'number of hidden units')
parser.add_argument('--epochs',type = int, default = 10,
help = 'number of iterations for training network')
parser.add_argument('--gpu', type = bool, default = 'False',
help='device to run your model : gpu or cpu. Default = False i.e cpu')
return parser.parse_args()
def get_input_args_predict():
parser = argparse.ArgumentParser()
parser.add_argument('--image_path', type = str, default = '/home/workspace/ImageClassifier/flowers/test/1/image_06743.jpg',
help = 'path to image')
parser.add_argument('--checkpoint',type = str, default = 'checkpoint.pth',
help = 'trained model checkpoint')
parser.add_argument('--top_k',type = int, default = 3,
help = 'number of classes with highest prob.')
parser.add_argument('--category_names', default = 'cat_to_name.json',
help = 'mapping of categories to real names file')
parser.add_argument('--gpu', type = bool, default = 'False',
help='device to run your model : gpu or cpu.Default = False i.e cpu')
return parser.parse_args()
def process_data(train_dir, test_dir, valid_dir):
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
trainsets = datasets.ImageFolder(train_dir, transform = train_transforms)
testsets = datasets.ImageFolder(test_dir, transform = test_transforms)
validsets = datasets.ImageFolder(valid_dir, transform = test_transforms)
trainloader = torch.utils.data.DataLoader(trainsets, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(testsets, batch_size=64)
validloader = torch.utils.data.DataLoader(validsets, batch_size=64)
return trainloader, testloader, validloader, trainsets
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model;
        returns a NumPy array.
    '''
image = Image.open(image)
if image.size[0] > image.size[1]:
aspect = image.size[1] / 256
new_size = (image.size[0] / aspect, 256)
else:
aspect = image.size[0] / 256
new_size = (256, image.size[1] / aspect)
image.thumbnail(new_size, Image.ANTIALIAS)
# crop out center of image
width, height = image.size # Get dimensions
left = (width - 224) / 2
top = (height - 224) / 2
right = (width + 224) / 2
bottom = (height + 224) / 2
image = image.crop((left, top, right, bottom))
np_image = np.array(image)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = np_image / 255.0
np_image = (np_image - mean)/std
np_image = np.transpose(np_image, (2, 0, 1))
return np_image
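# Illustrative use (not part of the original helpers): wrap the processed
# array as a batched torch tensor for inference; 'flower.jpg' is a placeholder.
# image_tensor = torch.from_numpy(process_image('flower.jpg')).float().unsqueeze(0)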
|
[
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomRotation",
"torchvision.transforms.Normalize",
"numpy.transpose",
"PIL.Image.open",
"torchvision.datasets.ImageFolder",
"numpy.array",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"
] |
[((188, 213), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (211, 213), False, 'import argparse\n'), ((1301, 1326), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1324, 1326), False, 'import argparse\n'), ((3097, 3156), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': 'train_transforms'}), '(train_dir, transform=train_transforms)\n', (3117, 3156), False, 'from torchvision import transforms, datasets, models\n'), ((3174, 3231), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': 'test_transforms'}), '(test_dir, transform=test_transforms)\n', (3194, 3231), False, 'from torchvision import transforms, datasets, models\n'), ((3250, 3308), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['valid_dir'], {'transform': 'test_transforms'}), '(valid_dir, transform=test_transforms)\n', (3270, 3308), False, 'from torchvision import transforms, datasets, models\n'), ((3354, 3421), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainsets'], {'batch_size': '(64)', 'shuffle': '(True)'}), '(trainsets, batch_size=64, shuffle=True)\n', (3381, 3421), False, 'import torch\n'), ((3439, 3491), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testsets'], {'batch_size': '(64)'}), '(testsets, batch_size=64)\n', (3466, 3491), False, 'import torch\n'), ((3510, 3563), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['validsets'], {'batch_size': '(64)'}), '(validsets, batch_size=64)\n', (3537, 3563), False, 'import torch\n'), ((3798, 3815), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (3808, 3815), False, 'from PIL import Image\n'), ((4374, 4389), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4382, 4389), True, 'import numpy as np\n'), ((4401, 4432), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (4409, 4432), True, 'import numpy as np\n'), ((4443, 4474), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (4451, 4474), True, 'import numpy as np\n'), ((4568, 4601), 'numpy.transpose', 'np.transpose', (['np_image', '(2, 0, 1)'], {}), '(np_image, (2, 0, 1))\n', (4580, 4601), True, 'import numpy as np\n'), ((2276, 2305), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (2301, 2305), False, 'from torchvision import transforms, datasets, models\n'), ((2350, 2383), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (2378, 2383), False, 'from torchvision import transforms, datasets, models\n'), ((2428, 2461), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2459, 2461), False, 'from torchvision import transforms, datasets, models\n'), ((2506, 2527), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2525, 2527), False, 'from torchvision import transforms, datasets, models\n'), ((2572, 2638), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2592, 2638), False, 'from torchvision import transforms, datasets, models\n'), ((2747, 2769), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (2764, 2769), False, 'from torchvision import transforms, datasets, models\n'), ((2813, 2839), 
'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (2834, 2839), False, 'from torchvision import transforms, datasets, models\n'), ((2883, 2904), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2902, 2904), False, 'from torchvision import transforms, datasets, models\n'), ((2948, 3014), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2968, 3014), False, 'from torchvision import transforms, datasets, models\n')]
|
"""
Fits PSPL model with parallax using EMCEE sampler.
"""
import os
import sys
import numpy as np
try:
import emcee
except ImportError as err:
print(err)
print("\nEMCEE could not be imported.")
print("Get it from: http://dfm.io/emcee/current/user/install/")
print("and re-run the script")
sys.exit(1)
import matplotlib.pyplot as plt
import MulensModel as mm
# Define likelihood functions
def ln_like(theta, event, parameters_to_fit):
""" likelihood function """
for key, val in enumerate(parameters_to_fit):
setattr(event.model.parameters, val, theta[key])
return -0.5 * event.get_chi2()
def ln_prior(theta, parameters_to_fit):
"""priors - we only reject obviously wrong models"""
if theta[parameters_to_fit.index("t_E")] < 0.:
return -np.inf
return 0.0
def ln_prob(theta, event, parameters_to_fit):
""" combines likelihood and priors"""
ln_prior_ = ln_prior(theta, parameters_to_fit)
if not np.isfinite(ln_prior_):
return -np.inf
ln_like_ = ln_like(theta, event, parameters_to_fit)
    # If the source fluxes come out negative, reject the model just as if
    # it had failed the priors.
if np.isnan(ln_like_):
return -np.inf
return ln_prior_ + ln_like_
# Read the data
file_name = os.path.join(
mm.DATA_PATH, "photometry_files", "OB05086",
"starBLG234.6.I.218982.dat")
my_data = mm.MulensData(file_name=file_name, add_2450000=True)
coords = "18:04:45.71 -26:59:15.2"
# Starting parameters:
params = dict()
params['t_0'] = 2453628.3
params['t_0_par'] = 2453628.
params['u_0'] = 0.37 # Change sign of u_0 to find the other solution.
params['t_E'] = 100.
params['pi_E_N'] = 0.
params['pi_E_E'] = 0.
my_model = mm.Model(params, coords=coords)
my_event = mm.Event(datasets=my_data, model=my_model)
# Which parameters we want to fit?
parameters_to_fit = ["t_0", "u_0", "t_E", "pi_E_N", "pi_E_E"]
# And remember to provide dispersions to draw starting set of points
sigmas = [0.01, 0.001, 0.1, 0.01, 0.01]
# Initializations for EMCEE
n_dim = len(parameters_to_fit)
n_walkers = 40
n_steps = 500
n_burn = 150
# Including the set of n_walkers starting points:
start_1 = [params[p] for p in parameters_to_fit]
start = [start_1 + np.random.randn(n_dim) * sigmas
for i in range(n_walkers)]
# Run emcee (this can take some time):
sampler = emcee.EnsembleSampler(
n_walkers, n_dim, ln_prob, args=(my_event, parameters_to_fit))
sampler.run_mcmc(start, n_steps)
# Remove burn-in samples and reshape:
samples = sampler.chain[:, n_burn:, :].reshape((-1, n_dim))
# Results:
results = np.percentile(samples, [16, 50, 84], axis=0)
print("Fitted parameters:")
for i in range(n_dim):
r = results[1, i]
print("{:.5f} {:.5f} {:.5f}".format(r, results[2, i]-r, r-results[0, i]))
# We extract best model parameters and chi2 from my_event:
print("\nSmallest chi2 model:")
best = [my_event.best_chi2_parameters[p] for p in parameters_to_fit]
print(*[repr(b) if isinstance(b, float) else b.value for b in best])
print(my_event.best_chi2)
# Now let's plot 3 models
plt.figure()
model_0 = mm.Model({'t_0': 2453628.29062, 'u_0': 0.37263, 't_E': 102.387105})
model_1 = mm.Model(
{'t_0': 2453630.35507, 'u_0': 0.488817, 't_E': 93.611301,
'pi_E_N': 0.2719, 'pi_E_E': 0.1025, 't_0_par': params['t_0_par']},
coords=coords)
model_2 = mm.Model(
{'t_0': 2453630.67778, 'u_0': -0.415677, 't_E': 110.120755,
'pi_E_N': -0.2972, 'pi_E_E': 0.1103, 't_0_par': params['t_0_par']},
coords=coords)
model_0.set_datasets([my_data])
model_1.set_datasets([my_data])
model_2.set_datasets([my_data])
t_1 = 2453200.
t_2 = 2453950.
plot_params = {'lw': 2.5, 'alpha': 0.3, 'subtract_2450000': True,
't_start': t_1, 't_stop': t_2}
my_event.plot_data(subtract_2450000=True)
model_0.plot_lc(label='no pi_E', **plot_params)
model_1.plot_lc(label='pi_E, u_0>0', **plot_params)
model_2.plot_lc(label='pi_E, u_0<0', color='black', ls='dashed', **plot_params)
plt.xlim(t_1-2450000., t_2-2450000.)
plt.legend(loc='best')
plt.title('Data and 3 fitted models')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"MulensModel.MulensData",
"MulensModel.Model",
"matplotlib.pyplot.show",
"numpy.random.randn",
"emcee.EnsembleSampler",
"matplotlib.pyplot.legend",
"numpy.isfinite",
"numpy.isnan",
"numpy.percentile",
"matplotlib.pyplot.figure",
"sys.exit",
"os.path.join",
"MulensModel.Event"
] |
[((1306, 1396), 'os.path.join', 'os.path.join', (['mm.DATA_PATH', '"""photometry_files"""', '"""OB05086"""', '"""starBLG234.6.I.218982.dat"""'], {}), "(mm.DATA_PATH, 'photometry_files', 'OB05086',\n 'starBLG234.6.I.218982.dat')\n", (1318, 1396), False, 'import os\n'), ((1412, 1464), 'MulensModel.MulensData', 'mm.MulensData', ([], {'file_name': 'file_name', 'add_2450000': '(True)'}), '(file_name=file_name, add_2450000=True)\n', (1425, 1464), True, 'import MulensModel as mm\n'), ((1743, 1774), 'MulensModel.Model', 'mm.Model', (['params'], {'coords': 'coords'}), '(params, coords=coords)\n', (1751, 1774), True, 'import MulensModel as mm\n'), ((1786, 1828), 'MulensModel.Event', 'mm.Event', ([], {'datasets': 'my_data', 'model': 'my_model'}), '(datasets=my_data, model=my_model)\n', (1794, 1828), True, 'import MulensModel as mm\n'), ((2374, 2462), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['n_walkers', 'n_dim', 'ln_prob'], {'args': '(my_event, parameters_to_fit)'}), '(n_walkers, n_dim, ln_prob, args=(my_event,\n parameters_to_fit))\n', (2395, 2462), False, 'import emcee\n'), ((2618, 2662), 'numpy.percentile', 'np.percentile', (['samples', '[16, 50, 84]'], {'axis': '(0)'}), '(samples, [16, 50, 84], axis=0)\n', (2631, 2662), True, 'import numpy as np\n'), ((3097, 3109), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3107, 3109), True, 'import matplotlib.pyplot as plt\n'), ((3120, 3187), 'MulensModel.Model', 'mm.Model', (["{'t_0': 2453628.29062, 'u_0': 0.37263, 't_E': 102.387105}"], {}), "({'t_0': 2453628.29062, 'u_0': 0.37263, 't_E': 102.387105})\n", (3128, 3187), True, 'import MulensModel as mm\n'), ((3198, 3350), 'MulensModel.Model', 'mm.Model', (["{'t_0': 2453630.35507, 'u_0': 0.488817, 't_E': 93.611301, 'pi_E_N': 0.2719,\n 'pi_E_E': 0.1025, 't_0_par': params['t_0_par']}"], {'coords': 'coords'}), "({'t_0': 2453630.35507, 'u_0': 0.488817, 't_E': 93.611301, 'pi_E_N':\n 0.2719, 'pi_E_E': 0.1025, 't_0_par': params['t_0_par']}, coords=coords)\n", (3206, 3350), True, 'import MulensModel as mm\n'), ((3371, 3530), 'MulensModel.Model', 'mm.Model', (["{'t_0': 2453630.67778, 'u_0': -0.415677, 't_E': 110.120755, 'pi_E_N': -\n 0.2972, 'pi_E_E': 0.1103, 't_0_par': params['t_0_par']}"], {'coords': 'coords'}), "({'t_0': 2453630.67778, 'u_0': -0.415677, 't_E': 110.120755,\n 'pi_E_N': -0.2972, 'pi_E_E': 0.1103, 't_0_par': params['t_0_par']},\n coords=coords)\n", (3379, 3530), True, 'import MulensModel as mm\n'), ((4000, 4042), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(t_1 - 2450000.0)', '(t_2 - 2450000.0)'], {}), '(t_1 - 2450000.0, t_2 - 2450000.0)\n', (4008, 4042), True, 'import matplotlib.pyplot as plt\n'), ((4037, 4059), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (4047, 4059), True, 'import matplotlib.pyplot as plt\n'), ((4060, 4097), 'matplotlib.pyplot.title', 'plt.title', (['"""Data and 3 fitted models"""'], {}), "('Data and 3 fitted models')\n", (4069, 4097), True, 'import matplotlib.pyplot as plt\n'), ((4098, 4108), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4106, 4108), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1218), 'numpy.isnan', 'np.isnan', (['ln_like_'], {}), '(ln_like_)\n', (1208, 1218), True, 'import numpy as np\n'), ((315, 326), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (323, 326), False, 'import sys\n'), ((977, 999), 'numpy.isfinite', 'np.isfinite', (['ln_prior_'], {}), '(ln_prior_)\n', (988, 999), True, 'import numpy as np\n'), ((2256, 2278), 'numpy.random.randn', 'np.random.randn', (['n_dim'], {}), 
'(n_dim)\n', (2271, 2278), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import heapq
import statistics
import math
def get_norm(t1, t2):
    (xa, ya, za) = t1
    (xb, yb, zb) = t2
    return math.sqrt((xa-xb)**2 + (ya-yb)**2 + (za-zb)**2)
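# sanity check (illustrative): get_norm((0, 0, 0), (1, 2, 2)) == 3.0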
def popularity(image,k):
(m,n,_) = image.shape
d = {}
for i in range(m):
for j in range(n):
t = tuple(image[i,j])
if t in d:
d[t] += 1
else:
d[t] = 1
    top_k_colors = heapq.nlargest(k, d, key=d.get)
return top_k_colors
def popularity_quant(image, k):
finalImage = image.copy()
color_map = popularity(image, k)
(m,n,_) = image.shape
for i in range(m):
for j in range(n):
t = tuple(image[i,j])
min_dist = 100000000.0
for col in color_map:
dist = get_norm(t, col)
if min_dist > dist :
min_dist = dist
min_col = col
finalImage[i,j] = np.asarray(min_col)
return finalImage
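# A vectorized alternative to the per-pixel loop (a sketch, not from the
# original script): broadcast distances against the palette and take argmin.
# palette = np.asarray(popularity(image, k), dtype=float)              # (k, 3)
# dists = np.linalg.norm(image[:, :, None, :] - palette[None, None, :, :], axis=3)
# quantized = palette[np.argmin(dists, axis=2)].astype(image.dtype)    # (m, n, 3)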
test_image = cv2.imread('test1.png')
img = popularity_quant(test_image, 10)
cv2.imshow('Popularity Cut image',img)
cv2.waitKey()
cv2.destroyAllWindows()
cv2.imwrite('popularity_test1.png', img)
|
[
"math.sqrt",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imwrite",
"numpy.asarray",
"heapq.nlargest",
"cv2.imread",
"cv2.imshow"
] |
[((1057, 1080), 'cv2.imread', 'cv2.imread', (['"""test1.png"""'], {}), "('test1.png')\n", (1067, 1080), False, 'import cv2\n'), ((1121, 1160), 'cv2.imshow', 'cv2.imshow', (['"""Popularity Cut image"""', 'img'], {}), "('Popularity Cut image', img)\n", (1131, 1160), False, 'import cv2\n'), ((1160, 1173), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1171, 1173), False, 'import cv2\n'), ((1174, 1197), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1195, 1197), False, 'import cv2\n'), ((1199, 1239), 'cv2.imwrite', 'cv2.imwrite', (['"""popularity_test1.png"""', 'img'], {}), "('popularity_test1.png', img)\n", (1210, 1239), False, 'import cv2\n'), ((152, 206), 'math.sqrt', 'math.sqrt', (['(xa - xb ^ 2 + (ya - yb) ^ 2 + (za - zb) ^ 2)'], {}), '(xa - xb ^ 2 + (ya - yb) ^ 2 + (za - zb) ^ 2)\n', (161, 206), False, 'import math\n'), ((455, 486), 'heapq.nlargest', 'heapq.nlargest', (['k', 'd'], {'key': 'd.get'}), '(k, d, key=d.get)\n', (469, 486), False, 'import heapq\n'), ((1001, 1020), 'numpy.asarray', 'np.asarray', (['min_col'], {}), '(min_col)\n', (1011, 1020), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import os
import tensorflow as tf
import zipfile as zp
import subprocess
import glob
import json
from PIL import Image
from collections import OrderedDict
import shutil
import stat
import sys
def convert_to_dlc(script_path, frozen_model_file, snpe_root, input_node='input', output_node='output', image_size=224):
print('converting ' + frozen_model_file + ' to snpe dlc format')
sys.stdout.flush()
model_name_ = os.path.splitext(os.path.split(frozen_model_file)[1])[0]
dlc_path = 'models/{}.dlc'.format(model_name_)
dlc_full_path = os.path.join(snpe_root, 'benchmarks', dlc_path)
# if os.path.exists(dlc_full_path):
# return dlc_path
if not os.path.exists(os.path.dirname(dlc_full_path)):
os.makedirs(os.path.dirname(dlc_full_path))
cmd = [script_path,
'--graph', os.path.abspath(frozen_model_file),
'--input_dim', input_node, '1,{0},{0},3'.format(image_size),
'--out_node', output_node,
'--allow_unconsumed_nodes',
'--dlc', dlc_full_path]
subprocess.call(cmd)
print()
sys.stdout.flush()
return dlc_path
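    # The assembled command is equivalent to running (illustrative paths):
    #   snpe-tensorflow-to-dlc --graph model.pb --input_dim input 1,224,224,3 \
    #       --out_node output --allow_unconsumed_nodes --dlc models/model.dlc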
# print('INFO: Creating ' + DLC_QUANTIZED_FILENAME + ' quantized model')
# data_cropped_dir = os.path.join(os.path.join(model_dir, 'data'), 'cropped')
# cmd = ['snpe-dlc-quantize',
# '--input_dlc', os.path.join(dlc_dir, DLC_FILENAME),
# '--input_list', os.path.join(data_cropped_dir, RAW_LIST_FILE),
# '--output_dlc', os.path.join(dlc_dir, DLC_QUANTIZED_FILENAME)]
# subprocess.call(cmd)
def __get_img_raw(img_file):
img_file = os.path.abspath(img_file)
img = Image.open(img_file)
img_ndarray = np.array(img) # read it
if len(img_ndarray.shape) != 3:
raise RuntimeError('Image shape' + str(img_ndarray.shape))
if img_ndarray.shape[2] != 3:
raise RuntimeError('Require image with rgb but channel is %d' % img_ndarray.shape[2])
    # channel order here is rgb; bgr reversal, if requested, happens later
return img_ndarray
def __create_mean_raw(img_raw, mean_rgb):
if img_raw.shape[2] != 3:
raise RuntimeError('Require image with rgb but channel is %d' % img_raw.shape[2])
img_dim = (img_raw.shape[0], img_raw.shape[1])
mean_raw_r = np.empty(img_dim)
mean_raw_r.fill(mean_rgb[0])
mean_raw_g = np.empty(img_dim)
mean_raw_g.fill(mean_rgb[1])
mean_raw_b = np.empty(img_dim)
mean_raw_b.fill(mean_rgb[2])
# create with c, h, w shape first
tmp_transpose_dim = (img_raw.shape[2], img_raw.shape[0], img_raw.shape[1])
mean_raw = np.empty(tmp_transpose_dim)
mean_raw[0] = mean_raw_r
mean_raw[1] = mean_raw_g
mean_raw[2] = mean_raw_b
# back to h, w, c
mean_raw = np.transpose(mean_raw, (1, 2, 0))
return mean_raw.astype(np.float32)
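# note: the three fills above are equivalent to one broadcast (illustrative):
# np.full(img_raw.shape, mean_rgb, dtype=np.float32)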
def __create_raw_img(img_file, mean_rgb, div, req_bgr_raw, save_uint8):
img_raw = __get_img_raw(img_file)
mean_raw = __create_mean_raw(img_raw, mean_rgb)
snpe_raw = img_raw - mean_raw
snpe_raw = snpe_raw.astype(np.float32)
# scalar data divide
snpe_raw /= div
if req_bgr_raw:
snpe_raw = snpe_raw[..., ::-1]
if save_uint8:
snpe_raw = snpe_raw.astype(np.uint8)
else:
snpe_raw = snpe_raw.astype(np.float32)
img_file = os.path.abspath(img_file)
filename, ext = os.path.splitext(img_file)
snpe_raw_filename = filename
snpe_raw_filename += '.raw'
snpe_raw.tofile(snpe_raw_filename)
return 0
def __resize_square_to_jpg(src, dst, size):
src_img = Image.open(src)
# If black and white image, convert to rgb (all 3 channels the same)
    if len(np.shape(src_img)) == 2:
        src_img = src_img.convert(mode='RGB')
# center crop to square
width, height = src_img.size
short_dim = min(height, width)
crop_coord = (
(width - short_dim) / 2,
(height - short_dim) / 2,
(width + short_dim) / 2,
(height + short_dim) / 2
)
img = src_img.crop(crop_coord)
# resize to alexnet size
dst_img = img.resize((size, size), Image.ANTIALIAS)
# save output - save determined from file extension
dst_img.save(dst)
return 0
def convert_img(src, dest, size):
print("converting images...")
for root, dirs, files in os.walk(src):
for jpgs in files:
src_image = os.path.join(root, jpgs)
if '.jpg' in src_image:
print(src_image)
dest_image = os.path.join(dest, jpgs)
__resize_square_to_jpg(src_image, dest_image, size)
for root, dirs, files in os.walk(dest):
for jpgs in files:
src_image = os.path.join(root, jpgs)
print(src_image)
mean_rgb = (128, 128, 128)
__create_raw_img(src_image, mean_rgb, 128, False, False)
def create_file_list(input_dir, output_filename, ext_pattern, print_out=True, rel_path=True):
input_dir = os.path.abspath(input_dir)
output_filename = os.path.abspath(output_filename)
output_dir = os.path.dirname(output_filename)
if not os.path.isdir(input_dir):
raise RuntimeError('input_dir %s is not a directory' % input_dir)
if not os.path.isdir(output_dir):
raise RuntimeError('output_filename %s directory does not exist' % output_dir)
glob_path = os.path.join(input_dir, ext_pattern)
file_list = glob.glob(glob_path)
if rel_path:
file_list = [os.path.relpath(file_path, output_dir) for file_path in file_list]
if len(file_list) <= 0:
if print_out:
print('no results with %s' % glob_path)
else:
with open(output_filename, 'w') as f:
f.write('\n'.join(file_list))
if print_out:
print('%s created listing %d files.' % (output_filename, len(file_list)))
def prepare_data_images(image_size, snpe_root):
# make a copy of the image files from the alex net model data dir
image_dir_relative_path = 'models/alexnet/data'
image_dir = os.path.join(snpe_root, image_dir_relative_path)
data_cropped_dir = os.path.join(image_dir, 'cropped_%s' % image_size)
raw_list = os.path.join(image_dir, 'target_raw_list_%s.txt' % image_size)
if not os.path.exists(raw_list):
os.makedirs(data_cropped_dir)
print('creating inception style raw image data')
convert_img(image_dir, data_cropped_dir, image_size)
print('Create file lists')
create_file_list(data_cropped_dir, raw_list, '*.raw')
print()
sys.stdout.flush()
return data_cropped_dir, raw_list
# generate bench config json file
def gen_config(dlc_path, input_list_file, input_data, processors_, runs):
name = os.path.splitext(os.path.basename(dlc_path))[0]
config = OrderedDict()
config['Name'] = name
config['HostRootPath'] = name
config['HostResultsDir'] = os.path.join(name, 'results')
config['DevicePath'] = '/data/local/tmp/snpebm'
config['Devices'] = ["123"]
config['Runs'] = runs
model = OrderedDict()
model['Name'] = name
model['Dlc'] = dlc_path
model['InputList'] = input_list_file
model['Data'] = [input_data]
config['Model'] = model
config['Runtimes'] = processors_
config['Measurements'] = ['timing'] # ['timing', 'mem']
return config
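# The resulting JSON looks roughly like this (values illustrative):
# {"Name": "<model>", "HostRootPath": "<model>", "HostResultsDir": "<model>/results",
#  "DevicePath": "/data/local/tmp/snpebm", "Devices": ["123"], "Runs": 10,
#  "Model": {"Name": "<model>", "Dlc": "...", "InputList": "...", "Data": ["..."]},
#  "Runtimes": ["GPU", "DSP", "CPU"], "Measurements": ["timing"]}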
def write_config(config, save_path):
with open(save_path, 'w') as f:
json.dump(config, f, indent=4)
def check_processor_arg(processor_str):
default = "GPU,DSP,CPU,GPU_FP16"
processor_list = default.split(',')
parsed_processors = []
for p in processor_str.split(','):
if p not in processor_list:
print("please use either GPU, DSP or CPU or any combination of them, seperated by comma(',')")
print("e.g. -p GPU,DSP means running on GPU and DSP; -p CPU means only running on CPU")
exit(-1)
else:
parsed_processors.append(p)
return parsed_processors
"""
caution1: rename data/snpe-1.31.0.522 to data/snpe-1.31.0
caution2: manually change executable permission on the phone through adb:
adb shell "chmod a+x /data/local/tmp/snpebm/artifacts/arm-android-clang6.0/bin/snpe*"
python snpe/run_snpe.py --model data/resnet_v1_50/resnet_v1_50.frozen.pb --snpe_sdk data/snpe-1.31.0.zip 2>&1 | tee run_resnet50.log
(tested with pip install tensorflow==1.14)
"""
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-sdk", "--snpe_sdk", type=str, default="data/snpe-1.15.0.zip",
help="path to snpe sdk zip file")
parser.add_argument("-p", "--processors", type=check_processor_arg, default="GPU,DSP,CPU",
help="processor to use, use GPU,DSP,CPU or any combination of them (separated by comma)")
parser.add_argument("-n", "--runs", type=int, default=10,
help="number of times to repeat the run")
parser.add_argument("-ndk", "--android_ndk", type=str,
help="path to android ndk")
parser.add_argument("-m", "--model", type=str, default="data/mobilenet_v1/mobilenet_v1_1.0_224.frozen.pb",
help="frozen tensorflow model")
parser.add_argument("-s", "--image_size", type=int, default=224,
help="input image size")
parser.add_argument("-i", "--input_node", type=str, default='input',
help="input node name in the model")
parser.add_argument("-o", "--output_node", type=str, default='output',
help="output node name in the model")
parser.add_argument("-t", "--show_time", action='store_true',
help="show time in csv")
return parser.parse_args()
if __name__ == '__main__':
web_url = "https://developer.qualcomm.com/software/snapdragon-neural-processing-engine-ai"
tf_path = os.path.dirname(tf.__file__)
args = parse_args()
snpe_sdk_file = args.snpe_sdk
snpe_dir = os.path.dirname(snpe_sdk_file)
snpe_sdk_path = os.path.abspath(os.path.splitext(snpe_sdk_file)[0])
snpe_name = os.path.basename(snpe_sdk_path)
if not os.path.exists(snpe_sdk_file):
print("please download SNPE SDK from:", web_url)
exit(-1)
elif not os.path.exists(snpe_sdk_path):
print("extracting snpe to:", snpe_sdk_path, "...")
zp_ref = zp.ZipFile(snpe_sdk_file, 'r')
zp_ref.extractall(snpe_dir)
zp_ref.close()
print("snpe sdk extraction done.")
else:
print("found snpe sdk at:", snpe_sdk_path)
sys.stdout.flush()
print()
sys.stdout.flush()
ndk_path = os.environ.get("ANDROID_NDK", None) or args.android_ndk
if not ndk_path:
print("please set ndk path either by specify -ndk or set 'export ANDROID_NDK=path/to/android-ndk'")
exit(-1)
# may install pkg deps
if not os.path.exists('/tmp/{}_deps_checked'.format(snpe_name)):
# print("copying libs from ndk to snpe sdk...")
#
# shutil.copy('{}/sources/cxx-stl/gnu-libstdc++/4.9/libs/arm64-v8a/libgnustl_shared.so'.format(ndk_path),
# '{}/lib/aarch64-linux-gcc4.9'.format(snpe_sdk_path))
#
# shutil.copy('{}/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a/libgnustl_shared.so'.format(ndk_path),
# '{}/lib/arm-android-gcc4.9'.format(snpe_sdk_path))
# print("gcc libs copied.")
# print()
# sys.stdout.flush()
print("checking package dependencies...")
check_cmd = 'yes | bash {}/bin/dependencies.sh'.format(snpe_sdk_path)
subprocess.call(check_cmd, shell=True)
print("checking python dependencies...")
check_cmd = 'yes | bash {}/bin/check_python_depends.sh'.format(snpe_sdk_path)
subprocess.call(check_cmd, shell=True)
for os_type in ["arm-android-gcc4.9", "arm-android-clang6.0", "x86_64-linux-clang"]:
bin_dir = "{}/bin/{}".format(snpe_sdk_path, os_type)
if not os.path.exists(bin_dir):
continue
for bin_file in os.listdir(bin_dir):
script_file_path = os.path.join("{}/bin/{}".format(snpe_sdk_path, os_type), bin_file)
print('set script:', script_file_path, ' to executable')
sys.stdout.flush()
st = os.stat(script_file_path)
os.chmod(script_file_path, st.st_mode | stat.S_IEXEC)
open('/tmp/{}_deps_checked'.format(snpe_name), 'a').close()
os.environ["SNPE_ROOT"] = snpe_sdk_path
py_path = os.environ.get("PYTHONPATH", "")
os.environ["PYTHONPATH"] = "{0}/lib/python:{1}".format(snpe_sdk_path, py_path)
os.environ["TENSORFLOW_HOME"] = tf_path
bin_path = os.environ.get("PATH", "")
os.environ["PATH"] = "{}/bin/x86_64-linux-clang:{}".format(snpe_sdk_path, bin_path)
model_file = args.model
if not os.path.exists(model_file):
print(model_file, "not exist!")
exit(-1)
convert_dlc_script = "{}/bin/x86_64-linux-clang/snpe-tensorflow-to-dlc".format(snpe_sdk_path)
dlc_file = convert_to_dlc(convert_dlc_script, model_file, snpe_sdk_path,
args.input_node, args.output_node, args.image_size)
data_dir, raw_file_list = prepare_data_images(args.image_size, snpe_sdk_path)
print('generating benchmark configuration...')
sys.stdout.flush()
config = gen_config(dlc_file, raw_file_list, data_dir, args.processors, args.runs)
model_name = os.path.splitext(os.path.split(model_file)[1])[0]
config_path = os.path.join('{}/benchmarks'.format(snpe_sdk_path), "{}.json".format(model_name))
write_config(config, config_path)
print('benchmark configuration generated.')
print()
sys.stdout.flush()
print('running benchmark on {}...'.format(' '.join(args.processors)))
print()
sys.stdout.flush()
bench_cmd = ['python', 'snpe_bench.py', '-c', config_path, '-a']
subprocess.call(bench_cmd, cwd='{}/benchmarks'.format(snpe_sdk_path))
stats_file = model_file.replace('.pb', '.csv')
shutil.copy('{0}/benchmarks/{1}/results/latest_results/benchmark_stats_{1}.csv'.format(snpe_sdk_path, model_name),
stats_file)
print('benchmark results saved to:', stats_file)
if args.show_time:
import csv
with open(stats_file, 'r') as f, open('{}.txt'.format(stats_file), 'w') as f2:
reader = csv.reader(f)
next(reader)
for row in reader:
if 'Total Inference Time' in row:
gpu_time = float(row[3])/1000
dsp_time = float(row[9])/1000
cpu_time = float(row[18])/1000
header = 'GPU, DSP, CPU'
print(header)
f2.write(header + '\n')
time_str = '{:4.2f}, {:4.2f}, {:4.2f}'.format(gpu_time, dsp_time, cpu_time)
print(time_str)
f2.write(time_str + '\n')
break
print('all done.')
|
[
"csv.reader",
"argparse.ArgumentParser",
"numpy.empty",
"os.walk",
"numpy.shape",
"sys.stdout.flush",
"glob.glob",
"os.path.join",
"os.path.abspath",
"os.path.dirname",
"numpy.transpose",
"os.path.exists",
"json.dump",
"os.chmod",
"os.stat",
"os.path.basename",
"subprocess.call",
"os.listdir",
"zipfile.ZipFile",
"os.makedirs",
"os.path.isdir",
"PIL.Image.open",
"os.environ.get",
"numpy.array",
"os.path.splitext",
"os.path.relpath",
"collections.OrderedDict",
"os.path.split"
] |
[((532, 550), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (548, 550), False, 'import sys\n'), ((697, 744), 'os.path.join', 'os.path.join', (['snpe_root', '"""benchmarks"""', 'dlc_path'], {}), "(snpe_root, 'benchmarks', dlc_path)\n", (709, 744), False, 'import os\n'), ((1193, 1213), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (1208, 1213), False, 'import subprocess\n'), ((1230, 1248), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1246, 1248), False, 'import sys\n'), ((1752, 1777), 'os.path.abspath', 'os.path.abspath', (['img_file'], {}), '(img_file)\n', (1767, 1777), False, 'import os\n'), ((1788, 1808), 'PIL.Image.open', 'Image.open', (['img_file'], {}), '(img_file)\n', (1798, 1808), False, 'from PIL import Image\n'), ((1827, 1840), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1835, 1840), True, 'import numpy as np\n'), ((2379, 2396), 'numpy.empty', 'np.empty', (['img_dim'], {}), '(img_dim)\n', (2387, 2396), True, 'import numpy as np\n'), ((2447, 2464), 'numpy.empty', 'np.empty', (['img_dim'], {}), '(img_dim)\n', (2455, 2464), True, 'import numpy as np\n'), ((2515, 2532), 'numpy.empty', 'np.empty', (['img_dim'], {}), '(img_dim)\n', (2523, 2532), True, 'import numpy as np\n'), ((2698, 2725), 'numpy.empty', 'np.empty', (['tmp_transpose_dim'], {}), '(tmp_transpose_dim)\n', (2706, 2725), True, 'import numpy as np\n'), ((2850, 2883), 'numpy.transpose', 'np.transpose', (['mean_raw', '(1, 2, 0)'], {}), '(mean_raw, (1, 2, 0))\n', (2862, 2883), True, 'import numpy as np\n'), ((3408, 3433), 'os.path.abspath', 'os.path.abspath', (['img_file'], {}), '(img_file)\n', (3423, 3433), False, 'import os\n'), ((3454, 3480), 'os.path.splitext', 'os.path.splitext', (['img_file'], {}), '(img_file)\n', (3470, 3480), False, 'import os\n'), ((3659, 3674), 'PIL.Image.open', 'Image.open', (['src'], {}), '(src)\n', (3669, 3674), False, 'from PIL import Image\n'), ((4386, 4398), 'os.walk', 'os.walk', (['src'], {}), '(src)\n', (4393, 4398), False, 'import os\n'), ((4697, 4710), 'os.walk', 'os.walk', (['dest'], {}), '(dest)\n', (4704, 4710), False, 'import os\n'), ((5037, 5063), 'os.path.abspath', 'os.path.abspath', (['input_dir'], {}), '(input_dir)\n', (5052, 5063), False, 'import os\n'), ((5086, 5118), 'os.path.abspath', 'os.path.abspath', (['output_filename'], {}), '(output_filename)\n', (5101, 5118), False, 'import os\n'), ((5136, 5168), 'os.path.dirname', 'os.path.dirname', (['output_filename'], {}), '(output_filename)\n', (5151, 5168), False, 'import os\n'), ((5424, 5460), 'os.path.join', 'os.path.join', (['input_dir', 'ext_pattern'], {}), '(input_dir, ext_pattern)\n', (5436, 5460), False, 'import os\n'), ((5477, 5497), 'glob.glob', 'glob.glob', (['glob_path'], {}), '(glob_path)\n', (5486, 5497), False, 'import glob\n'), ((6109, 6157), 'os.path.join', 'os.path.join', (['snpe_root', 'image_dir_relative_path'], {}), '(snpe_root, image_dir_relative_path)\n', (6121, 6157), False, 'import os\n'), ((6182, 6232), 'os.path.join', 'os.path.join', (['image_dir', "('cropped_%s' % image_size)"], {}), "(image_dir, 'cropped_%s' % image_size)\n", (6194, 6232), False, 'import os\n'), ((6248, 6310), 'os.path.join', 'os.path.join', (['image_dir', "('target_raw_list_%s.txt' % image_size)"], {}), "(image_dir, 'target_raw_list_%s.txt' % image_size)\n", (6260, 6310), False, 'import os\n'), ((6619, 6637), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6635, 6637), False, 'import sys\n'), ((6858, 6871), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6869, 
6871), False, 'from collections import OrderedDict\n'), ((6963, 6992), 'os.path.join', 'os.path.join', (['name', '"""results"""'], {}), "(name, 'results')\n", (6975, 6992), False, 'import os\n'), ((7116, 7129), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7127, 7129), False, 'from collections import OrderedDict\n'), ((8487, 8512), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8510, 8512), False, 'import argparse\n'), ((9947, 9975), 'os.path.dirname', 'os.path.dirname', (['tf.__file__'], {}), '(tf.__file__)\n', (9962, 9975), False, 'import os\n'), ((10052, 10082), 'os.path.dirname', 'os.path.dirname', (['snpe_sdk_file'], {}), '(snpe_sdk_file)\n', (10067, 10082), False, 'import os\n'), ((10171, 10202), 'os.path.basename', 'os.path.basename', (['snpe_sdk_path'], {}), '(snpe_sdk_path)\n', (10187, 10202), False, 'import os\n'), ((10677, 10695), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10693, 10695), False, 'import sys\n'), ((12639, 12671), 'os.environ.get', 'os.environ.get', (['"""PYTHONPATH"""', '""""""'], {}), "('PYTHONPATH', '')\n", (12653, 12671), False, 'import os\n'), ((12815, 12841), 'os.environ.get', 'os.environ.get', (['"""PATH"""', '""""""'], {}), "('PATH', '')\n", (12829, 12841), False, 'import os\n'), ((13453, 13471), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13469, 13471), False, 'import sys\n'), ((13830, 13848), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13846, 13848), False, 'import sys\n'), ((13940, 13958), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13956, 13958), False, 'import sys\n'), ((969, 1003), 'os.path.abspath', 'os.path.abspath', (['frozen_model_file'], {}), '(frozen_model_file)\n', (984, 1003), False, 'import os\n'), ((5181, 5205), 'os.path.isdir', 'os.path.isdir', (['input_dir'], {}), '(input_dir)\n', (5194, 5205), False, 'import os\n'), ((5293, 5318), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (5306, 5318), False, 'import os\n'), ((6323, 6347), 'os.path.exists', 'os.path.exists', (['raw_list'], {}), '(raw_list)\n', (6337, 6347), False, 'import os\n'), ((6357, 6386), 'os.makedirs', 'os.makedirs', (['data_cropped_dir'], {}), '(data_cropped_dir)\n', (6368, 6386), False, 'import os\n'), ((7486, 7516), 'json.dump', 'json.dump', (['config', 'f'], {'indent': '(4)'}), '(config, f, indent=4)\n', (7495, 7516), False, 'import json\n'), ((10215, 10244), 'os.path.exists', 'os.path.exists', (['snpe_sdk_file'], {}), '(snpe_sdk_file)\n', (10229, 10244), False, 'import os\n'), ((10712, 10747), 'os.environ.get', 'os.environ.get', (['"""ANDROID_NDK"""', 'None'], {}), "('ANDROID_NDK', None)\n", (10726, 10747), False, 'import os\n'), ((11685, 11723), 'subprocess.call', 'subprocess.call', (['check_cmd'], {'shell': '(True)'}), '(check_cmd, shell=True)\n', (11700, 11723), False, 'import subprocess\n'), ((11868, 11906), 'subprocess.call', 'subprocess.call', (['check_cmd'], {'shell': '(True)'}), '(check_cmd, shell=True)\n', (11883, 11906), False, 'import subprocess\n'), ((12970, 12996), 'os.path.exists', 'os.path.exists', (['model_file'], {}), '(model_file)\n', (12984, 12996), False, 'import os\n'), ((838, 868), 'os.path.dirname', 'os.path.dirname', (['dlc_full_path'], {}), '(dlc_full_path)\n', (853, 868), False, 'import os\n'), ((891, 921), 'os.path.dirname', 'os.path.dirname', (['dlc_full_path'], {}), '(dlc_full_path)\n', (906, 921), False, 'import os\n'), ((3759, 3776), 'numpy.shape', 'np.shape', (['src_img'], {}), '(src_img)\n', (3767, 3776), True, 
'import numpy as np\n'), ((4451, 4475), 'os.path.join', 'os.path.join', (['root', 'jpgs'], {}), '(root, jpgs)\n', (4463, 4475), False, 'import os\n'), ((4763, 4787), 'os.path.join', 'os.path.join', (['root', 'jpgs'], {}), '(root, jpgs)\n', (4775, 4787), False, 'import os\n'), ((5537, 5575), 'os.path.relpath', 'os.path.relpath', (['file_path', 'output_dir'], {}), '(file_path, output_dir)\n', (5552, 5575), False, 'import os\n'), ((6814, 6840), 'os.path.basename', 'os.path.basename', (['dlc_path'], {}), '(dlc_path)\n', (6830, 6840), False, 'import os\n'), ((10119, 10150), 'os.path.splitext', 'os.path.splitext', (['snpe_sdk_file'], {}), '(snpe_sdk_file)\n', (10135, 10150), False, 'import os\n'), ((10333, 10362), 'os.path.exists', 'os.path.exists', (['snpe_sdk_path'], {}), '(snpe_sdk_path)\n', (10347, 10362), False, 'import os\n'), ((10440, 10470), 'zipfile.ZipFile', 'zp.ZipFile', (['snpe_sdk_file', '"""r"""'], {}), "(snpe_sdk_file, 'r')\n", (10450, 10470), True, 'import zipfile as zp\n'), ((10642, 10660), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10658, 10660), False, 'import sys\n'), ((12163, 12182), 'os.listdir', 'os.listdir', (['bin_dir'], {}), '(bin_dir)\n', (12173, 12182), False, 'import os\n'), ((14505, 14518), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (14515, 14518), False, 'import csv\n'), ((586, 618), 'os.path.split', 'os.path.split', (['frozen_model_file'], {}), '(frozen_model_file)\n', (599, 618), False, 'import os\n'), ((4574, 4598), 'os.path.join', 'os.path.join', (['dest', 'jpgs'], {}), '(dest, jpgs)\n', (4586, 4598), False, 'import os\n'), ((12085, 12108), 'os.path.exists', 'os.path.exists', (['bin_dir'], {}), '(bin_dir)\n', (12099, 12108), False, 'import os\n'), ((12375, 12393), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (12391, 12393), False, 'import sys\n'), ((12415, 12440), 'os.stat', 'os.stat', (['script_file_path'], {}), '(script_file_path)\n', (12422, 12440), False, 'import os\n'), ((12457, 12510), 'os.chmod', 'os.chmod', (['script_file_path', '(st.st_mode | stat.S_IEXEC)'], {}), '(script_file_path, st.st_mode | stat.S_IEXEC)\n', (12465, 12510), False, 'import os\n'), ((13594, 13619), 'os.path.split', 'os.path.split', (['model_file'], {}), '(model_file)\n', (13607, 13619), False, 'import os\n')]
|
#!/usr/bin/env python
"""
Southern California Earthquake Center Broadband Platform
Copyright 2010-2016 Southern California Earthquake Center
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import shutil
import matplotlib as mpl
mpl.use('AGG', warn=False)
import pylab
import numpy as np
# Import Broadband modules
import bband_utils
import install_cfg
from station_list import StationList
# Import plot config file
import plot_config
def create_boore_asc2smc(control_file, input_file,
data_column, num_headers,
extension_string):
"""
This function creates the control file for the asc2smc converter tool
"""
ctl_file = open(control_file, 'w')
ctl_file.write("!Control file for ASC2SMC ! first line\n")
ctl_file.write("! Revision of program involving a change in the "
"control file on this date:\n")
ctl_file.write(" 02/02/12\n")
ctl_file.write("!Name of summary file:\n")
ctl_file.write(" asc2smc.sum\n")
ctl_file.write("!n2skip (-1=headers preceded by !; 0=no headers; "
"otherwise number of headers to skip)\n")
ctl_file.write(" %d\n" % (num_headers))
ctl_file.write("!write headers to smc file "
"(even if n2skip > 0)? (Y/N)\n")
ctl_file.write(" Y\n")
ctl_file.write("!sps (0.0 = obtain from input file)\n")
ctl_file.write(" 0\n")
ctl_file.write("!N columns to read, column number for "
"time and data columns \n")
ctl_file.write("! (for files made using blpadflt, period is in "
"column 1 and sd, pv, pa, rv, \n")
ctl_file.write("! aa are in columns 2, 3, 4, 5, 6, respectively)\n")
ctl_file.write("! Note: if sps .ne. 0.0, then column number for time "
"is ignored (but a placeholder is\n")
ctl_file.write("! still needed--e.g., 1 1 1 (read one column, which "
"contains the data; 1 20 1 would be the same)\n")
ctl_file.write("! But note: if the data are not in the first column, "
"but only the data column is to be read\n")
ctl_file.write("! (because sps will be used to establish "
"the time values),\n")
ctl_file.write("! then ncolumns must be the column corresponding to "
"the data. For example, assume that\n")
ctl_file.write("! the data are in column 3 and that columns 1 and 2 "
"contain time and some other variable, but\n")
ctl_file.write("! the time column is not to be used (perhaps because "
"accumulated error in creating the column\n")
ctl_file.write("! leads to a slight shift in the time values). "
"Then the input line should be:\n")
ctl_file.write("! 3 1 3\n")
ctl_file.write("!\n")
ctl_file.write("! This program assumes one data point per row; if "
"there are more points (as, for example,\n")
ctl_file.write("! in files with N points per line), "
"use the program wrapped2asc).\n")
ctl_file.write("!\n")
ctl_file.write(" 3 1 %d\n" % (data_column))
ctl_file.write("!Xfactr\n")
ctl_file.write(" 1.0\n")
ctl_file.write("!Read input format (used if the format is such that "
"the values are not separated by spaces,\n")
ctl_file.write("!in which case a free format cannot be "
"used for input)?\n")
ctl_file.write(" N\n")
ctl_file.write("!If yes, specify a format; if not, "
"still need a placeholder\n")
ctl_file.write(" (3e13.5)\n")
ctl_file.write("!For output, use old (standard) smc format or new\n")
ctl_file.write('!higher precision format. Specify "high" for\n')
ctl_file.write("!high precision; any other word defaults to standard\n")
ctl_file.write("!precision (but some word is needed as "
"a placeholder, even if\n")
ctl_file.write("!standard precision is desired).\n")
ctl_file.write(" high\n")
ctl_file.write("!String to append to input file name "
"for the output filename.\n")
ctl_file.write(" %s\n" % (extension_string))
ctl_file.write('!Input file name (time,data pairs; "stop" in any '
'column to quit):\n')
ctl_file.write("%s\n" % (input_file))
ctl_file.write("STOP\n")
ctl_file.close()
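# Example call (hypothetical file names), mirroring FAS.run() further below:
# write a control file that converts data column 2 (the NS component) of a
# BBP file with, say, 3 header lines, appending ".smc8.NS" to the output name:
#   create_boore_asc2smc("asc2smc.ctl", "station.acc.bbp", 2, 3, ".smc8.NS")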
def create_boore_smc2fs2(control_file, input_file, name_string):
"""
This function creates the control file for the smc2fs2 FAS tool
"""
ctl_file = open(control_file, 'w')
ctl_file.write('!Control file for program SMC2FS2\n')
ctl_file.write('! Revision of program involving a change in the control '
'file on this date:\n')
ctl_file.write(' 03/10/10\n')
ctl_file.write('! As many comment lines as desired, each '
'starting with "!"\n')
ctl_file.write('! The string "pp:" indicates a new set '
'of processing parameters\n')
ctl_file.write('! to be applied to the following smc files. '
'The parameters are given on the\n')
ctl_file.write('! lines following "pp:", until the next "pp:" line '
'or until "stop" is \n')
ctl_file.write('! encountered.\n')
ctl_file.write('! NOTE: Use the tapers with caution, '
'choosing them so that important signal\n')
ctl_file.write('! is not reduced by the tapering. '
'This can be particularly a problem with \n')
ctl_file.write('! analog data from relatively small earthquakes '
'that triggered near the \n')
ctl_file.write('! S-wave arrival. \n')
ctl_file.write('!\n')
ctl_file.write('! -----------------------------------------'
'------------------------------------\n')
ctl_file.write('!\n')
ctl_file.write('! Meaning of smoothing input parameters\n')
ctl_file.write('!\n')
ctl_file.write('! NO SMOOTHING\n')
ctl_file.write('! itype = 0\n')
ctl_file.write('! SMOOTHING OVER EQUALLY SPACED FREQUENCIES\n')
ctl_file.write('! itype = 1: box weighting function\n')
ctl_file.write('! smooth_param = width of box weighting function (Hz)\n')
ctl_file.write('! itype = 2: triangular weighting function\n')
ctl_file.write('! smooth_param = width of triangular '
'weighting function (Hz)\n')
ctl_file.write('! SMOOTHING OVER LOGARITHMICALLY SPACED FREQUENCIES\n')
ctl_file.write('! itype = 3: box weighting function\n')
ctl_file.write('! smooth_param = xi, which is the fraction of '
'a decade for the\n')
ctl_file.write('! box weighting function \n')
ctl_file.write('! itype = 4: triangular weighting function\n')
ctl_file.write('! smooth_param = xi, which is the fraction of '
'a decade for the\n')
ctl_file.write('! triangular weighting function \n')
ctl_file.write('! itype = 5: Konno and Ohmachi weighting function '
'(see BSSA 88, 228-241)\n')
ctl_file.write('! smooth_param = xi, which is the fraction '
'of a decade for which\n')
ctl_file.write('! the Konno and Ohmachi weighting '
'function is greater\n')
ctl_file.write('! than 0.043.(it is related to\n')
ctl_file.write('! their smoothing parameter b '
'by the equation\n')
ctl_file.write('! b = 4.0/smooth_param, so we have '
'this correspondence between\n')
ctl_file.write('! b and smooth_param\n')
ctl_file.write('! b smooth_param \n')
ctl_file.write('! 10 0.40\n')
ctl_file.write('! 20 0.20\n')
ctl_file.write('! 40 0.10\n')
ctl_file.write('! \n')
ctl_file.write('! b = 40 seems to be commonly used, '
'but I do not think that it\n')
ctl_file.write('! gives enough smoothing; '
'I PREFER SMOOTH_PARAM = 0.2, \n')
ctl_file.write('! corresponding to b = 20. \n')
ctl_file.write('!\n')
ctl_file.write('! ipow = power of FAS to be smoothed '
'(2 = smoothing energy spectrum)\n')
ctl_file.write('!\n')
ctl_file.write('! df_smooth: Note: need df_smooth for '
'linearly-spaced smoothers, \n')
ctl_file.write('! and generally it should be the df from the fft. '
'For general x data, it is\n')
ctl_file.write('! the spacing between x values, assumed to be constant, '
'The reason for\n')
ctl_file.write('! including it as an input parameter is to "fool" the\n')
ctl_file.write('! program to do smoothing over a specified '
'number of points by\n')
ctl_file.write('! setting df_smooth = 1 and smooth_param = number '
'of points (including \n')
ctl_file.write('! points with zero weight at ends; e.g., '
'smooth_param = 5 will \n')
ctl_file.write('! give a smoother with weights 0, 1/4, 2/4, 1/4, 0; '
'smooth_param\n')
ctl_file.write('! should be odd).\n')
ctl_file.write('!\n')
ctl_file.write('! ------------------------------------'
'-----------------------------------------\n')
ctl_file.write('! Meaning of frequency specification parameters:\n')
ctl_file.write('!\n')
ctl_file.write('!SPECIFY_FREQUENCIES? (y/n):\n')
ctl_file.write('! <enter Y or N>\n')
ctl_file.write('!FREQUENCY SPECIFICATION: \n')
ctl_file.write('! If specify_frequencies = Y, then enter the \n')
ctl_file.write('! number of frequencies, freq(1), freq(2)..., '
'freq(nfreq)\n')
ctl_file.write('! If specify_frequencies = N, then enter \n')
ctl_file.write('! f_low, f_high, log-spaced (0=N, 1=Y), freq_param\n')
ctl_file.write('! if freq_param = 0.0, there is no interpolation, '
'and the FFT frequencies \n')
ctl_file.write('! are used between f_low and f_high '
'(log-spaced is ignored).\n')
ctl_file.write('! if freq_param /= 0.0 and log-spaced = 0, '
'then freq_param is the spacing of the\n')
ctl_file.write('! interpolated frequencies '
'between f_low and f_high\n')
ctl_file.write('! if freq_param /= 0.0 and log-spaced = 1, '
'then freq_param is the number of \n')
ctl_file.write('! interpolated frequencies between f_low and '
'f_high (NOTE: f_low must be > 0.0)\n')
ctl_file.write('! ---------------------------------------'
'--------------------------------------\n')
ctl_file.write('!\n')
ctl_file.write('!Name of summary file:\n')
ctl_file.write(' smc2fs2.sum\n')
ctl_file.write('PP: new set of parameters\n')
ctl_file.write('!tskip, tlength\n')
ctl_file.write(' 0.0 2000.0\n')
ctl_file.write('!dc_remove?\n')
ctl_file.write(' .true. \n')
ctl_file.write('!Length of taper at beginning and end of time series, '
'before adding zeros\n')
ctl_file.write('! to make the number of points in '
'the record a power of two.\n')
ctl_file.write(' 0.0 0.0\n')
ctl_file.write('!signnpw2(<0, backup for npw2, no zpad):\n')
ctl_file.write(' +1.0\n')
ctl_file.write('!smoothing: itype, ipow, df_smooth '
'(0 = FFT df), smooth_param\n')
ctl_file.write('! (see above for the meaning of these input parameters):\n')
ctl_file.write(' 0 1 0.0 0.20\n')
ctl_file.write('!SPECIFY_FREQUENCIES? (y/n):\n')
ctl_file.write(' N\n')
ctl_file.write('!FREQUENCY SPECIFICATION\n')
ctl_file.write(' 0.01 100.0 0 0.0 \n')
ctl_file.write('!character string to append to filename:\n')
ctl_file.write(' %s\n' % (name_string))
ctl_file.write('!Output in smc format (Y,N)?\n')
ctl_file.write('! ***IMPORTANT NOTE: Output cannot be in smc '
'format if use log-spaced \n')
ctl_file.write('! frequencies because programs such as smc2asc '
'have not been modified\n')
ctl_file.write('! to deal with log-spaced frequency.\n')
ctl_file.write(' n\n')
ctl_file.write('!Files to process:\n')
ctl_file.write('%s\n' % (input_file))
ctl_file.write('stop\n')
ctl_file.close()
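# Example call (hypothetical file names), mirroring FAS.run() further below:
#   create_boore_smc2fs2("smc2fs2.ctl", "station.acc.bbp.smc8.NS",
#                        ".no_smooth.fs.col")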
def read_fas_file(fas_file):
"""
Reads FAS file and returns freq and fas arrays
"""
freqs = []
fas = []
# Read input file
input_file = open(fas_file, 'r')
# Skip headers
for line in input_file:
line = line.strip()
# skip blank lines
if not line:
continue
if line.startswith("freq"):
break
for line in input_file:
line = line.strip()
# skip blank lines
if not line:
continue
pieces = line.split()
pieces = [float(piece) for piece in pieces]
freqs.append(pieces[0])
fas.append(pieces[1])
# All done!
input_file.close()
return freqs, fas
def plot_fas(freqs, ns_data, ew_data, eas_smoothed_data, fas_plot, station):
"""
Create a plot of both FAS components
"""
# Generate plot
# Set plot dims
pylab.gcf().set_size_inches(11, 8.5)
pylab.gcf().clf()
    # Set the plot title
    pylab.title("Station: %s" % (station), size=12)
pylab.plot(freqs, ns_data, 'b', lw=0.75, label="NS")
pylab.plot(freqs, ew_data, 'r', lw=0.75, label="EW")
pylab.plot(freqs, eas_smoothed_data, 'k', lw=1.25, label="Smoothed EAS")
pylab.legend(loc='upper right')
pylab.xscale('log')
pylab.yscale('log')
pylab.ylabel('Fourier Amplitude (cm/s)')
pylab.xlabel('Frequency (Hz)')
pylab.axis([0.01, 100, 0.001, 1000])
pylab.grid(True)
pylab.grid(b=True, which='major', linestyle='-', color='lightgray')
pylab.grid(b=True, which='minor', linewidth=0.5, color='gray')
# Save plot
pylab.savefig(fas_plot, format="png",
transparent=False, dpi=plot_config.dpi)
pylab.close()
def ko98_smoothing(freqs, data, delta_freq, bexp):
"""
# ** smoothing of a function y (equally-spaced, dx) with the "Konno-Ohmachi"
# ** function sin (alog10(f/fc)^exp) / alog10(f/fc)^exp) ^^4
# ** where fc is the frequency around which the smoothing is performed
# ** exp determines the exponent 10^(1/exp) is the half-width of the peak
# ** cf Konno & Ohmachi, 1998, BSSA 88-1, pp. 228-241
"""
nx = len(freqs)
data_smooth = np.zeros(nx)
fratio = np.power(10., (2.5 / bexp))
data_smooth[0] = data[0]
for index in range(1, nx):
freq = freqs[index]
# Added check to avoid division by zero later and NaNs in the output file
if freq == 0.0:
data_smooth[index] = data[index]
continue
fc1 = freq / fratio
fc2 = freq * fratio
index1 = int(fc1 / delta_freq)
index2 = int((fc2 / delta_freq) + 1)
if index1 <= 1:
index1 = 0
if index2 >= nx:
index2 = nx
a1 = 0.0
a2 = 0.0
for j in range(index1, index2):
if j != index:
# Extra check to avoid NaNs in output file
if freqs[j] == 0.0:
data_smooth[index] = data[index]
break
c1 = bexp * np.log10(freqs[j] / freq)
c1 = np.power(np.sin(c1) / c1, 4.0)
a2 = a2 + c1
a1 = a1 + c1 * data[j]
else:
a2 = a2 + 1.0
a1 = a1 + data[index]
data_smooth[index] = a1 / a2
return data_smooth
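# --- Illustrative usage of ko98_smoothing (a minimal sketch; not called by
# the pipeline). The synthetic spectrum and the b = 20 exponent are arbitrary
# demo choices; the pipeline itself passes b_param = 188.5 (see below).
def _demo_ko98_smoothing():
    """Smooth a noisy, roughly flat synthetic spectrum (demo only)."""
    demo_freqs = np.linspace(0.0, 100.0, 2048)
    demo_fas = 1.0 + 0.2 * np.abs(np.random.randn(demo_freqs.size))
    return ko98_smoothing(demo_freqs, demo_fas,
                          demo_freqs[1] - demo_freqs[0], 20.0)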
def calculate_smoothed_eas(ns_file, ew_file, output_file=None):
"""
Calculates the smoothed EAS at the same frequencies as specified in
the input files
"""
b_param = 188.5 # cm/s
# Read data
freqs, ns_data = read_fas_file(ns_file)
_, ew_data = read_fas_file(ew_file)
eas_data = []
# Calculate EAS
for ns_comp, ew_comp in zip(ns_data, ew_data):
eas_data.append(np.sqrt(0.5*(pow(ns_comp, 2) + pow(ew_comp, 2))))
# Calculate Smoothed EAS
smoothed_eas = ko98_smoothing(freqs, eas_data,
freqs[1]-freqs[0],
b_param)
# Write data file if output_file is provided
if output_file is not None:
out_file = open(output_file, 'w')
out_file.write("# Freq(Hz)\t FAS H1 (cm/s)\t FAS H2 (cm/s)\t "
"EAS (cm/s)\t Smoothed EAS, b=%f (cm/s)\n" %
(b_param))
for freq, fas_h1, fas_h2, eas, s_eas in zip(freqs, ns_data,
ew_data, eas_data,
smoothed_eas):
out_file.write("%2.7E\t%2.7E\t%2.7E\t%2.7E\t%2.7E\n" %
(freq, fas_h1, fas_h2, eas, s_eas))
out_file.close()
# All done!
return freqs, ns_data, ew_data, eas_data, smoothed_eas
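# Example call (hypothetical file names), matching how FAS.run() names its
# per-station FAS files below:
#   freqs, h1, h2, eas, s_eas = calculate_smoothed_eas(
#       "station.acc.bbp.smc8.NS.no_smooth.fs.col",
#       "station.acc.bbp.smc8.EW.no_smooth.fs.col",
#       output_file="station.acc.bbp.smc8.smooth.fs.col")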
class FAS(object):
"""
    Implement FAS analysis for the Broadband Platform
"""
def __init__(self, i_r_stations, sim_id=0):
"""
Initializes class variables
"""
self.sim_id = sim_id
self.r_stations = i_r_stations
def run(self):
"""
Run FAS analysis codes
"""
print("FAS Calculation".center(80, '-'))
install = install_cfg.InstallCfg.getInstance()
sim_id = self.sim_id
sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
self.log = os.path.join(install.A_OUT_LOG_DIR, str(sim_id),
"%d.fas_%s.log" % (sim_id, sta_base))
a_statfile = os.path.join(install.A_IN_DATA_DIR,
str(sim_id),
self.r_stations)
a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
a_outdir_fas = os.path.join(a_outdir, "FAS")
#
# Make sure the tmp and out directories exist
#
bband_utils.mkdirs([a_tmpdir, a_outdir, a_outdir_fas], print_cmd=False)
slo = StationList(a_statfile)
site_list = slo.getStationList()
# Save current directory
old_cwd = os.getcwd()
os.chdir(a_tmpdir)
for site in site_list:
print("==> Processing station: %s" % (site.scode))
# Copy acc file to tmpdata
acc_file = "%d.%s.acc.bbp" % (sim_id, site.scode)
shutil.copy2(os.path.join(a_outdir, acc_file),
os.path.join(a_tmpdir, acc_file))
asc2smc_control_file = "asc2smc.ctl"
smc2fs2_control_file = "smc2fs2.ctl"
header_lines = bband_utils.count_header_lines(os.path.join(a_tmpdir,
acc_file))
# Work on both NS and EW components
for comp, data_column in zip(["NS", "EW"], [2, 3]):
# First we convert from BBP to SMC format
create_boore_asc2smc(os.path.join(a_tmpdir,
asc2smc_control_file),
acc_file, data_column, header_lines,
".smc8.%s" % (comp))
cmd = ("%s << END >> %s 2>&1\n" %
(os.path.join(install.A_USGS_BIN_DIR, "asc2smc"),
self.log) +
"%s\n" % (asc2smc_control_file) +
"END\n")
bband_utils.runprog(cmd, False, abort_on_error=True)
# Then, we run the smc2fs2 FAS tool
smc_file = "%s.smc8.%s" % (acc_file, comp)
create_boore_smc2fs2(os.path.join(a_tmpdir,
smc2fs2_control_file),
smc_file, ".no_smooth.fs.col")
cmd = ("%s >> %s 2>&1\n" %
(os.path.join(install.A_USGS_BIN_DIR, "smc2fs2"),
self.log))
bband_utils.runprog(cmd, False, abort_on_error=True)
# Calculate EAS and smoothed EAS
ns_file = os.path.join(a_tmpdir,
"%s.smc8.NS.no_smooth.fs.col" % (acc_file))
ew_file = os.path.join(a_tmpdir,
"%s.smc8.EW.no_smooth.fs.col" % (acc_file))
output_file = os.path.join(a_outdir_fas,
"%s.smc8.smooth.fs.col" % (acc_file))
(freqs, ns_fas,
ew_fas, eas, smoothed_eas) = calculate_smoothed_eas(ns_file,
ew_file,
output_file)
# Create plot
fas_plot = os.path.join(a_outdir_fas,
"%d.%s.fas.png" % (sim_id, site.scode))
plot_fas(freqs, ns_fas, ew_fas, smoothed_eas, fas_plot, site.scode)
# All done, restore working directory
os.chdir(old_cwd)
print("FAS Calculation Completed".center(80, '-'))
if __name__ == '__main__':
if len(sys.argv) < 3:
print("Usage: %s station_list sim_id" % (os.path.basename(sys.argv[0])))
sys.exit(1)
print("Testing Module: %s" % (os.path.basename(sys.argv[0])))
ME = FAS(sys.argv[1], sim_id=int(sys.argv[2]))
ME.run()
sys.exit(0)
|
[
"pylab.close",
"numpy.sin",
"pylab.gcf",
"os.path.join",
"os.chdir",
"pylab.title",
"numpy.power",
"pylab.ylabel",
"pylab.xlabel",
"bband_utils.mkdirs",
"numpy.log10",
"pylab.legend",
"os.path.basename",
"pylab.grid",
"pylab.xscale",
"pylab.savefig",
"matplotlib.use",
"install_cfg.InstallCfg.getInstance",
"sys.exit",
"pylab.axis",
"os.getcwd",
"bband_utils.runprog",
"numpy.zeros",
"os.path.splitext",
"pylab.yscale",
"pylab.plot",
"station_list.StationList"
] |
[((278, 304), 'matplotlib.use', 'mpl.use', (['"""AGG"""'], {'warn': '(False)'}), "('AGG', warn=False)\n", (285, 304), True, 'import matplotlib as mpl\n'), ((13765, 13810), 'pylab.title', 'pylab.title', (["('Station: %s' % station)"], {'size': '(12)'}), "('Station: %s' % station, size=12)\n", (13776, 13810), False, 'import pylab\n'), ((13818, 13870), 'pylab.plot', 'pylab.plot', (['freqs', 'ns_data', '"""b"""'], {'lw': '(0.75)', 'label': '"""NS"""'}), "(freqs, ns_data, 'b', lw=0.75, label='NS')\n", (13828, 13870), False, 'import pylab\n'), ((13875, 13927), 'pylab.plot', 'pylab.plot', (['freqs', 'ew_data', '"""r"""'], {'lw': '(0.75)', 'label': '"""EW"""'}), "(freqs, ew_data, 'r', lw=0.75, label='EW')\n", (13885, 13927), False, 'import pylab\n'), ((13932, 14004), 'pylab.plot', 'pylab.plot', (['freqs', 'eas_smoothed_data', '"""k"""'], {'lw': '(1.25)', 'label': '"""Smoothed EAS"""'}), "(freqs, eas_smoothed_data, 'k', lw=1.25, label='Smoothed EAS')\n", (13942, 14004), False, 'import pylab\n'), ((14009, 14040), 'pylab.legend', 'pylab.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (14021, 14040), False, 'import pylab\n'), ((14045, 14064), 'pylab.xscale', 'pylab.xscale', (['"""log"""'], {}), "('log')\n", (14057, 14064), False, 'import pylab\n'), ((14069, 14088), 'pylab.yscale', 'pylab.yscale', (['"""log"""'], {}), "('log')\n", (14081, 14088), False, 'import pylab\n'), ((14093, 14133), 'pylab.ylabel', 'pylab.ylabel', (['"""Fourier Amplitude (cm/s)"""'], {}), "('Fourier Amplitude (cm/s)')\n", (14105, 14133), False, 'import pylab\n'), ((14138, 14168), 'pylab.xlabel', 'pylab.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (14150, 14168), False, 'import pylab\n'), ((14173, 14209), 'pylab.axis', 'pylab.axis', (['[0.01, 100, 0.001, 1000]'], {}), '([0.01, 100, 0.001, 1000])\n', (14183, 14209), False, 'import pylab\n'), ((14214, 14230), 'pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (14224, 14230), False, 'import pylab\n'), ((14235, 14302), 'pylab.grid', 'pylab.grid', ([], {'b': '(True)', 'which': '"""major"""', 'linestyle': '"""-"""', 'color': '"""lightgray"""'}), "(b=True, which='major', linestyle='-', color='lightgray')\n", (14245, 14302), False, 'import pylab\n'), ((14307, 14369), 'pylab.grid', 'pylab.grid', ([], {'b': '(True)', 'which': '"""minor"""', 'linewidth': '(0.5)', 'color': '"""gray"""'}), "(b=True, which='minor', linewidth=0.5, color='gray')\n", (14317, 14369), False, 'import pylab\n'), ((14391, 14468), 'pylab.savefig', 'pylab.savefig', (['fas_plot'], {'format': '"""png"""', 'transparent': '(False)', 'dpi': 'plot_config.dpi'}), "(fas_plot, format='png', transparent=False, dpi=plot_config.dpi)\n", (14404, 14468), False, 'import pylab\n'), ((14491, 14504), 'pylab.close', 'pylab.close', ([], {}), '()\n', (14502, 14504), False, 'import pylab\n'), ((14969, 14981), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (14977, 14981), True, 'import numpy as np\n'), ((14995, 15021), 'numpy.power', 'np.power', (['(10.0)', '(2.5 / bexp)'], {}), '(10.0, 2.5 / bexp)\n', (15003, 15021), True, 'import numpy as np\n'), ((22046, 22057), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (22054, 22057), False, 'import sys\n'), ((17905, 17941), 'install_cfg.InstallCfg.getInstance', 'install_cfg.InstallCfg.getInstance', ([], {}), '()\n', (17939, 17941), False, 'import install_cfg\n'), ((18499, 18528), 'os.path.join', 'os.path.join', (['a_outdir', '"""FAS"""'], {}), "(a_outdir, 'FAS')\n", (18511, 18528), False, 'import os\n'), ((18612, 18683), 'bband_utils.mkdirs', 
'bband_utils.mkdirs', (['[a_tmpdir, a_outdir, a_outdir_fas]'], {'print_cmd': '(False)'}), '([a_tmpdir, a_outdir, a_outdir_fas], print_cmd=False)\n', (18630, 18683), False, 'import bband_utils\n'), ((18699, 18722), 'station_list.StationList', 'StationList', (['a_statfile'], {}), '(a_statfile)\n', (18710, 18722), False, 'from station_list import StationList\n'), ((18816, 18827), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (18825, 18827), False, 'import os\n'), ((18836, 18854), 'os.chdir', 'os.chdir', (['a_tmpdir'], {}), '(a_tmpdir)\n', (18844, 18854), False, 'import os\n'), ((21679, 21696), 'os.chdir', 'os.chdir', (['old_cwd'], {}), '(old_cwd)\n', (21687, 21696), False, 'import os\n'), ((21900, 21911), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21908, 21911), False, 'import sys\n'), ((13667, 13678), 'pylab.gcf', 'pylab.gcf', ([], {}), '()\n', (13676, 13678), False, 'import pylab\n'), ((13708, 13719), 'pylab.gcf', 'pylab.gcf', ([], {}), '()\n', (13717, 13719), False, 'import pylab\n'), ((20782, 20846), 'os.path.join', 'os.path.join', (['a_tmpdir', "('%s.smc8.NS.no_smooth.fs.col' % acc_file)"], {}), "(a_tmpdir, '%s.smc8.NS.no_smooth.fs.col' % acc_file)\n", (20794, 20846), False, 'import os\n'), ((20906, 20970), 'os.path.join', 'os.path.join', (['a_tmpdir', "('%s.smc8.EW.no_smooth.fs.col' % acc_file)"], {}), "(a_tmpdir, '%s.smc8.EW.no_smooth.fs.col' % acc_file)\n", (20918, 20970), False, 'import os\n'), ((21034, 21096), 'os.path.join', 'os.path.join', (['a_outdir_fas', "('%s.smc8.smooth.fs.col' % acc_file)"], {}), "(a_outdir_fas, '%s.smc8.smooth.fs.col' % acc_file)\n", (21046, 21096), False, 'import os\n'), ((21441, 21507), 'os.path.join', 'os.path.join', (['a_outdir_fas', "('%d.%s.fas.png' % (sim_id, site.scode))"], {}), "(a_outdir_fas, '%d.%s.fas.png' % (sim_id, site.scode))\n", (21453, 21507), False, 'import os\n'), ((21946, 21975), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (21962, 21975), False, 'import os\n'), ((18007, 18040), 'os.path.splitext', 'os.path.splitext', (['self.r_stations'], {}), '(self.r_stations)\n', (18023, 18040), False, 'import os\n'), ((19076, 19108), 'os.path.join', 'os.path.join', (['a_outdir', 'acc_file'], {}), '(a_outdir, acc_file)\n', (19088, 19108), False, 'import os\n'), ((19135, 19167), 'os.path.join', 'os.path.join', (['a_tmpdir', 'acc_file'], {}), '(a_tmpdir, acc_file)\n', (19147, 19167), False, 'import os\n'), ((19325, 19357), 'os.path.join', 'os.path.join', (['a_tmpdir', 'acc_file'], {}), '(a_tmpdir, acc_file)\n', (19337, 19357), False, 'import os\n'), ((20129, 20181), 'bband_utils.runprog', 'bband_utils.runprog', (['cmd', '(False)'], {'abort_on_error': '(True)'}), '(cmd, False, abort_on_error=True)\n', (20148, 20181), False, 'import bband_utils\n'), ((20661, 20713), 'bband_utils.runprog', 'bband_utils.runprog', (['cmd', '(False)'], {'abort_on_error': '(True)'}), '(cmd, False, abort_on_error=True)\n', (20680, 20713), False, 'import bband_utils\n'), ((21860, 21889), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (21876, 21889), False, 'import os\n'), ((15823, 15848), 'numpy.log10', 'np.log10', (['(freqs[j] / freq)'], {}), '(freqs[j] / freq)\n', (15831, 15848), True, 'import numpy as np\n'), ((19637, 19681), 'os.path.join', 'os.path.join', (['a_tmpdir', 'asc2smc_control_file'], {}), '(a_tmpdir, asc2smc_control_file)\n', (19649, 19681), False, 'import os\n'), ((20330, 20374), 'os.path.join', 'os.path.join', (['a_tmpdir', 'smc2fs2_control_file'], {}), '(a_tmpdir, 
smc2fs2_control_file)\n', (20342, 20374), False, 'import os\n'), ((15879, 15889), 'numpy.sin', 'np.sin', (['c1'], {}), '(c1)\n', (15885, 15889), True, 'import numpy as np\n'), ((20561, 20608), 'os.path.join', 'os.path.join', (['install.A_USGS_BIN_DIR', '"""smc2fs2"""'], {}), "(install.A_USGS_BIN_DIR, 'smc2fs2')\n", (20573, 20608), False, 'import os\n'), ((19939, 19986), 'os.path.join', 'os.path.join', (['install.A_USGS_BIN_DIR', '"""asc2smc"""'], {}), "(install.A_USGS_BIN_DIR, 'asc2smc')\n", (19951, 19986), False, 'import os\n')]
|
import pytest
import numpy as np
import zmq
import h5py
import struct
import itertools
from .. import Writer
from .. import chunk_api
from ...messages import array as array_api
from .conftest import assert_chunk_allclose, assert_h5py_allclose
from zeeko.conftest import assert_canrecv
from ...tests.test_helpers import ZeekoTestBase, ZeekoMappingTests, OrderedDict
from ...messages.tests.test_receiver import ReceiverTests, ReceiverTestBase
@pytest.fixture
def notify(address2, context):
"""Notification socket"""
s = context.socket(zmq.PUSH)
s.bind(address2)
with s:
yield s
@pytest.fixture
def rnotify(address2, context, notify):
"""Recieve notifications."""
s = context.socket(zmq.PULL)
s.connect(address2)
with s:
yield s
@pytest.fixture
def n():
"""Number of arrays to publish."""
return 3
@pytest.fixture
def metadata_callback():
"""Return a metadata callback."""
def callback():
return {'meta':'data', 'n':5}
return callback
def test_writer_construction(filename):
"""Test construction"""
w = Writer(filename)
class WriterTestsBase(ReceiverTestBase):
"""Base class items for Writers."""
pytestmark = pytest.mark.usefixtures("rnotify")
cls = Writer
@pytest.fixture
def arrays(self, n, name, chunk_array, chunk_mask):
"""Return a list of chunks"""
cs = OrderedDict()
for i in range(n):
c = chunk_api.PyChunk("{0:s}{1:d}".format(name, i), np.random.randn(*chunk_array.shape), chunk_mask)
cs[c.name] = c
return cs
@pytest.fixture
def receiver(self, filename, metadata_callback):
"""The receiver object"""
obj = self.cls()
obj.metadata_callback = metadata_callback
with h5py.File(filename) as obj.file:
yield obj
@pytest.fixture
def writer(self, receiver):
"""Return a receiver"""
return receiver
def send_arrays(self, socket, arrays, framecount):
"""Send arrays."""
assert socket.poll(timeout=100, flags=zmq.POLLOUT)
array_api.send_array_packet_header(socket, "arrays", len(arrays), framecount, flags=zmq.SNDMORE)
chunks = list(arrays.values())
for chunk in chunks[:-1]:
chunk.send(socket, flags=zmq.SNDMORE)
chunks[-1].send(socket)
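    # Protocol note: the helper above sends one multipart ZMQ message -- a
    # packet header followed by each chunk -- using zmq.SNDMORE on every
    # frame except the last, so the receiver gets the bundle atomically.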
def recv_arrays(self, receiver, socket, arrays, flags=zmq.NOBLOCK):
"""Wrapper around receiving arrays."""
assert_canrecv(socket)
receiver.receive(socket, flags=flags)
for key in arrays:
assert receiver.event(key).is_set()
assert len(receiver) == len(arrays)
def send_unbundled_arrays(self, socket, arrays):
"""Send arrays as individual messages."""
array_api.send_array_packet_header(socket, "arrays", len(arrays), flags=zmq.SNDMORE)
chunks = list(arrays.values())
for chunk in chunks[:-1]:
chunk.send(socket, flags=zmq.SNDMORE)
chunks[-1].send(socket)
def recv_unbundled_arrays(self, receiver, socket, arrays, flags=zmq.NOBLOCK):
"""Receive unbundled arrays"""
count = 0
while socket.poll(timeout=100, flags=zmq.POLLIN):
assert_canrecv(socket)
receiver.receive(socket, flags=flags)
count += 1
for key in arrays:
assert receiver.event(key).is_set()
# assert count == len(arrays)
def make_modified_arrays(self, arrays):
"""Make modified arrays."""
return OrderedDict((cs.name, chunk_api.PyChunk(cs.name, cs.array * 2.0, cs.mask)) for cs in arrays.values())
def assert_receiver_arrays_allclose(self, receiver, arrays):
"""Assert receiver and arrays are all close."""
assert len(receiver) == len(arrays)
assert set(receiver.keys()) == set(arrays.keys())
for i, key in enumerate(receiver):
chunk = receiver[key]
assert_chunk_allclose(chunk, arrays[key])
class TestWriter(ReceiverTests, WriterTestsBase):
"""Test case for recorders."""
pass
class TestWriterMapping(ZeekoMappingTests, WriterTestsBase):
"""Test recorder behavior as a mapping."""
cls = Writer
@pytest.fixture
def mapping(self, chunksize, push, pull, arrays, framecount, filename, metadata_callback):
"""A client, set up for use as a mapping."""
obj = self.cls()
obj.metadata_callback = metadata_callback
with h5py.File(filename) as obj.file:
self.send_arrays(push, arrays, framecount)
self.recv_arrays(obj, pull, arrays)
yield obj
@pytest.fixture
def keys(self, arrays):
"""Return keys which should be availalbe."""
return arrays.keys()
|
[
"zeeko.conftest.assert_canrecv",
"h5py.File",
"pytest.mark.usefixtures",
"numpy.random.randn"
] |
[((1220, 1254), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""rnotify"""'], {}), "('rnotify')\n", (1243, 1254), False, 'import pytest\n'), ((2517, 2539), 'zeeko.conftest.assert_canrecv', 'assert_canrecv', (['socket'], {}), '(socket)\n', (2531, 2539), False, 'from zeeko.conftest import assert_canrecv\n'), ((1803, 1822), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (1812, 1822), False, 'import h5py\n'), ((3279, 3301), 'zeeko.conftest.assert_canrecv', 'assert_canrecv', (['socket'], {}), '(socket)\n', (3293, 3301), False, 'from zeeko.conftest import assert_canrecv\n'), ((4558, 4577), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (4567, 4577), False, 'import h5py\n'), ((1509, 1544), 'numpy.random.randn', 'np.random.randn', (['*chunk_array.shape'], {}), '(*chunk_array.shape)\n', (1524, 1544), True, 'import numpy as np\n')]
|
from __future__ import print_function, division, absolute_import
import pickle
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def visualize_vertices(vertices:np.ndarray, bones:np.ndarray = None):
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(vertices[:-1:5,0], vertices[:-1:5,1], vertices[:-1:5,2], c='b')
print('%f to %f' % (vertices[:,2].min(), vertices[:,2].max()))
if bones is not None:
joints = []
        for bone in bones:
            # A bone's offset matrix maps mesh space to bone space; the
            # translation column of its inverse gives the joint's rest
            # position in mesh space.
            joint = np.linalg.inv(bone['offset_matrix'])[0:3, 3]
            joints.append(np.expand_dims(joint, axis=0))
joints = np.vstack(joints)
ax.scatter(joints[:,0], joints[:,1], joints[:,2], c='r')
print('%f to %f' % (joints[:,2].min(), joints[:,2].max()))
plt.show()
if __name__ == '__main__':
with open('mesh/model/preprocessed_right_hand.pkl', 'rb') as f:
mesh = pickle.load(f)
visualize_vertices(mesh['vertices'], mesh['bones'])
|
[
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"numpy.expand_dims",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.linalg.inv",
"numpy.vstack"
] |
[((265, 277), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (275, 277), True, 'import matplotlib.pyplot as plt\n'), ((287, 298), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (293, 298), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((824, 834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (832, 834), True, 'import matplotlib.pyplot as plt\n'), ((659, 676), 'numpy.vstack', 'np.vstack', (['joints'], {}), '(joints)\n', (668, 676), True, 'import numpy as np\n'), ((946, 960), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (957, 960), False, 'import pickle\n'), ((540, 576), 'numpy.linalg.inv', 'np.linalg.inv', (["bone['offset_matrix']"], {}), "(bone['offset_matrix'])\n", (553, 576), True, 'import numpy as np\n'), ((611, 640), 'numpy.expand_dims', 'np.expand_dims', (['joint'], {'axis': '(0)'}), '(joint, axis=0)\n', (625, 640), True, 'import numpy as np\n')]
|
"""
======================
Comparing CCA Variants
======================
A comparison of Kernel Canonical Correlation Analysis (KCCA) with three
different types of kernel to Deep Canonical Correlation Analysis (DCCA).
Each learns and computes kernels suitable for different situations. The point
of this tutorial is to illustrate, in toy examples, the rough intuition as to
when such methods work well and generate linearly correlated projections.
The simulated latent data has two signal dimensions draw from independent
Gaussians. Two views of data were derived from this.
- View 1: The latent data.
- View 2: A transformation of the latent data.
To each view, two additional independent Gaussian noise dimensions were added.
Each 2x2 grid of subplots in the figure corresponds to a transformation and
either the raw data or a CCA variant. The x-axes are the data from view 1
and the y-axes are the data from view 2. Plotted are the correlations between
the signal dimensions of the raw views and the top two components of each
view after a CCA variant transformation. Linearly correlated plots on the
diagonals of the 2x2 grids indicate that the CCA method was able to
successfully learn the underlying functional relationship between the two
views.
"""
from mvlearn.embed import KCCA, DCCA
from mvlearn.datasets import GaussianMixture
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
# Make Latents
n_samples = 200
centers = [[0, 1], [0, -1]]
covariances = 2*np.array([np.eye(2), np.eye(2)])
gm_train = GaussianMixture(n_samples, centers, covariances)
# Test
gm_test = GaussianMixture(n_samples, centers, covariances)
# Make 2 views
n_noise = 2
transforms = ['linear', 'poly', 'sin']
Xs_train = []
Xs_test = []
for transform in transforms:
gm_train.sample_views(transform=transform, n_noise=n_noise)
gm_test.sample_views(transform=transform, n_noise=n_noise)
Xs_train.append(gm_train.get_Xy()[0])
Xs_test.append(gm_test.get_Xy()[0])
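# With the parameters above, each entry of Xs_train/Xs_test is a pair of
# views, and each view stacks the 2 signal dimensions with n_noise = 2 noise
# dimensions, i.e. arrays of shape (n_samples, 4).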
# Plotting parameters
labels = gm_test.latent_[:, 0]
cmap = matplotlib.colors.ListedColormap(
sns.diverging_palette(240, 10, n=len(labels), center='light').as_hex())
cmap = 'coolwarm'
method_labels = \
['Raw Views', 'Linear KCCA', 'Polynomial KCCA', 'Gaussian KCCA', 'DCCA']
transform_labels = \
['Linear Transform', 'Polynomial Transform', 'Sinusoidal Transform']
input_size1, input_size2 = Xs_train[0][0].shape[1], Xs_train[0][1].shape[1]
outdim_size = min(Xs_train[0][0].shape[1], 2)
layer_sizes1 = [256, 256, outdim_size]
layer_sizes2 = [256, 256, outdim_size]
methods = [
KCCA(ktype='linear', reg=0.1, degree=2.0, constant=0.1, n_components=2),
KCCA(ktype='poly', reg=0.1, degree=2.0, constant=0.1, n_components=2),
KCCA(ktype='gaussian', reg=1.0, sigma=2.0, n_components=2),
DCCA(input_size1, input_size2, outdim_size, layer_sizes1, layer_sizes2,
epoch_num=400)
]
fig, axes = plt.subplots(3 * 2, 5 * 2, figsize=(20, 12))
sns.set_context('notebook')
for r, transform in enumerate(transforms):
axs = axes[2 * r:2 * r + 2, :2]
for i, ax in enumerate(axs.flatten()):
dim2 = int(i / 2)
dim1 = i % 2
ax.scatter(
Xs_test[r][0][:, dim1],
Xs_test[r][1][:, dim2],
cmap=cmap,
c=labels,
)
ax.set_xticks([], [])
ax.set_yticks([], [])
if dim1 == 0:
ax.set_ylabel(f"View 2 Dim {dim2+1}")
if dim1 == 0 and dim2 == 0:
ax.text(-0.5, -0.1, transform_labels[r], transform=ax.transAxes,
fontsize=18, rotation=90, verticalalignment='center')
if dim2 == 1 and r == len(transforms)-1:
ax.set_xlabel(f"View 1 Dim {dim1+1}")
if i == 0 and r == 0:
ax.set_title(method_labels[r],
{'position': (1.11, 1), 'fontsize': 18})
for c, method in enumerate(methods):
axs = axes[2*r: 2*r+2, 2*c+2:2*c+4]
Xs = method.fit(Xs_train[r]).transform(Xs_test[r])
for i, ax in enumerate(axs.flatten()):
dim2 = int(i / 2)
dim1 = i % 2
ax.scatter(
Xs[0][:, dim1],
Xs[1][:, dim2],
cmap=cmap,
c=labels,
)
if dim2 == 1 and r == len(transforms)-1:
ax.set_xlabel(f"View 1 Dim {dim1+1}")
if i == 0 and r == 0:
ax.set_title(method_labels[c + 1], {'position': (1.11, 1),
'fontsize': 18})
ax.axis("equal")
ax.set_xticks([], [])
ax.set_yticks([], [])
|
[
"mvlearn.embed.KCCA",
"mvlearn.embed.DCCA",
"mvlearn.datasets.GaussianMixture",
"numpy.eye",
"matplotlib.pyplot.subplots",
"seaborn.set_context"
] |
[((1558, 1606), 'mvlearn.datasets.GaussianMixture', 'GaussianMixture', (['n_samples', 'centers', 'covariances'], {}), '(n_samples, centers, covariances)\n', (1573, 1606), False, 'from mvlearn.datasets import GaussianMixture\n'), ((1625, 1673), 'mvlearn.datasets.GaussianMixture', 'GaussianMixture', (['n_samples', 'centers', 'covariances'], {}), '(n_samples, centers, covariances)\n', (1640, 1673), False, 'from mvlearn.datasets import GaussianMixture\n'), ((2932, 2976), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3 * 2)', '(5 * 2)'], {'figsize': '(20, 12)'}), '(3 * 2, 5 * 2, figsize=(20, 12))\n', (2944, 2976), True, 'import matplotlib.pyplot as plt\n'), ((2977, 3004), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {}), "('notebook')\n", (2992, 3004), True, 'import seaborn as sns\n'), ((2605, 2676), 'mvlearn.embed.KCCA', 'KCCA', ([], {'ktype': '"""linear"""', 'reg': '(0.1)', 'degree': '(2.0)', 'constant': '(0.1)', 'n_components': '(2)'}), "(ktype='linear', reg=0.1, degree=2.0, constant=0.1, n_components=2)\n", (2609, 2676), False, 'from mvlearn.embed import KCCA, DCCA\n'), ((2682, 2751), 'mvlearn.embed.KCCA', 'KCCA', ([], {'ktype': '"""poly"""', 'reg': '(0.1)', 'degree': '(2.0)', 'constant': '(0.1)', 'n_components': '(2)'}), "(ktype='poly', reg=0.1, degree=2.0, constant=0.1, n_components=2)\n", (2686, 2751), False, 'from mvlearn.embed import KCCA, DCCA\n'), ((2757, 2815), 'mvlearn.embed.KCCA', 'KCCA', ([], {'ktype': '"""gaussian"""', 'reg': '(1.0)', 'sigma': '(2.0)', 'n_components': '(2)'}), "(ktype='gaussian', reg=1.0, sigma=2.0, n_components=2)\n", (2761, 2815), False, 'from mvlearn.embed import KCCA, DCCA\n'), ((2821, 2911), 'mvlearn.embed.DCCA', 'DCCA', (['input_size1', 'input_size2', 'outdim_size', 'layer_sizes1', 'layer_sizes2'], {'epoch_num': '(400)'}), '(input_size1, input_size2, outdim_size, layer_sizes1, layer_sizes2,\n epoch_num=400)\n', (2825, 2911), False, 'from mvlearn.embed import KCCA, DCCA\n'), ((1524, 1533), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1530, 1533), True, 'import numpy as np\n'), ((1535, 1544), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1541, 1544), True, 'import numpy as np\n')]
|
"""Training GCMC model on the MovieLens data set.
The script loads the full graph to the training device.
"""
import os, time
import argparse
import logging
import random
import string
import dgl
import scipy.sparse as sp
import pandas as pd
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from data import DataSetLoader
#from data_custom import DataSetLoader
from model import BiDecoder, GCMCLayer, MLPDecoder
from utils import get_activation, get_optimizer, torch_total_param_num, torch_net_info, MetricLogger
from utils import to_etype_name
from sklearn.metrics import ndcg_score
#f1 = open(os.path.join(DATA_ROOT, 'EHCF.txt'), 'w')
def sample_negative(ratings, sample_rate, item_set):
"""
input:
1. training rating ::pd.frame
2. sample number::int
3. item_set:a set of item::set
"""
#"""return all negative items & 100 sampled negative items"""
    interact_status = ratings.groupby('user_id')['movie_id'].apply(set).reset_index().rename(columns={'movie_id': 'interacted_items'})
    #print(interact_status)
    #item_list = set(item_list)
    interact_status['negative_items'] = interact_status['interacted_items'].apply(lambda x: item_set - x)
#print(interact_status['negative_items'])
interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(x, sample_rate))
return interact_status[['user_id', 'negative_items', 'negative_samples']]
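# --- Illustrative usage of sample_negative (a minimal sketch; the toy frame
# below is made up and this helper is not used by the training loop). ---
def _demo_sample_negative():
    """Draw 2 negatives per user from a 5-item catalogue (demo only)."""
    toy = pd.DataFrame({'user_id': [0, 0, 1], 'movie_id': [1, 2, 3]})
    return sample_negative(toy, sample_rate=2, item_set=set(range(5)))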
def generate_pair(user_list, num_movie):
    # Inputs: user_list and num_movie.
    # num_movie is the total number of movies.
rating_pairs = (np.array(np.array([[ele] * num_movie for ele in user_list]).flatten(),
dtype=np.int64),
np.array(np.array([[np.arange(num_movie)] * len(user_list)]).flatten(),
dtype=np.int64))
return rating_pairs
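# e.g. generate_pair([0, 1], 3) returns
#   (array([0, 0, 0, 1, 1, 1]), array([0, 1, 2, 0, 1, 2]))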
def generate_dec_graph(rating_pairs, num_user, num_movie):
#print(rating_pairs)
#print("***:",len(rating_pairs), num_user, num_movie)
ones = np.ones_like(rating_pairs[0])
user_movie_ratings_coo = sp.coo_matrix(
(ones, rating_pairs),
shape=(num_user, num_movie), dtype=np.float32)
g = dgl.bipartite_from_scipy(user_movie_ratings_coo, utype='_U', etype='_E', vtype='_V')
return dgl.heterograph({('user', 'rate', 'movie'): g.edges()},
num_nodes_dict={'user': num_user, 'movie': num_movie})
class Net(nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self._act = get_activation(args.model_activation)
self.encoder = nn.ModuleList()
self.encoder.append(GCMCLayer(args.rating_vals,
args.src_in_units,
args.dst_in_units,
args.gcn_agg_units,
args.gcn_out_units,
args.gcn_dropout,
args.gcn_agg_accum,
agg_act=self._act,
share_user_item_param=args.share_param,
device=args.device))
self.gcn_agg_accum = args.gcn_agg_accum
self.rating_vals = args.rating_vals
self.device = args.device
self.gcn_agg_units = args.gcn_agg_units
self.src_in_units = args.src_in_units
for i in range(1, args.layers):
if args.gcn_agg_accum == 'stack':
gcn_out_units = args.gcn_out_units * len(args.rating_vals)
else:
gcn_out_units = args.gcn_out_units
self.encoder.append(GCMCLayer(args.rating_vals,
args.gcn_out_units,
args.gcn_out_units,
gcn_out_units,
args.gcn_out_units,
args.gcn_dropout - i*0.1,
args.gcn_agg_accum,
agg_act=self._act,
share_user_item_param=args.share_param,
ini = False,
device=args.device))
if args.decoder == "Bi":
self.decoder = BiDecoder(in_units= args.gcn_out_units, #* args.layers,
num_classes=len(args.rating_vals),
num_basis=args.gen_r_num_basis_func)
'''
self.decoder2 = MLPDecoder(in_units= args.gcn_out_units * 2,
num_classes=len(args.rating_vals),
num_basis=args.gen_r_num_basis_func)
'''
elif args.decoder == "MLP":
if args.loss_func == "CE":
num_classes = len(args.rating_vals)
else:
num_classes = 1
self.decoder = MLPDecoder(in_units= args.gcn_out_units * args.layers,
num_classes=num_classes,
num_basis=args.gen_r_num_basis_func)
self.rating_vals = args.rating_vals
def forward(self, enc_graph, dec_graph, ufeat, ifeat, Two_Stage = False):
user_out = []
movie_out = []
for i in range(0, args.layers):
user_o, movie_o = self.encoder[i](
enc_graph,
ufeat,
ifeat,
Two_Stage)
if i == 0:
user_out = user_o
movie_out = movie_o
else:
user_out += user_o / float(i + 1)
movie_out += movie_o /float(i + 1)
#user_out.append(user_o)
#movie_out.append(movie_o)
ufeat = user_o
ifeat = movie_o
#pred_ratings = self.decoder2(dec_graph, th.cat([user_out[0], user_out[1]], 1), th.cat([movie_out[1], movie_out[0]], 1))
#user_out = th.cat(user_out, 1)
#movie_out = th.cat(movie_out, 1)
#print("user_out:", user_out[0])
#print("movie_out:", movie_out[0])
pred_ratings = self.decoder(dec_graph, user_out, movie_out)
W_r_last = None
reg_loss = 0.0
'''
for rating in self.rating_vals:
rating = to_etype_name(rating)
if W_r_last is not None:
reg_loss += th.sum((self.encoder[0].W_r[rating] - W_r_last)**2)
W_r_last = self.encoder[0].W_r[rating]
#W_r_last_2 = self.encoder_2.W_r[rating]
'''
W = th.matmul(self.encoder[0].att, self.encoder[0].basis.view(self.encoder[0].basis_units, -1))
W = W.view(len(self.rating_vals), self.src_in_units, -1)
for i, rating in enumerate(self.rating_vals):
rating = to_etype_name(rating)
if i != 0:
reg_loss += -th.sum(th.cosine_similarity(W[i,:,:], W[i-1,:,:], dim=1))
return pred_ratings, reg_loss, user_out, movie_out, W
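# Note: reg_loss returned by Net.forward() is the negated cosine similarity
# between decomposed weight matrices of adjacent rating levels, so the
# "args.ARR * reg_loss" term added to the loss in train() below encourages
# neighbouring rating transforms to stay similar.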
def train(args):
print(args)
dataset = DataSetLoader(args.data_name, args.device,
use_one_hot_fea=args.use_one_hot_fea,
symm=args.gcn_agg_norm_symm,
test_ratio=args.data_test_ratio,
valid_ratio=args.data_valid_ratio,
sample_rate = args.sample_rate)
print("Loading data finished ...\n")
args.src_in_units = dataset.user_feature_shape[1]
args.dst_in_units = dataset.movie_feature_shape[1]
args.rating_vals = dataset.possible_rating_values
### build the net
net = Net(args=args)
net = net.to(args.device)
nd_possible_rating_values = th.FloatTensor(dataset.possible_rating_values).to(args.device)
rating_loss_net = nn.CrossEntropyLoss()
learning_rate = args.train_lr
optimizer = get_optimizer(args.train_optimizer)(net.parameters(), lr=learning_rate)
print("Loading network finished ...\n")
    ### prepare training data
train_gt_labels = dataset.train_labels
train_gt_ratings = dataset.train_truths
### prepare the logger
NDCG_logger = MetricLogger(['recall50', 'recall100', 'recall200','ndcg50', 'ndcg100', 'ndcg200'], ['%.4f', '%.4f', '%.4f','%.4f', '%.4f', '%.4f'], os.path.join(args.save_dir, 'NDCG.csv'))
### declare the loss information
best_valid_rmse = np.inf
best_valid_ndcg = -np.inf
best_test_ndcg = []
no_better_valid = 0
best_iter = -1
count_rmse = 0
count_num = 0
count_loss = 0
dataset.train_enc_graph = dataset.train_enc_graph.int().to(args.device)
dataset.train_dec_graph = dataset.train_dec_graph.int().to(args.device)
dataset.valid_enc_graph = dataset.train_enc_graph
dataset.valid_dec_graph = dataset.valid_dec_graph.int().to(args.device)
dataset.test_enc_graph = dataset.test_enc_graph.int().to(args.device)
dataset.test_dec_graph = dataset.test_dec_graph.int().to(args.device)
train_m = dataset.train_m
test_m = dataset.test_m
tset = dataset.tset
user_num ,item_num = train_m.shape[0], train_m.shape[1]
#dataset.valid_recall_dec_graph = dataset.valid_recall_dec_graph.to(args.device)
#dataset.test_recall_dec_graph = dataset.test_recall_dec_graph.to(args.device)
print("Start training ...")
train_rating_pairs, train_rating_values = dataset._generate_pair_value(dataset.train_rating_info)
def update_encode_graph(dataset, train_rating_pairs, train_rating_values, sampled_data):
train_rating_pairs_zeros, train_rating_values_zeros = dataset._generate_pair_value_for_zero(dataset.train_rating_info, sampled_data)
train_rating_pairs = (np.append(train_rating_pairs[0], train_rating_pairs_zeros[0]), np.append(train_rating_pairs[1], train_rating_pairs_zeros[1]))
train_rating_values = np.append(train_rating_values, train_rating_values_zeros)
dataset.train_enc_graph = dataset._generate_enc_graph(train_rating_pairs, train_rating_values, add_support = True)
dataset.train_enc_graph = dataset.train_enc_graph.int().to(args.device)
dataset.valid_enc_graph = dataset.train_enc_graph
return dataset.train_enc_graph
def sample_data(interact_status, random_number, sample_rate):
random.seed(random_number)
interact_status['negative_samples'] = interact_status['negative_items'].apply(lambda x: random.sample(x, sample_rate))
return interact_status[['user_id', 'negative_items', 'negative_samples']]
seed_list = np.random.randint(0, 10000, (args.train_max_iter,))
Two_Stage = False
#sampled_data = sample_data(negitive_all, random_number = seed_list[iter_idx], sample_rate = 3)
negitive_all = dataset.negative_all(dataset.train_rating_info)
sampled_data = sample_data(negitive_all, random_number = 1, sample_rate = 99)
dataset.train_enc_graph = update_encode_graph(dataset, train_rating_pairs, train_rating_values, sampled_data)
dataset.valid_enc_graph = dataset.train_enc_graph
for iter_idx in range(1, args.train_max_iter):
#sampled_data = sample_data(negitive_all, random_number = 1, sample_rate = 3)
#dataset.train_enc_graph = update_encode_graph(dataset, train_rating_pairs, train_rating_values, sampled_data)
print("iter:",iter_idx)
net.train()
pred_ratings, reg_loss, user_out, movie_out, W = net(dataset.train_enc_graph, dataset.train_dec_graph,
dataset.user_feature, dataset.movie_feature, Two_Stage)
loss = rating_loss_net(pred_ratings, train_gt_labels).mean() + args.ARR * reg_loss
count_loss += loss.item()
optimizer.zero_grad()
loss.backward(retain_graph=True)
nn.utils.clip_grad_norm_(net.parameters(), args.train_grad_clip)
optimizer.step()
real_pred_ratings = (th.softmax(pred_ratings, dim=1) * nd_possible_rating_values.view(1, -1)).sum(dim=1)
#print(real_pred_ratings.shape)
        # real_pred_ratings above is the expected rating: softmax
        # probabilities weighted by the possible rating values.
if iter_idx < 100:
if iter_idx % 10 == 0:
recall50_, recall100_, recall200_, ndcg50_, ndcg100_, ndcg200_ = \
dev_step(tset, train_m, test_m, net, dataset, args, nd_possible_rating_values)
#dev_cold(u_train,i_train, tset, train_m, test_m)
NDCG_logger.log(recall50 = recall50_, recall100 = recall100_, recall200 = recall200_, ndcg50 = ndcg50_, ndcg100 = ndcg100_, ndcg200 = ndcg200_)
        if iter_idx >= 500:
            recall50_, recall100_, recall200_, ndcg50_, ndcg100_, ndcg200_ = \
                dev_step(tset, train_m, test_m, net, dataset, args, nd_possible_rating_values)
            NDCG_logger.log(recall50 = recall50_, recall100 = recall100_, recall200 = recall200_, ndcg50 = ndcg50_, ndcg100 = ndcg100_, ndcg200 = ndcg200_)
            #dev_cold(u_train,i_train, tset, train_m, test_m)
NDCG_logger.close()
def dev_step(tset, train_m, test_m, net, dataset, args, nd_possible_rating_values):
"""
Evaluates model on a dev set
"""
batch_size = 128
#print("tset:",tset)
user_te = np.array(list(tset.keys()))
#print("user_te:",user_te)
user_te2 = user_te[:, np.newaxis]
#user_te2 = user_te
ll = int(len(user_te) / batch_size) + 1
recall50 = []
recall100 = []
recall200 = []
ndcg50 = []
ndcg100 = []
ndcg200 = []
for batch_num in range(ll):
print(batch_num/ll*100,"%")
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, len(user_te))
        # u_batch is the slice of user ids (as a column vector) for this batch.
        u_batch = user_te2[start_index:end_index]
        # batch_users is the number of users in this batch.
        batch_users = end_index - start_index
        num_user = train_m.shape[0]   # total number of users
        num_movie = train_m.shape[1]  # total number of items
user_list = user_te[start_index:end_index]
batch_rating_pairs = generate_pair(user_list, num_movie)
batch_dec_graph = generate_dec_graph(batch_rating_pairs, num_user, num_movie).to(args.device)
Two_Stage = False
pred_ratings, reg_loss, user_out, movie_out, W = net(dataset.train_enc_graph, batch_dec_graph, dataset.user_feature, dataset.movie_feature, Two_Stage)
real_pred_ratings = (th.softmax(pred_ratings, dim=1) * nd_possible_rating_values.view(1, -1)).sum(dim=1)
u_b = user_te[start_index:end_index]
real_pred_ratings = real_pred_ratings.cpu()
#print("pred_shape:", real_pred_ratings.shape)
pre = real_pred_ratings.reshape(batch_users, -1)
#print("pred_shape:", pre.shape)
#pre = np.reshape(real_pred_ratings, (batch_users, num_movie))
pre = pre.detach().numpy()
idx = np.zeros_like(pre, dtype=bool)
idx[train_m[u_b].nonzero()] = True
pre[idx] = -np.inf
recall = []
for kj in [50, 100, 200]:
idx_topk_part = np.argpartition(-pre, kj, 1)
# print pre[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]]
# print idx_topk_part
pre_bin = np.zeros_like(pre, dtype=bool)
pre_bin[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]] = True
# print pre_bin
true_bin = np.zeros_like(pre, dtype=bool)
true_bin[test_m[u_b].nonzero()] = True
tmp = (np.logical_and(true_bin, pre_bin).sum(axis=1)).astype(np.float32)
#print("tmp:",tmp)
recall.append(tmp / np.minimum(kj, true_bin.sum(axis=1)))
#print("recall:",tmp / np.minimum(kj, true_bin.sum(axis=1)))
# print tmp
#print("recall:",recall)
ndcg = []
        for kj in [20, 40, 80]:
            # NB: these NDCG cutoffs ([20, 40, 80]) differ from the variable
            # names ndcg50/ndcg100/ndcg200 used when logging the results.
            # Partition to roughly locate the top-kj entries (unordered).
            idx_topk_part = np.argpartition(-pre, kj, 1)
#print("pre:",pre.shape)
#
#print("idx_topk_part[:, :kj]:",idx_topk_part[:, :kj])
            # Gather each user's top-kj predicted scores.
topk_part = pre[np.arange(batch_users)[:, np.newaxis], idx_topk_part[:, :kj]]
#print("topk_part:",topk_part[0:2])
idx_part = np.argsort(-topk_part, axis=1)
            # Sort the top-kj scores in descending order, keeping the ordering indices.
#print("idx_part:",idx_part[0:2])
idx_topk = idx_topk_part[np.arange(end_index - start_index)[:, np.newaxis], idx_part]
            # Map back to the corresponding item indices in the original ordering.
#print("idx_topk:",idx_topk[0:2])
tp = np.log(2) / np.log(np.arange(2, kj + 2))
test_batch = test_m[u_b]
#print("test_batch:",test_batch)
DCG = (test_batch[np.arange(batch_users)[:, np.newaxis], idx_topk].toarray() * tp).sum(axis=1)
            # DCG only accumulates gain at the ranks where a held-out item appears.
#print("tp:",tp)
#print("DCG:",DCG)
IDCG = np.array([(tp[:min(n, kj)]).sum()
for n in test_batch.getnnz(axis=1)])
#print("IDCG:",np.array([(tp[:min(n, kj)]).sum()
# for n in test_batch.getnnz(axis=1)]))
ndcg.append(DCG / IDCG)
#print("ndcg:",ndcg)
recall50.append(recall[0])
recall100.append(recall[1])
recall200.append(recall[2])
ndcg50.append(ndcg[0])
ndcg100.append(ndcg[1])
ndcg200.append(ndcg[2])
recall50 = np.hstack(recall50)
recall100 = np.hstack(recall100)
recall200 = np.hstack(recall200)
ndcg50 = np.hstack(ndcg50)
ndcg100 = np.hstack(ndcg100)
ndcg200 = np.hstack(ndcg200)
print("recall50:",recall50[0:10])
print("ndcg50:", ndcg50.shape)
print("recall50:", np.mean(recall50), "ndcg50:",np.mean(ndcg50))
print("recall100:",np.mean(recall100),"ndcg100:", np.mean(ndcg100))
print("recall200:",np.mean(recall200), "ndcg200:",np.mean(ndcg200))
#f1.write(str(np.mean(recall100)) + ' ' + str(np.mean(ndcg100)) + '\n')
#f1.flush()
return np.mean(recall50), np.mean(recall100), np.mean(recall200), np.mean(ndcg50), np.mean(ndcg100), np.mean(ndcg200)
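# --- Reference formulas used in dev_step (a sketch, for readability only) ---
# Recall@k = |top-k predictions ∩ held-out items| / min(k, |held-out items|)
# NDCG@k = DCG@k / IDCG@k, with position discount 1/log2(rank + 1).
def _recall_at_k(ranked_items, true_items, k):
    """Recall@k for a single user (demo helper; not used by training)."""
    hits = len(set(ranked_items[:k]) & set(true_items))
    return hits / min(k, len(true_items))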
def config():
parser = argparse.ArgumentParser(description='PGMC')
parser.add_argument('--seed', default=125, type=int) #123
parser.add_argument('--device', default='1', type=int,
help='Running device. E.g `--device 0`, if using cpu, set `--device -1`')
parser.add_argument('--save_dir', type=str, help='The saving directory')
parser.add_argument('--save_id', type=int, help='The saving log id')
parser.add_argument('--silent', action='store_true')
parser.add_argument('--data_name', default='yahoo_music', type=str,
help='The dataset name: ml-100k, ml-1m, ml-10m, flixster, douban, yahoo_music')
parser.add_argument('--data_test_ratio', type=float, default=0.1) ## for ml-100k the test ration is 0.2
parser.add_argument('--data_valid_ratio', type=float, default=0.05)
parser.add_argument('--use_one_hot_fea', action='store_true', default=False)
parser.add_argument('--model_activation', type=str, default="leaky")
parser.add_argument('--sample_rate', type=int, default=1)
parser.add_argument('--gcn_dropout', type=float, default=0.7)
parser.add_argument('--gcn_agg_norm_symm', type=bool, default=True)
parser.add_argument('--gcn_agg_units', type=int, default=1800)
parser.add_argument('--gcn_agg_accum', type=str, default="sum")
parser.add_argument('--gcn_out_units', type=int, default=75)
parser.add_argument('--gen_r_num_basis_func', type=int, default=2)
parser.add_argument('--train_max_iter', type=int, default=50000)
parser.add_argument('--train_log_interval', type=int, default=1)
parser.add_argument('--train_valid_interval', type=int, default=1)
parser.add_argument('--train_optimizer', type=str, default="adam")
parser.add_argument('--decoder', type=str, default="Bi")
parser.add_argument('--train_grad_clip', type=float, default=1.0)
parser.add_argument('--train_lr', type=float, default=0.01)
parser.add_argument('--train_min_lr', type=float, default=0.001)
parser.add_argument('--train_lr_decay_factor', type=float, default=0.5)
parser.add_argument('--train_decay_patience', type=int, default=50)
parser.add_argument('--layers', type=int, default=1)
parser.add_argument('--train_early_stopping_patience', type=int, default=200)
parser.add_argument('--share_param', default=True, action='store_true')
parser.add_argument('--ARR', type=float, default='0.000004')
parser.add_argument('--loss_func', type=str, default='CE')
parser.add_argument('--sparse_ratio', type=float, default=0.0)
args = parser.parse_args()
args.device = th.device(args.device) if args.device >= 0 else th.device('cpu')
### configure save_fir to save all the info
now = int(round(time.time()*1000))
now02 = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(now/1000))
if args.save_dir is None:
args.save_dir = args.data_name+"_" + ''.join(now02)
if args.save_id is None:
args.save_id = np.random.randint(20)
args.save_dir = os.path.join("log", args.save_dir)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
return args
if __name__ == '__main__':
'''
ml_1m : param, ARR = 0.0000004, gcn_agg_units = 1000, gcn_agg_accum = sum, tmse = 0.8322, valid_ratio = 0.05
ml_100k : param, ARR = 0.000001, gcn_agg_units = 500, gcn_agg_accum = sum, tmse = 0.9046, valid_ratio = 0.05
    1layer ml_1m : param, ARR = 0.0000005, gcn_agg_units = 2400, gcn_agg_accum = sum, tmse = 0.8305, valid_ratio = 0.05, gcn_out_units = 75
1layer ml_100k : param, pos_emb, ARR = 0.000005, gcn_agg_units = 750, gcn_agg_accum = sum, tmse = 0.8974, valid_ratio = 0.05, gcn_out_units = 75
2layer ml_100k : param, pos_emb, ARR = 0.000005, gcn_agg_units = 750, gcn_agg_accum = sum, tmse = 0.8969, valid_ratio = 0.05, gcn_out_units = 75
    2layer ml_1m : param, ARR = 0.0000004, gcn_agg_units = 1800, gcn_agg_accum = sum, tmse = 0.8319, valid_ratio = 0.05, gcn_out_units = 75
'''
args = config()
np.random.seed(args.seed)
th.manual_seed(args.seed)
if th.cuda.is_available():
th.cuda.manual_seed_all(args.seed)
train(args)
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"random.sample",
"utils.get_activation",
"numpy.argsort",
"numpy.argpartition",
"numpy.random.randint",
"numpy.mean",
"utils.to_etype_name",
"numpy.arange",
"torch.device",
"model.MLPDecoder",
"os.path.join",
"numpy.zeros_like",
"torch.FloatTensor",
"torch.softmax",
"numpy.append",
"scipy.sparse.coo_matrix",
"random.seed",
"time.localtime",
"numpy.ones_like",
"torch.nn.ModuleList",
"torch.manual_seed",
"numpy.hstack",
"torch.cuda.is_available",
"dgl.bipartite_from_scipy",
"utils.get_optimizer",
"os.makedirs",
"numpy.log",
"numpy.logical_and",
"os.path.isdir",
"model.GCMCLayer",
"data.DataSetLoader",
"torch.nn.CrossEntropyLoss",
"time.time",
"torch.cuda.manual_seed_all",
"numpy.array",
"torch.cosine_similarity"
] |
[((2008, 2037), 'numpy.ones_like', 'np.ones_like', (['rating_pairs[0]'], {}), '(rating_pairs[0])\n', (2020, 2037), True, 'import numpy as np\n'), ((2067, 2154), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['(ones, rating_pairs)'], {'shape': '(num_user, num_movie)', 'dtype': 'np.float32'}), '((ones, rating_pairs), shape=(num_user, num_movie), dtype=np.\n float32)\n', (2080, 2154), True, 'import scipy.sparse as sp\n'), ((2175, 2263), 'dgl.bipartite_from_scipy', 'dgl.bipartite_from_scipy', (['user_movie_ratings_coo'], {'utype': '"""_U"""', 'etype': '"""_E"""', 'vtype': '"""_V"""'}), "(user_movie_ratings_coo, utype='_U', etype='_E',\n vtype='_V')\n", (2199, 2263), False, 'import dgl\n'), ((7122, 7344), 'data.DataSetLoader', 'DataSetLoader', (['args.data_name', 'args.device'], {'use_one_hot_fea': 'args.use_one_hot_fea', 'symm': 'args.gcn_agg_norm_symm', 'test_ratio': 'args.data_test_ratio', 'valid_ratio': 'args.data_valid_ratio', 'sample_rate': 'args.sample_rate'}), '(args.data_name, args.device, use_one_hot_fea=args.\n use_one_hot_fea, symm=args.gcn_agg_norm_symm, test_ratio=args.\n data_test_ratio, valid_ratio=args.data_valid_ratio, sample_rate=args.\n sample_rate)\n', (7135, 7344), False, 'from data import DataSetLoader\n'), ((7812, 7833), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7831, 7833), True, 'import torch.nn as nn\n'), ((10558, 10609), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)', '(args.train_max_iter,)'], {}), '(0, 10000, (args.train_max_iter,))\n', (10575, 10609), True, 'import numpy as np\n'), ((17375, 17394), 'numpy.hstack', 'np.hstack', (['recall50'], {}), '(recall50)\n', (17384, 17394), True, 'import numpy as np\n'), ((17411, 17431), 'numpy.hstack', 'np.hstack', (['recall100'], {}), '(recall100)\n', (17420, 17431), True, 'import numpy as np\n'), ((17448, 17468), 'numpy.hstack', 'np.hstack', (['recall200'], {}), '(recall200)\n', (17457, 17468), True, 'import numpy as np\n'), ((17482, 17499), 'numpy.hstack', 'np.hstack', (['ndcg50'], {}), '(ndcg50)\n', (17491, 17499), True, 'import numpy as np\n'), ((17514, 17532), 'numpy.hstack', 'np.hstack', (['ndcg100'], {}), '(ndcg100)\n', (17523, 17532), True, 'import numpy as np\n'), ((17547, 17565), 'numpy.hstack', 'np.hstack', (['ndcg200'], {}), '(ndcg200)\n', (17556, 17565), True, 'import numpy as np\n'), ((18096, 18139), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PGMC"""'}), "(description='PGMC')\n", (18119, 18139), False, 'import argparse\n'), ((21107, 21141), 'os.path.join', 'os.path.join', (['"""log"""', 'args.save_dir'], {}), "('log', args.save_dir)\n", (21119, 21141), False, 'import os, time\n'), ((22107, 22132), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (22121, 22132), True, 'import numpy as np\n'), ((22137, 22162), 'torch.manual_seed', 'th.manual_seed', (['args.seed'], {}), '(args.seed)\n', (22151, 22162), True, 'import torch as th\n'), ((22170, 22192), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (22190, 22192), True, 'import torch as th\n'), ((2520, 2557), 'utils.get_activation', 'get_activation', (['args.model_activation'], {}), '(args.model_activation)\n', (2534, 2557), False, 'from utils import get_activation, get_optimizer, torch_total_param_num, torch_net_info, MetricLogger\n'), ((2581, 2596), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2594, 2596), True, 'import torch.nn as nn\n'), ((7884, 7919), 'utils.get_optimizer', 'get_optimizer', (['args.train_optimizer'], {}), '(args.train_optimizer)\n', (7897, 7919), False, 'from utils import get_activation, get_optimizer, torch_total_param_num, torch_net_info, MetricLogger\n'), ((8298, 8337), 'os.path.join', 'os.path.join', (['args.save_dir', '"""NDCG.csv"""'], {}), "(args.save_dir, 'NDCG.csv')\n", (8310, 8337), False, 'import os, time\n'), ((9867, 9924), 'numpy.append', 'np.append', (['train_rating_values', 'train_rating_values_zeros'], {}), '(train_rating_values, train_rating_values_zeros)\n', (9876, 9924), True, 'import numpy as np\n'), ((10304, 10330), 'random.seed', 'random.seed', (['random_number'], {}), '(random_number)\n', (10315, 10330), False, 'import random\n'), ((14794, 14824), 'numpy.zeros_like', 'np.zeros_like', (['pre'], {'dtype': 'bool'}), '(pre, dtype=bool)\n', (14807, 14824), True, 'import numpy as np\n'), ((17663, 17680), 'numpy.mean', 'np.mean', (['recall50'], {}), '(recall50)\n', (17670, 17680), True, 'import numpy as np\n'), ((17692, 17707), 'numpy.mean', 'np.mean', (['ndcg50'], {}), '(ndcg50)\n', (17699, 17707), True, 'import numpy as np\n'), ((17732, 17750), 'numpy.mean', 'np.mean', (['recall100'], {}), '(recall100)\n', (17739, 17750), True, 'import numpy as np\n'), ((17763, 17779), 'numpy.mean', 'np.mean', (['ndcg100'], {}), '(ndcg100)\n', (17770, 17779), True, 'import numpy as np\n'), ((17804, 17822), 'numpy.mean', 'np.mean', (['recall200'], {}), '(recall200)\n', (17811, 17822), True, 'import numpy as np\n'), ((17835, 17851), 'numpy.mean', 'np.mean', (['ndcg200'], {}), '(ndcg200)\n', (17842, 17851), True, 'import numpy as np\n'), ((17957, 17974), 'numpy.mean', 'np.mean', (['recall50'], {}), '(recall50)\n', (17964, 17974), True, 'import numpy as np\n'), ((17976, 17994), 'numpy.mean', 'np.mean', (['recall100'], {}), '(recall100)\n', (17983, 17994), True, 'import numpy as np\n'), ((17996, 18014), 'numpy.mean', 'np.mean', (['recall200'], {}), '(recall200)\n', (18003, 18014), True, 'import numpy as np\n'), ((18016, 18031), 'numpy.mean', 'np.mean', (['ndcg50'], {}), '(ndcg50)\n', (18023, 18031), True, 'import numpy as np\n'), ((18033, 18049), 'numpy.mean', 'np.mean', (['ndcg100'], {}), '(ndcg100)\n', (18040, 18049), True, 'import numpy as np\n'), ((18051, 18067), 'numpy.mean', 'np.mean', (['ndcg200'], {}), '(ndcg200)\n', (18058, 18067), True, 'import numpy as np\n'), ((20698, 20720), 'torch.device', 'th.device', (['args.device'], {}), '(args.device)\n', (20707, 20720), True, 'import torch as th\n'), ((20746, 20762), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (20755, 20762), True, 'import torch as th\n'), ((20897, 20923), 'time.localtime', 'time.localtime', (['(now / 1000)'], {}), '(now / 1000)\n', (20911, 20923), False, 'import os, time\n'), ((21065, 21086), 'numpy.random.randint', 'np.random.randint', (['(20)'], {}), '(20)\n', (21082, 21086), True, 'import numpy as np\n'), ((21153, 21181), 'os.path.isdir', 'os.path.isdir', (['args.save_dir'], {}), '(args.save_dir)\n', (21166, 21181), False, 'import os, time\n'), ((21191, 21217), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {}), '(args.save_dir)\n', (21202, 21217), False, 'import os, time\n'), ((22202, 22236), 'torch.cuda.manual_seed_all', 'th.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (22225, 22236), True, 'import torch as th\n'), ((1355, 1384), 'random.sample', 'random.sample', (['x', 'sample_rate'], {}), '(x, sample_rate)\n', (1368, 1384), False, 'import random\n'), ((2625, 2861), 'model.GCMCLayer', 'GCMCLayer', (['args.rating_vals', 'args.src_in_units', 'args.dst_in_units', 'args.gcn_agg_units', 'args.gcn_out_units', 'args.gcn_dropout', 'args.gcn_agg_accum'], {'agg_act': 'self._act', 'share_user_item_param': 'args.share_param', 'device': 'args.device'}), '(args.rating_vals, args.src_in_units, args.dst_in_units, args.\n gcn_agg_units, args.gcn_out_units, args.gcn_dropout, args.gcn_agg_accum,\n agg_act=self._act, share_user_item_param=args.share_param, device=args.\n device)\n', (2634, 2861), False, 'from model import BiDecoder, GCMCLayer, MLPDecoder\n'), ((6880, 6901), 'utils.to_etype_name', 'to_etype_name', (['rating'], {}), '(rating)\n', (6893, 6901), False, 'from utils import to_etype_name\n'), ((7727, 7773), 'torch.FloatTensor', 'th.FloatTensor', (['dataset.possible_rating_values'], {}), '(dataset.possible_rating_values)\n', (7741, 7773), True, 'import torch as th\n'), ((9711, 9772), 'numpy.append', 'np.append', (['train_rating_pairs[0]', 'train_rating_pairs_zeros[0]'], {}), '(train_rating_pairs[0], train_rating_pairs_zeros[0])\n', (9720, 9772), True, 'import numpy as np\n'), ((9774, 9835), 'numpy.append', 'np.append', (['train_rating_pairs[1]', 'train_rating_pairs_zeros[1]'], {}), '(train_rating_pairs[1], train_rating_pairs_zeros[1])\n', (9783, 9835), True, 'import numpy as np\n'), ((14985, 15013), 'numpy.argpartition', 'np.argpartition', (['(-pre)', 'kj', '(1)'], {}), '(-pre, kj, 1)\n', (15000, 15013), True, 'import numpy as np\n'), ((15156, 15186), 'numpy.zeros_like', 'np.zeros_like', (['pre'], {'dtype': 'bool'}), '(pre, dtype=bool)\n', (15169, 15186), True, 'import numpy as np\n'), ((15329, 15359), 'numpy.zeros_like', 'np.zeros_like', (['pre'], {'dtype': 'bool'}), '(pre, dtype=bool)\n', (15342, 15359), True, 'import numpy as np\n'), ((15836, 15864), 'numpy.argpartition', 'np.argpartition', (['(-pre)', 'kj', '(1)'], {}), '(-pre, kj, 1)\n', (15851, 15864), True, 'import numpy as np\n'), ((16180, 16210), 'numpy.argsort', 'np.argsort', (['(-topk_part)'], {'axis': '(1)'}), '(-topk_part, axis=1)\n', (16190, 16210), True, 'import numpy as np\n'), ((3629, 3883), 'model.GCMCLayer', 'GCMCLayer', (['args.rating_vals', 'args.gcn_out_units', 'args.gcn_out_units', 'gcn_out_units', 'args.gcn_out_units', '(args.gcn_dropout - i * 0.1)', 'args.gcn_agg_accum'], {'agg_act': 'self._act', 'share_user_item_param': 'args.share_param', 'ini': '(False)', 'device': 'args.device'}), '(args.rating_vals, args.gcn_out_units, args.gcn_out_units,\n gcn_out_units, args.gcn_out_units, args.gcn_dropout - i * 0.1, args.\n gcn_agg_accum, agg_act=self._act, share_user_item_param=args.\n share_param, ini=False, device=args.device)\n', (3638, 3883), False, 'from model import BiDecoder, GCMCLayer, MLPDecoder\n'), ((4990, 5110), 'model.MLPDecoder', 'MLPDecoder', ([], {'in_units': '(args.gcn_out_units * args.layers)', 'num_classes': 'num_classes', 'num_basis': 'args.gen_r_num_basis_func'}), '(in_units=args.gcn_out_units * args.layers, num_classes=\n num_classes, num_basis=args.gen_r_num_basis_func)\n', (5000, 5110), False, 'from model import BiDecoder, GCMCLayer, MLPDecoder\n'), ((10427, 10456), 'random.sample', 'random.sample', (['x', 'sample_rate'], {}), '(x, sample_rate)\n', (10440, 10456), False, 'import random\n'), ((16487, 16496), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (16493, 16496), True, 'import numpy as np\n'), ((20832, 20843), 'time.time', 'time.time', ([], {}), '()\n', (20841, 20843), False, 'import os, time\n'), ((1586, 1638), 'numpy.array', 'np.array', (['[([ele] * num_movie) for ele in user_list]'], {}), '([([ele] * num_movie) for ele in user_list])\n', (1594, 1638), True, 'import numpy as np\n'), ((11887, 11918), 'torch.softmax', 'th.softmax', (['pred_ratings'], {'dim': '(1)'}), '(pred_ratings, dim=1)\n', (11897, 11918), True, 'import torch as th\n'), ((14331, 14362), 'torch.softmax', 'th.softmax', (['pred_ratings'], {'dim': '(1)'}), '(pred_ratings, dim=1)\n', (14341, 14362), True, 'import torch as th\n'), ((16506, 16526), 'numpy.arange', 'np.arange', (['(2)', '(kj + 2)'], {}), '(2, kj + 2)\n', (16515, 16526), True, 'import numpy as np\n'), ((6961, 7016), 'torch.cosine_similarity', 'th.cosine_similarity', (['W[i, :, :]', 'W[i - 1, :, :]'], {'dim': '(1)'}), '(W[i, :, :], W[i - 1, :, :], dim=1)\n', (6981, 7016), True, 'import torch as th\n'), ((15207, 15229), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (15216, 15229), True, 'import numpy as np\n'), ((16047, 16069), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (16056, 16069), True, 'import numpy as np\n'), ((16332, 16366), 'numpy.arange', 'np.arange', (['(end_index - start_index)'], {}), '(end_index - start_index)\n', (16341, 16366), True, 'import numpy as np\n'), ((15431, 15464), 'numpy.logical_and', 'np.logical_and', (['true_bin', 'pre_bin'], {}), '(true_bin, pre_bin)\n', (15445, 15464), True, 'import numpy as np\n'), ((1733, 1753), 'numpy.arange', 'np.arange', (['num_movie'], {}), '(num_movie)\n', (1742, 1753), True, 'import numpy as np\n'), ((16644, 16666), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (16653, 16666), True, 'import numpy as np\n')]
|
# Copyright 2020 Stanford University, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flexflow.keras.models import Model, Sequential
from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate
import flexflow.keras.optimizers
from flexflow.keras.datasets import mnist
from flexflow.keras.datasets import cifar10
from flexflow.keras import losses
from flexflow.keras import metrics
from flexflow.keras.callbacks import Callback, VerifyMetrics, EpochVerifyMetrics
from accuracy import ModelAccuracy
import flexflow.core as ff
import numpy as np
import argparse
import gc
from PIL import Image
def top_level_task():
num_samples = 10000
(x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)
full_input_np = np.zeros((num_samples, 3, 229, 229), dtype=np.float32)
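  # upsample each 32x32 CIFAR-10 image to 229x229 so it matches the AlexNet-style input shape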
for i in range(0, num_samples):
image = x_train[i, :, :, :]
image = image.transpose(1, 2, 0)
pil_image = Image.fromarray(image)
pil_image = pil_image.resize((229,229), Image.NEAREST)
image = np.array(pil_image, dtype=np.float32)
image = image.transpose(2, 0, 1)
full_input_np[i, :, :, :] = image
if (i == 0):
print(image)
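  # scale pixel values into [0, 1]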
full_input_np /= 255
y_train = y_train.astype('int32')
full_label_np = y_train
input_tensor = Input(shape=(3, 229, 229), dtype="float32")
output = Conv2D(filters=64, input_shape=(3,229,229), kernel_size=(11,11), strides=(4,4), padding=(2,2), activation="relu")(input_tensor)
output = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding="valid")(output)
output = Conv2D(filters=192, kernel_size=(5,5), strides=(1,1), padding=(2,2), activation="relu")(output)
output = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding="valid")(output)
output = Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output)
output = Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output)
output = Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output)
output = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding="valid")(output)
output = Flatten()(output)
output = Dense(4096, activation="relu")(output)
output = Dense(4096, activation="relu")(output)
output = Dense(10)(output)
output = Activation("softmax")(output)
model = Model(input_tensor, output)
opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy', 'sparse_categorical_crossentropy'])
print(model.summary())
model.fit(full_input_np, full_label_np, epochs=40, callbacks=[VerifyMetrics(ModelAccuracy.CIFAR10_ALEXNET), EpochVerifyMetrics(ModelAccuracy.CIFAR10_ALEXNET)])
if __name__ == "__main__":
print("Functional API, cifar10 alexnet")
top_level_task()
gc.collect()
|
[
"flexflow.keras.datasets.cifar10.load_data",
"flexflow.keras.models.Model",
"flexflow.keras.layers.Dense",
"flexflow.keras.callbacks.VerifyMetrics",
"flexflow.keras.layers.MaxPooling2D",
"numpy.zeros",
"flexflow.keras.layers.Input",
"flexflow.keras.layers.Flatten",
"gc.collect",
"flexflow.keras.layers.Activation",
"numpy.array",
"flexflow.keras.callbacks.EpochVerifyMetrics",
"PIL.Image.fromarray",
"flexflow.keras.layers.Conv2D"
] |
[((1277, 1307), 'flexflow.keras.datasets.cifar10.load_data', 'cifar10.load_data', (['num_samples'], {}), '(num_samples)\n', (1294, 1307), False, 'from flexflow.keras.datasets import cifar10\n'), ((1327, 1381), 'numpy.zeros', 'np.zeros', (['(num_samples, 3, 229, 229)'], {'dtype': 'np.float32'}), '((num_samples, 3, 229, 229), dtype=np.float32)\n', (1335, 1381), True, 'import numpy as np\n'), ((1856, 1899), 'flexflow.keras.layers.Input', 'Input', ([], {'shape': '(3, 229, 229)', 'dtype': '"""float32"""'}), "(shape=(3, 229, 229), dtype='float32')\n", (1861, 1899), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2925, 2952), 'flexflow.keras.models.Model', 'Model', (['input_tensor', 'output'], {}), '(input_tensor, output)\n', (2930, 2952), False, 'from flexflow.keras.models import Model, Sequential\n'), ((3424, 3436), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3434, 3436), False, 'import gc\n'), ((1501, 1523), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1516, 1523), False, 'from PIL import Image\n'), ((1595, 1632), 'numpy.array', 'np.array', (['pil_image'], {'dtype': 'np.float32'}), '(pil_image, dtype=np.float32)\n', (1603, 1632), True, 'import numpy as np\n'), ((1914, 2037), 'flexflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'input_shape': '(3, 229, 229)', 'kernel_size': '(11, 11)', 'strides': '(4, 4)', 'padding': '(2, 2)', 'activation': '"""relu"""'}), "(filters=64, input_shape=(3, 229, 229), kernel_size=(11, 11), strides\n =(4, 4), padding=(2, 2), activation='relu')\n", (1920, 2037), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2053, 2116), 'flexflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(3, 3), strides=(2, 2), padding='valid')\n", (2065, 2116), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2134, 2228), 'flexflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(192)', 'kernel_size': '(5, 5)', 'strides': '(1, 1)', 'padding': '(2, 2)', 'activation': '"""relu"""'}), "(filters=192, kernel_size=(5, 5), strides=(1, 1), padding=(2, 2),\n activation='relu')\n", (2140, 2228), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2241, 2304), 'flexflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(3, 3), strides=(2, 2), padding='valid')\n", (2253, 2304), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2322, 2416), 'flexflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(384)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(filters=384, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1),\n activation='relu')\n", (2328, 2416), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2429, 2523), 'flexflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(filters=256, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1),\n activation='relu')\n", (2435, 2523), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2536, 2630), 'flexflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '(1, 1)', 'activation': '"""relu"""'}), "(filters=256, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1),\n activation='relu')\n", (2542, 2630), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2643, 2706), 'flexflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(3, 3), strides=(2, 2), padding='valid')\n", (2655, 2706), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2724, 2733), 'flexflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2731, 2733), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2753, 2783), 'flexflow.keras.layers.Dense', 'Dense', (['(4096)'], {'activation': '"""relu"""'}), "(4096, activation='relu')\n", (2758, 2783), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2803, 2833), 'flexflow.keras.layers.Dense', 'Dense', (['(4096)'], {'activation': '"""relu"""'}), "(4096, activation='relu')\n", (2808, 2833), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2853, 2862), 'flexflow.keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (2858, 2862), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((2882, 2903), 'flexflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2892, 2903), False, 'from flexflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D, Concatenate, concatenate\n'), ((3234, 3278), 'flexflow.keras.callbacks.VerifyMetrics', 'VerifyMetrics', (['ModelAccuracy.CIFAR10_ALEXNET'], {}), '(ModelAccuracy.CIFAR10_ALEXNET)\n', (3247, 3278), False, 'from flexflow.keras.callbacks import Callback, VerifyMetrics, EpochVerifyMetrics\n'), ((3280, 3329), 'flexflow.keras.callbacks.EpochVerifyMetrics', 'EpochVerifyMetrics', (['ModelAccuracy.CIFAR10_ALEXNET'], {}), '(ModelAccuracy.CIFAR10_ALEXNET)\n', (3298, 3329), False, 'from flexflow.keras.callbacks import Callback, VerifyMetrics, EpochVerifyMetrics\n')]
|
import numpy as np
import sys
from collections import Counter
class CFeval(object):
"""Classification evaluator class"""
def __init__(self, metrics, reshapeDims, classes):
"""
# Arguments
metrics: dictionary of metrics to be evaluated, currently supports only classification accuracy
reshapeDims: list of the reshape dimensions of the image
classes: integer representing the number of classes
"""
super(CFeval, self).__init__()
self.metrics = metrics
self.avgAcc = []
self.runThrough = False
def reset(self):
self.avgAcc = []
def evaluate(self, remoteOut, classValues):
"""Evaluates the predictions produced by the model in the cloud.
# Arguments
remoteOut: numpy ndarray containing the predictions of the model in the cloud
classValues: numpy array containing the ground truth labels
"""
predictions = np.argmax(remoteOut, axis=1)
self.avgAcc.append(np.sum(np.equal(predictions, classValues))/classValues.shape[0])
def simRes(self):
"""Returns the mean of the classification accuracies over all batches of predictions.
"""
self.avgAcc = np.array(self.avgAcc)
return [np.mean(self.avgAcc)]
class ODeval(object):
"""Object detection evaluator class."""
def __init__(self, metrics, reshapeDims, classes):
"""
# Arguments
metrics: dictionary of metrics to be evaluated, currently supports only mean average precision
reshapeDims: list of the reshape dimensions of the image
classes: integer representing the number of classes
"""
super(ODeval, self).__init__()
self.metrics = metrics
self.iou = metrics['map']['iou'] #iterate through for loop for multiple values
self.reshapeDims = reshapeDims
self.n_classes = classes
self.pred_format = {'class_id': 0, 'conf': 1, 'xmin': 2, 'ymin': 3, 'xmax': 4, 'ymax': 5}
self.gt_format = {'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}
#pred format: class id, conf, xmin, ymin, xmax, ymax
#ground truth: class id, xmin, ymin, xmax, ymax
# The following lists all contain per-class data, i.e. all list have the length `n_classes + 1`,
# where one element is for the background class, i.e. that element is just a dummy entry.
self.prediction_results = [list() for _ in range(self.n_classes + 1)]
self.num_gt_per_class = None
self.groundTruth = []
self.imageId = []
self.runThrough = False
def reset(self):
self.prediction_results = [list() for _ in range(self.n_classes + 1)]
def evaluate(self, remoteOut, labels):
"""Evaluates the output of the predictions of the model in the cloud.
# Arguments
remoteOut: numpy ndarray containing the predictions of the model in the cloud
labels: ground truth labels corresponding to each image
"""
groundTruth = labels[1]
imageId = labels[0]
if not self.runThrough:
self.groundTruth+= list(groundTruth)
[self.imageId.append(i) for i in imageId]
self.predictOnBatch( remoteOut, imageId)
def simRes(self):
"""Evaluates the results of the simulation over all the iou values and returns a list
containing iou and corresponding mAp values.
"""
userRes = {}
# print(self.iou)
for i in self.iou:
# print(i)
userRes[i] = self.iterateOverIOU(self.prediction_results, i, self.imageId)
return np.array(list(userRes.items()))
def iterateOverIOU(self, preds, iou, imageId):
"""Calculates the desired metrics over all iou values.
# Arguments
preds: list containing per class prediction results of the model in the cloud
iou: IOU value for which the mAp has to be evaluated
imageId: list containing the image ID's of the images in the test set
# Returns
Mean Average Precision calculated over all classes
"""
return self.calcmAp(self.groundTruth, self.prediction_results, iou, imageId, self.n_classes)
def predictOnBatch(self, remoteOut, imageId):
"""Generates per batch predictions.
# Arguments
remoteOut: numpy ndarray representing the prediction of the model in the cloud
imageId: list containing the image ID's of all images in the batch
"""
class_id_pred = self.pred_format['class_id']
conf_pred = self.pred_format['conf']
xmin_pred = self.pred_format['xmin']
ymin_pred = self.pred_format['ymin']
xmax_pred = self.pred_format['xmax']
ymax_pred = self.pred_format['ymax']
y_pred_filtered = []
for i in range(len(remoteOut)):
y_pred_filtered.append(remoteOut[i][remoteOut[i, :, 0] !=0])
remoteOut = y_pred_filtered
for k, batch_item in enumerate(remoteOut):
image_id = imageId[k]
for box in batch_item:
class_id = int(box[class_id_pred])
confidence = box[conf_pred]
xmin = round(box[xmin_pred], 1)
ymin = round(box[ymin_pred], 1)
xmax = round(box[xmax_pred], 1)
ymax = round(box[ymax_pred], 1)
prediction = (image_id, confidence, xmin, ymin, xmax, ymax)
self.prediction_results[class_id].append(prediction)
def calcmAp(self, labels, predictions, IOUThreshold, imageIds, n_classes):
"""Calculate the mean average precision over all classes for a given IOU thershold.
# Arguments
labels: array containing the ground truth labels
predictions: list containing per class predictions
IOUThreshold: float value that represents the IOU threshold to be considered
imageIds: list containing image ID's of all images in the test set
n_classes: number of classes
# Returns
The mean average precision calculated over all classes
"""
groundTruths = []
detections = predictions
ret = []
num_classes = 0
gtsPerClass = [0]
for i in range(len(imageIds)):
imageBoxes = labels[i]
for j in range(len(imageBoxes)):
boxes = imageBoxes[j]
b = list(boxes)
b.insert(0, imageIds[i])
b.insert(2, 1)
groundTruths.append(b)
for c in range(1, n_classes+1):
dects = detections[c]
#pred format: image_id, confidence, xmin, ymin, xmax, ymax
#gt format: image_id, 'class_id', conf, 'xmin', 'ymin', 'xmax', 'ymax'
gts = []
[gts.append(g) for g in groundTruths if g[1]==c]
npos = len(gts)
gtsPerClass.append(npos)
if npos!=0:
num_classes+=1
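            # rank detections by confidence and greedily match each to the best-overlapping unmatched ground truth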
dects = sorted(dects, key=lambda conf: conf[1], reverse=True)
TP = np.zeros(len(dects))
FP = np.zeros(len(dects))
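            # per-image arrays flagging which ground-truth boxes have already been matched to a detection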
det = Counter([cc[0] for cc in gts])
for key, val in det.items():
det[key] = np.zeros(val)
for d in range(len(dects)):
gt = [gt for gt in gts if gt[0]==dects[d][0]]
iouMax = sys.float_info.min
for j in range(len(gt)):
iou = evalIOU(dects[d][2:], gt[j][3:])
if iou>iouMax:
iouMax = iou
jmax = j
if iouMax>=IOUThreshold:
if det[dects[d][0]][jmax] == 0:
TP[d] = 1
det[dects[d][0]][jmax] = 1
else:
FP[d] = 1
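            # cumulative TP/FP counts over the ranked detections trace out the precision-recall curve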
acc_FP = np.cumsum(FP)
acc_TP = np.cumsum(TP)
rec = acc_TP/npos
prec = np.divide(acc_TP,(acc_FP+acc_TP))
[ap, mpre, mrec, ii] = CalculateAveragePrecision(rec, prec)
# print(ap)
ret.append(ap)
# tot = len(ret)
print(gtsPerClass)
print(ret)
return np.nansum(ret)/num_classes
def evalIOU(boxes1, boxes2):
"""Computes the intersection over union for the given pair of boxes.
# Arguments
boxes1: list containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
boxes2: list containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
# Returns
The intersection over union of the regions under the boxes
"""
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
xmin = 0
ymin = 1
xmax = 2
ymax = 3
intersection_areas = intersection_area_(boxes1, boxes2)
boxes1_areas = (boxes1[:, xmax] - boxes1[:, xmin] + 1) * (boxes1[:, ymax] - boxes1[:, ymin] + 1)
boxes2_areas = (boxes2[:, xmax] - boxes2[:, xmin] + 1) * (boxes2[:, ymax] - boxes2[:, ymin] + 1)
union_areas = boxes1_areas + boxes2_areas - intersection_areas
return intersection_areas / union_areas
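# Illustrative check (not part of the original code): with the inclusive (+1)
# pixel convention above, evalIOU([0, 0, 10, 10], [5, 5, 15, 15]) gives an
# intersection of 6 * 6 = 36 and a union of 121 + 121 - 36 = 206, i.e. IOU ~= 0.175.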
def intersection_area_(boxes1, boxes2):
"""Computes the intersection areas of the two boxes.
# Arguments
boxes1: array containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
boxes2: array containing the corner locations of the bounding boxes in the format
<xmin, ymin, xmax, ymax>
# Returns
The area common to both the boxes
"""
xmin = 0
ymin = 1
xmax = 2
ymax = 3
min_xy = np.maximum(boxes1[:,[xmin,ymin]], boxes2[:,[xmin,ymin]])
max_xy = np.minimum(boxes1[:,[xmax,ymax]], boxes2[:,[xmax,ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + 1)
return side_lengths[:,0] * side_lengths[:,1]
def CalculateAveragePrecision(rec, prec):
"""Compute the average precision for a particular class
# Arguments
rec: cumulative recall of the class under consideration
prec: cumulative precision of the class under consideration
# Returns
Average precision per class
"""
mrec = []
mrec.append(0)
[mrec.append(e) for e in rec]
mrec.append(1)
mpre = []
mpre.append(0)
[mpre.append(e) for e in prec]
mpre.append(0)
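    # walk backwards so the precision curve becomes monotonically non-increasing (interpolated precision)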
for i in range(len(mpre)-1, 0, -1):
mpre[i-1]=max(mpre[i-1],mpre[i])
ii = []
for i in range(len(mrec)-1):
if mrec[1:][i]!=mrec[0:-1][i]:
ii.append(i+1)
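    # accumulate the area under the interpolated precision-recall curve at the recall change points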
ap = 0
for i in ii:
ap = ap + np.sum((mrec[i]-mrec[i-1])*mpre[i])
# return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii]
return [ap, mpre[0:len(mpre)-1], mrec[0:len(mpre)-1], ii]
|
[
"numpy.divide",
"numpy.nansum",
"numpy.minimum",
"numpy.maximum",
"numpy.sum",
"numpy.argmax",
"numpy.zeros",
"numpy.expand_dims",
"numpy.equal",
"numpy.cumsum",
"numpy.mean",
"numpy.array",
"collections.Counter"
] |
[((7449, 7465), 'numpy.array', 'np.array', (['boxes1'], {}), '(boxes1)\n', (7457, 7465), True, 'import numpy as np\n'), ((7476, 7492), 'numpy.array', 'np.array', (['boxes2'], {}), '(boxes2)\n', (7484, 7492), True, 'import numpy as np\n'), ((8460, 8520), 'numpy.maximum', 'np.maximum', (['boxes1[:, [xmin, ymin]]', 'boxes2[:, [xmin, ymin]]'], {}), '(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]])\n', (8470, 8520), True, 'import numpy as np\n'), ((8527, 8587), 'numpy.minimum', 'np.minimum', (['boxes1[:, [xmax, ymax]]', 'boxes2[:, [xmax, ymax]]'], {}), '(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]])\n', (8537, 8587), True, 'import numpy as np\n'), ((8661, 8695), 'numpy.maximum', 'np.maximum', (['(0)', '(max_xy - min_xy + 1)'], {}), '(0, max_xy - min_xy + 1)\n', (8671, 8695), True, 'import numpy as np\n'), ((852, 880), 'numpy.argmax', 'np.argmax', (['remoteOut'], {'axis': '(1)'}), '(remoteOut, axis=1)\n', (861, 880), True, 'import numpy as np\n'), ((1097, 1118), 'numpy.array', 'np.array', (['self.avgAcc'], {}), '(self.avgAcc)\n', (1105, 1118), True, 'import numpy as np\n'), ((7525, 7555), 'numpy.expand_dims', 'np.expand_dims', (['boxes1'], {'axis': '(0)'}), '(boxes1, axis=0)\n', (7539, 7555), True, 'import numpy as np\n'), ((7587, 7617), 'numpy.expand_dims', 'np.expand_dims', (['boxes2'], {'axis': '(0)'}), '(boxes2, axis=0)\n', (7601, 7617), True, 'import numpy as np\n'), ((1129, 1149), 'numpy.mean', 'np.mean', (['self.avgAcc'], {}), '(self.avgAcc)\n', (1136, 1149), True, 'import numpy as np\n'), ((6246, 6276), 'collections.Counter', 'Counter', (['[cc[0] for cc in gts]'], {}), '([cc[0] for cc in gts])\n', (6253, 6276), False, 'from collections import Counter\n'), ((6732, 6745), 'numpy.cumsum', 'np.cumsum', (['FP'], {}), '(FP)\n', (6741, 6745), True, 'import numpy as np\n'), ((6758, 6771), 'numpy.cumsum', 'np.cumsum', (['TP'], {}), '(TP)\n', (6767, 6771), True, 'import numpy as np\n'), ((6804, 6838), 'numpy.divide', 'np.divide', (['acc_TP', '(acc_FP + acc_TP)'], {}), '(acc_TP, acc_FP + acc_TP)\n', (6813, 6838), True, 'import numpy as np\n'), ((6996, 7010), 'numpy.nansum', 'np.nansum', (['ret'], {}), '(ret)\n', (7005, 7010), True, 'import numpy as np\n'), ((9369, 9410), 'numpy.sum', 'np.sum', (['((mrec[i] - mrec[i - 1]) * mpre[i])'], {}), '((mrec[i] - mrec[i - 1]) * mpre[i])\n', (9375, 9410), True, 'import numpy as np\n'), ((6325, 6338), 'numpy.zeros', 'np.zeros', (['val'], {}), '(val)\n', (6333, 6338), True, 'import numpy as np\n'), ((909, 943), 'numpy.equal', 'np.equal', (['predictions', 'classValues'], {}), '(predictions, classValues)\n', (917, 943), True, 'import numpy as np\n')]
|
# coding: utf-8
# In[ ]:
import cv2
from keras.models import load_model
import numpy as np
from collections import deque
from keras.preprocessing import image
import keras
import os
# In[ ]:
model1 = load_model('mob_logo_model.h5')
val = ['Adidas','Apple','BMW','Citroen','Fedex','HP','Mcdonalds','Nike','none','Pepsi','Puma']
pred_class = 8
# In[ ]:
def nothing(x):
pass
cap = cv2.VideoCapture(0)
cv2.namedWindow("Trackbars")
cv2.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
# In[ ]:
def main():
logos = get_logos()
cap = cv2.VideoCapture(0)
Lower_green = np.array([10,130,130])
Upper_green = np.array([40,255,255])
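    # bounded trail of tracked marker centres; the oldest points drop off automatically once maxlen is reached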
pts = deque(maxlen=512)
blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
value = np.zeros((224,224,3), dtype = np.uint8)
#print(blackboard)
digit = np.zeros((200, 200, 3), dtype=np.uint8)
pred_class = 8
while (cap.isOpened()):
ret, img = cap.read()
img = cv2.flip(img, 1)
cv2.rectangle(img,(400,250),(624,474),(255,0,255),5)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
l_h = cv2.getTrackbarPos("L - H", "Trackbars")
l_s = cv2.getTrackbarPos("L - S", "Trackbars")
l_v = cv2.getTrackbarPos("L - V", "Trackbars")
u_h = cv2.getTrackbarPos("U - H", "Trackbars")
u_s = cv2.getTrackbarPos("U - S", "Trackbars")
u_v = cv2.getTrackbarPos("U - V", "Trackbars")
        Lower_green = np.array([l_h, l_s, l_v])  # use the trackbars to tune the colour being tracked for the doodles
        Upper_green = np.array([u_h, u_s, u_v])  # e.g. 0,131,157 / 179,255,255 for orange; the upper hue must come from u_h, not u_v
kernel = np.ones((5, 5), np.uint8)
mask = cv2.inRange(hsv, Lower_green, Upper_green)
mask = cv2.erode(mask, kernel, iterations=2)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)
mask = cv2.dilate(mask, kernel, iterations=1)
res = cv2.bitwise_and(img, img, mask=mask)
cnts, heir = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
center = None
if len(cnts) >= 1:
cnt = max(cnts, key=cv2.contourArea)
#print(cnt)
if cv2.contourArea(cnt) > 200:
((x, y), radius) = cv2.minEnclosingCircle(cnt)
cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
                M = cv2.moments(cnt)
                center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
                cv2.circle(img, center, 5, (0, 0, 255), -1)
pts.appendleft(center)
for i in range(1, len(pts)):
if pts[i - 1] is None or pts[i] is None:
continue
cv2.line(blackboard, pts[i - 1], pts[i], (255, 255, 255), 7)
cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), 2)
elif len(cnts) == 0:
            if len(pts) != 0:
blackboard_gray = cv2.cvtColor(blackboard, cv2.COLOR_BGR2GRAY)
blur1 = cv2.medianBlur(blackboard_gray, 15)
blur1 = cv2.GaussianBlur(blur1, (5, 5), 0)
thresh1 = cv2.threshold(blur1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
                blackboard_cnts = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2]  # [-2] picks the contour list under both the OpenCV 3 and OpenCV 4 return signatures
if len(blackboard_cnts) >= 1:
cnt = max(blackboard_cnts, key=cv2.contourArea)
#print(cv2.contourArea(cnt))
if cv2.contourArea(cnt) > 2000:
value = blackboard[250:474, 400:624]
pred_probab, pred_class = keras_predict(model1, value)
print(val[pred_class], pred_probab)
pts = deque(maxlen=512)
blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
img = overlay(img, logos[pred_class])
cv2.imshow("Frame", img)
cv2.imshow("Res", res)
cv2.imshow("mask", mask)
k = cv2.waitKey(10)
if k == 27:
break
# In[ ]:
def keras_predict(model, img):
    processed = keras_process_image(img)
    print("processed: " + str(processed.shape))
    pred_probab = model.predict(processed)[0]
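    # the predicted class is the index of the largest softmax probability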
pred_class = list(pred_probab).index(max(pred_probab))
return max(pred_probab), pred_class
# In[ ]:
def keras_process_image(img):
img_array = image.img_to_array(img)
img_array_expanded_dims = np.expand_dims(img_array, axis = 0)
return keras.applications.mobilenet.preprocess_input(img_array_expanded_dims)
# In[ ]:
def get_logos():
logos_folder = "../logo/"
logos = []
for logo in range(len(os.listdir(logos_folder))):
logos.append(cv2.imread(logos_folder + str(logo) + '.png', cv2.IMREAD_UNCHANGED))
print(logos)
return logos
# In[ ]:
def overlay(image, logo):
x,y,z = logo.shape
#try:
image[0:x, 0:y] = blend_transparent(image[0:x, 0:y ], logo)
#except:
#pass
return image
# In[ ]:
def blend_transparent(face_img, overlay_t_img):
# Split out the transparency mask from the colour info
overlay_img = overlay_t_img[:, :, :3] # Grab the BRG planes
overlay_mask = overlay_t_img[:, :, 3:] # And the alpha plane
# Again calculate the inverse mask
background_mask = 255 - overlay_mask
# Turn the masks into three channel, so we can use them as weights
overlay_mask = cv2.cvtColor(overlay_mask, cv2.COLOR_GRAY2BGR)
background_mask = cv2.cvtColor(background_mask, cv2.COLOR_GRAY2BGR)
# Create a masked out face image, and masked out overlay
# We convert the images to floating point in range 0.0 - 1.0
face_part = (face_img * (1 / 255.0)) * (background_mask * (1 / 255.0))
overlay_part = (overlay_img * (1 / 255.0)) * (overlay_mask * (1 / 255.0))
# And finally just add them together, and rescale it back to an 8bit integer image
return np.uint8(cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0))
# In[ ]:
keras_predict(model1, np.zeros((224, 224, 3), dtype=np.uint8))
main()
|
[
"keras.models.load_model",
"cv2.GaussianBlur",
"cv2.bitwise_and",
"cv2.medianBlur",
"numpy.ones",
"keras.preprocessing.image.img_to_array",
"cv2.rectangle",
"cv2.erode",
"cv2.imshow",
"cv2.inRange",
"collections.deque",
"cv2.line",
"cv2.contourArea",
"cv2.dilate",
"cv2.cvtColor",
"cv2.getTrackbarPos",
"cv2.createTrackbar",
"cv2.circle",
"cv2.minEnclosingCircle",
"cv2.waitKey",
"cv2.morphologyEx",
"cv2.addWeighted",
"cv2.flip",
"keras.applications.mobilenet.preprocess_input",
"os.listdir",
"cv2.threshold",
"cv2.moments",
"numpy.zeros",
"numpy.expand_dims",
"cv2.VideoCapture",
"numpy.array",
"cv2.namedWindow"
] |
[((210, 241), 'keras.models.load_model', 'load_model', (['"""mob_logo_model.h5"""'], {}), "('mob_logo_model.h5')\n", (220, 241), False, 'from keras.models import load_model\n'), ((399, 418), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (415, 418), False, 'import cv2\n'), ((419, 447), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Trackbars"""'], {}), "('Trackbars')\n", (434, 447), False, 'import cv2\n'), ((450, 507), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""L - H"""', '"""Trackbars"""', '(0)', '(179)', 'nothing'], {}), "('L - H', 'Trackbars', 0, 179, nothing)\n", (468, 507), False, 'import cv2\n'), ((508, 565), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""L - S"""', '"""Trackbars"""', '(0)', '(255)', 'nothing'], {}), "('L - S', 'Trackbars', 0, 255, nothing)\n", (526, 565), False, 'import cv2\n'), ((566, 623), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""L - V"""', '"""Trackbars"""', '(0)', '(255)', 'nothing'], {}), "('L - V', 'Trackbars', 0, 255, nothing)\n", (584, 623), False, 'import cv2\n'), ((624, 683), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""U - H"""', '"""Trackbars"""', '(179)', '(179)', 'nothing'], {}), "('U - H', 'Trackbars', 179, 179, nothing)\n", (642, 683), False, 'import cv2\n'), ((684, 743), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""U - S"""', '"""Trackbars"""', '(255)', '(255)', 'nothing'], {}), "('U - S', 'Trackbars', 255, 255, nothing)\n", (702, 743), False, 'import cv2\n'), ((744, 803), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""U - V"""', '"""Trackbars"""', '(255)', '(255)', 'nothing'], {}), "('U - V', 'Trackbars', 255, 255, nothing)\n", (762, 803), False, 'import cv2\n'), ((864, 883), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (880, 883), False, 'import cv2\n'), ((902, 926), 'numpy.array', 'np.array', (['[10, 130, 130]'], {}), '([10, 130, 130])\n', (910, 926), True, 'import numpy as np\n'), ((943, 967), 'numpy.array', 'np.array', (['[40, 255, 255]'], {}), '([40, 255, 255])\n', (951, 967), True, 'import numpy as np\n'), ((976, 993), 'collections.deque', 'deque', ([], {'maxlen': '(512)'}), '(maxlen=512)\n', (981, 993), False, 'from collections import deque\n'), ((1011, 1050), 'numpy.zeros', 'np.zeros', (['(480, 640, 3)'], {'dtype': 'np.uint8'}), '((480, 640, 3), dtype=np.uint8)\n', (1019, 1050), True, 'import numpy as np\n'), ((1063, 1102), 'numpy.zeros', 'np.zeros', (['(224, 224, 3)'], {'dtype': 'np.uint8'}), '((224, 224, 3), dtype=np.uint8)\n', (1071, 1102), True, 'import numpy as np\n'), ((1143, 1182), 'numpy.zeros', 'np.zeros', (['(200, 200, 3)'], {'dtype': 'np.uint8'}), '((200, 200, 3), dtype=np.uint8)\n', (1151, 1182), True, 'import numpy as np\n'), ((5020, 5043), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (5038, 5043), False, 'from keras.preprocessing import image\n'), ((5079, 5112), 'numpy.expand_dims', 'np.expand_dims', (['img_array'], {'axis': '(0)'}), '(img_array, axis=0)\n', (5093, 5112), True, 'import numpy as np\n'), ((5131, 5201), 'keras.applications.mobilenet.preprocess_input', 'keras.applications.mobilenet.preprocess_input', (['img_array_expanded_dims'], {}), '(img_array_expanded_dims)\n', (5176, 5201), False, 'import keras\n'), ((6116, 6162), 'cv2.cvtColor', 'cv2.cvtColor', (['overlay_mask', 'cv2.COLOR_GRAY2BGR'], {}), '(overlay_mask, cv2.COLOR_GRAY2BGR)\n', (6128, 6162), False, 'import cv2\n'), ((6185, 6234), 'cv2.cvtColor', 'cv2.cvtColor', (['background_mask', 'cv2.COLOR_GRAY2BGR'], {}), '(background_mask, cv2.COLOR_GRAY2BGR)\n', (6197, 6234), False, 'import cv2\n'), ((6719, 6758), 'numpy.zeros', 'np.zeros', (['(224, 224, 3)'], {'dtype': 'np.uint8'}), '((224, 224, 3), dtype=np.uint8)\n', (6727, 6758), True, 'import numpy as np\n'), ((1275, 1291), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (1283, 1291), False, 'import cv2\n'), ((1300, 1360), 'cv2.rectangle', 'cv2.rectangle', (['img', '(400, 250)', '(624, 474)', '(255, 0, 255)', '(5)'], {}), '(img, (400, 250), (624, 474), (255, 0, 255), 5)\n', (1313, 1360), False, 'import cv2\n'), ((1367, 1403), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (1379, 1403), False, 'import cv2\n'), ((1418, 1458), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""L - H"""', '"""Trackbars"""'], {}), "('L - H', 'Trackbars')\n", (1436, 1458), False, 'import cv2\n'), ((1473, 1513), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""L - S"""', '"""Trackbars"""'], {}), "('L - S', 'Trackbars')\n", (1491, 1513), False, 'import cv2\n'), ((1528, 1568), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""L - V"""', '"""Trackbars"""'], {}), "('L - V', 'Trackbars')\n", (1546, 1568), False, 'import cv2\n'), ((1583, 1623), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""U - H"""', '"""Trackbars"""'], {}), "('U - H', 'Trackbars')\n", (1601, 1623), False, 'import cv2\n'), ((1638, 1678), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""U - S"""', '"""Trackbars"""'], {}), "('U - S', 'Trackbars')\n", (1656, 1678), False, 'import cv2\n'), ((1693, 1733), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""U - V"""', '"""Trackbars"""'], {}), "('U - V', 'Trackbars')\n", (1711, 1733), False, 'import cv2\n'), ((1764, 1789), 'numpy.array', 'np.array', (['[l_h, l_s, l_v]'], {}), '([l_h, l_s, l_v])\n', (1772, 1789), True, 'import numpy as np\n'), ((1885, 1910), 'numpy.array', 'np.array', (['[u_v, u_s, u_v]'], {}), '([u_v, u_s, u_v])\n', (1893, 1910), True, 'import numpy as np\n'), ((1979, 2004), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (1986, 2004), True, 'import numpy as np\n'), ((2020, 2062), 'cv2.inRange', 'cv2.inRange', (['hsv', 'Lower_green', 'Upper_green'], {}), '(hsv, Lower_green, Upper_green)\n', (2031, 2062), False, 'import cv2\n'), ((2078, 2115), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {'iterations': '(2)'}), '(mask, kernel, iterations=2)\n', (2087, 2115), False, 'import cv2\n'), ((2131, 2177), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(mask, cv2.MORPH_OPEN, kernel)\n', (2147, 2177), False, 'import cv2\n'), ((2191, 2238), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(mask, cv2.MORPH_CLOSE, kernel)\n', (2207, 2238), False, 'import cv2\n'), ((2252, 2290), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel'], {'iterations': '(1)'}), '(mask, kernel, iterations=1)\n', (2262, 2290), False, 'import cv2\n'), ((2305, 2341), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (2320, 2341), False, 'import cv2\n'), ((4500, 4524), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'img'], {}), "('Frame', img)\n", (4510, 4524), False, 'import cv2\n'), ((4533, 4555), 'cv2.imshow', 'cv2.imshow', (['"""Res"""', 'res'], {}), "('Res', res)\n", (4543, 4555), False, 'import cv2\n'), ((4564, 4588), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (4574, 4588), False, 'import cv2\n'), ((4619, 4634), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (4630, 4634), False, 'import cv2\n'), ((6623, 6682), 'cv2.addWeighted', 'cv2.addWeighted', (['face_part', '(255.0)', 'overlay_part', '(255.0)', '(0.0)'], {}), '(face_part, 255.0, overlay_part, 255.0, 0.0)\n', (6638, 6682), False, 'import cv2\n'), ((5318, 5342), 'os.listdir', 'os.listdir', (['logos_folder'], {}), '(logos_folder)\n', (5328, 5342), False, 'import os\n'), ((2620, 2640), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (2635, 2640), False, 'import cv2\n'), ((2700, 2727), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['cnt'], {}), '(cnt)\n', (2722, 2727), False, 'import cv2\n'), ((2825, 2868), 'cv2.circle', 'cv2.circle', (['img', 'center', '(5)', '(0, 0, 255)', '(-1)'], {}), '(img, center, 5, (0, 0, 255), -1)\n', (2835, 2868), False, 'import cv2\n'), ((2889, 2905), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (2900, 2905), False, 'import cv2\n'), ((4309, 4326), 'collections.deque', 'deque', ([], {'maxlen': '(512)'}), '(maxlen=512)\n', (4314, 4326), False, 'from collections import deque\n'), ((4378, 4417), 'numpy.zeros', 'np.zeros', (['(480, 640, 3)'], {'dtype': 'np.uint8'}), '((480, 640, 3), dtype=np.uint8)\n', (4386, 4417), True, 'import numpy as np\n'), ((3182, 3242), 'cv2.line', 'cv2.line', (['blackboard', 'pts[i - 1]', 'pts[i]', '(255, 255, 255)', '(7)'], {}), '(blackboard, pts[i - 1], pts[i], (255, 255, 255), 7)\n', (3190, 3242), False, 'import cv2\n'), ((3263, 3312), 'cv2.line', 'cv2.line', (['img', 'pts[i - 1]', 'pts[i]', '(0, 0, 255)', '(2)'], {}), '(img, pts[i - 1], pts[i], (0, 0, 255), 2)\n', (3271, 3312), False, 'import cv2\n'), ((3458, 3502), 'cv2.cvtColor', 'cv2.cvtColor', (['blackboard', 'cv2.COLOR_BGR2GRAY'], {}), '(blackboard, cv2.COLOR_BGR2GRAY)\n', (3470, 3502), False, 'import cv2\n'), ((3553, 3588), 'cv2.medianBlur', 'cv2.medianBlur', (['blackboard_gray', '(15)'], {}), '(blackboard_gray, 15)\n', (3567, 3588), False, 'import cv2\n'), ((3613, 3647), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['blur1', '(5, 5)', '(0)'], {}), '(blur1, (5, 5), 0)\n', (3629, 3647), False, 'import cv2\n'), ((3674, 3739), 'cv2.threshold', 'cv2.threshold', (['blur1', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(blur1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (3687, 3739), False, 'import cv2\n'), ((4037, 4057), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (4052, 4057), False, 'import cv2\n')]
|
from itertools import permutations
import numpy as np
import pytest
from pyomeca import Angles, Rototrans, Markers
SEQ = (
["".join(p) for i in range(1, 4) for p in permutations("xyz", i)]
+ ["zyzz"]
+ ["zxz"]
)
SEQ = [s for s in SEQ if s not in ["yxz"]]
EPSILON = 1e-12
ANGLES = Angles(np.random.rand(4, 1, 100))
@pytest.mark.parametrize("seq", SEQ)
def test_euler2rot_rot2euleur(seq, angles=ANGLES, epsilon=EPSILON):
if seq == "zyzz":
angles_to_test = angles[:3, ...]
else:
angles_to_test = angles[: len(seq), ...]
r = Rototrans.from_euler_angles(angles=angles_to_test, angle_sequence=seq)
a = Angles.from_rototrans(rototrans=r, angle_sequence=seq)
np.testing.assert_array_less((a - angles_to_test).meca.abs().sum(), epsilon)
def test_construct_rt():
eye = Rototrans()
np.testing.assert_equal(eye.time.size, 1)
np.testing.assert_equal(eye.sel(time=0), np.eye(4))
eye = Rototrans.from_euler_angles()
np.testing.assert_equal(eye.time.size, 1)
np.testing.assert_equal(eye.sel(time=0), np.eye(4))
    # Test the way to create a rt, but not when providing both angles and sequence
nb_frames = 10
random_vector = Angles(np.random.rand(3, 1, nb_frames))
# with angles
rt_random_angles = Rototrans.from_euler_angles(
angles=random_vector, angle_sequence="xyz"
)
np.testing.assert_equal(rt_random_angles.time.size, nb_frames)
np.testing.assert_equal(
rt_random_angles[:-1, -1:, :], np.zeros((3, 1, nb_frames))
) # Translation is 0
# with translation
rt_random_translation = Rototrans.from_euler_angles(translations=random_vector)
np.testing.assert_equal(rt_random_translation.time.size, nb_frames)
np.testing.assert_equal(
rt_random_translation[:3, :3, :],
np.repeat(np.eye(3)[:, :, np.newaxis], nb_frames, axis=2),
) # rotation is eye3
np.arange(0, rt_random_angles.time.size / 0.5, 1 / 0.5)
rt_with_time = Rototrans(
rt_random_angles, time=np.arange(0, rt_random_angles.time.size / 100, 1 / 100),
)
assert rt_with_time.time[-1] == 0.09
with pytest.raises(IndexError):
Rototrans(data=np.zeros(1))
with pytest.raises(IndexError):
Rototrans.from_euler_angles(
angles=random_vector[..., :5],
translations=random_vector,
angle_sequence="x",
)
with pytest.raises(IndexError):
Rototrans.from_euler_angles(angles=random_vector, angle_sequence="x")
with pytest.raises(ValueError):
Rototrans.from_euler_angles(angles=random_vector, angle_sequence="nop")
def test_rt_from_markers():
all_m = Markers.from_random_data()
rt_xy = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="xy",
axis_to_recalculate="y",
)
rt_yx = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 2]),
axis_2=all_m.isel(channel=[0, 1]),
axes_name="yx",
axis_to_recalculate="y",
)
rt_xy_x_recalc = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="yx",
axis_to_recalculate="x",
)
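    # swap the x and y columns and flip the sign of the z axis so the recalculated frame matches rt_xy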
rt_xy_x_recalc = rt_xy_x_recalc.isel(col=[1, 0, 2, 3])
rt_xy_x_recalc[:, 2, :] = -rt_xy_x_recalc[:, 2, :]
rt_yz = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="yz",
axis_to_recalculate="z",
)
rt_zy = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 2]),
axis_2=all_m.isel(channel=[0, 1]),
axes_name="zy",
axis_to_recalculate="z",
)
rt_xy_from_yz = rt_yz.isel(col=[1, 2, 0, 3])
rt_xz = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="xz",
axis_to_recalculate="z",
)
rt_zx = Rototrans.from_markers(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 2]),
axis_2=all_m.isel(channel=[0, 1]),
axes_name="zx",
axis_to_recalculate="z",
)
rt_xy_from_zx = rt_xz.isel(col=[0, 2, 1, 3])
rt_xy_from_zx[:, 2, :] = -rt_xy_from_zx[:, 2, :]
np.testing.assert_array_equal(rt_xy, rt_xy_x_recalc)
np.testing.assert_array_equal(rt_xy, rt_yx)
np.testing.assert_array_equal(rt_yz, rt_zy)
np.testing.assert_array_equal(rt_xz, rt_zx)
np.testing.assert_array_equal(rt_xy, rt_xy_from_yz)
np.testing.assert_array_equal(rt_xy, rt_xy_from_zx)
# Produce one that we know the solution
ref_m = Markers(np.array(((1, 2, 3), (4, 5, 6), (6, 5, 4))).T[:, :, np.newaxis])
rt_xy_from_known_m = Rototrans.from_markers(
origin=ref_m.isel(channel=[0]),
axis_1=ref_m.isel(channel=[0, 1]),
axis_2=ref_m.isel(channel=[0, 2]),
axes_name="xy",
axis_to_recalculate="y",
)
rt_xy_expected = Rototrans(
np.array(
[
[0.5773502691896257, 0.7071067811865475, -0.408248290463863, 1.0],
[0.5773502691896257, 0.0, 0.816496580927726, 2.0],
[0.5773502691896257, -0.7071067811865475, -0.408248290463863, 3.0],
[0, 0, 0, 1.0],
]
)
)
np.testing.assert_array_equal(rt_xy_from_known_m, rt_xy_expected)
exception_default_params = dict(
origin=all_m.isel(channel=[0]),
axis_1=all_m.isel(channel=[0, 1]),
axis_2=all_m.isel(channel=[0, 2]),
axes_name="xy",
axis_to_recalculate="y",
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(origin=all_m.isel(channel=[0, 1]))}
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(axis_1=all_m.isel(channel=[0]))}
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(axis_2=all_m.isel(channel=[0]))}
)
with pytest.raises(ValueError):
Rototrans.from_markers(
**{
**exception_default_params,
**dict(axis_1=all_m.isel(channel=[0, 1], time=slice(None, 50))),
}
)
with pytest.raises(ValueError):
Rototrans.from_markers(**{**exception_default_params, **dict(axes_name="yyz")})
with pytest.raises(ValueError):
Rototrans.from_markers(**{**exception_default_params, **dict(axes_name="xxz")})
with pytest.raises(ValueError):
Rototrans.from_markers(**{**exception_default_params, **dict(axes_name="zzz")})
with pytest.raises(ValueError):
Rototrans.from_markers(
**{**exception_default_params, **dict(axis_to_recalculate="h")}
)
def test_rt_transpose():
n_frames = 10
angles = Angles.from_random_data(size=(3, 1, n_frames))
rt = Rototrans.from_euler_angles(angles, angle_sequence="xyz")
rt_t = Rototrans.from_transposed_rototrans(rt)
rt_t_expected = np.zeros((4, 4, n_frames))
rt_t_expected[3, 3, :] = 1
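    # expected inverse transform: rotation block transposed, translation t' = -R.T @ t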
for row in range(rt.row.size):
for col in range(rt.col.size):
for frame in range(rt.time.size):
rt_t_expected[col, row, frame] = rt[row, col, frame]
for frame in range(rt.time.size):
rt_t_expected[:3, 3, frame] = -rt_t_expected[:3, :3, frame].dot(
rt[:3, 3, frame]
)
np.testing.assert_array_almost_equal(rt_t, rt_t_expected, decimal=10)
def test_average_rt():
# TODO: investigate why this does not work
# angles = Angles.from_random_data(size=(3, 1, 100))
# or
# angles = Angles(np.arange(300).reshape((3, 1, 100)))
angles = Angles(np.random.rand(3, 1, 100))
seq = "xyz"
rt = Rototrans.from_euler_angles(angles, seq)
rt_mean = Rototrans.from_averaged_rototrans(rt)
angles_mean = Angles.from_rototrans(rt_mean, seq).isel(time=0)
angles_mean_ref = Angles.from_rototrans(rt, seq).mean(dim="time")
np.testing.assert_array_almost_equal(angles_mean, angles_mean_ref, decimal=2)
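    # Note: one common way to average rotations (a plausible reading of
    # from_averaged_rototrans; pyomeca's exact method may differ) is to take the
    # arithmetic mean of the rotation blocks and project it back onto SO(3) via
    # SVD: M = mean of the R_i; U, _, Vt = np.linalg.svd(M); R_mean = U @ Vt.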
|
[
"pyomeca.Markers.from_random_data",
"numpy.eye",
"pyomeca.Angles.from_rototrans",
"numpy.testing.assert_array_equal",
"itertools.permutations",
"pyomeca.Rototrans.from_euler_angles",
"numpy.zeros",
"pyomeca.Rototrans.from_averaged_rototrans",
"pytest.raises",
"pyomeca.Angles.from_random_data",
"numpy.arange",
"numpy.array",
"numpy.testing.assert_equal",
"pyomeca.Rototrans.from_transposed_rototrans",
"numpy.random.rand",
"pytest.mark.parametrize",
"numpy.testing.assert_array_almost_equal",
"pyomeca.Rototrans"
] |
[((332, 367), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seq"""', 'SEQ'], {}), "('seq', SEQ)\n", (355, 367), False, 'import pytest\n'), ((302, 327), 'numpy.random.rand', 'np.random.rand', (['(4)', '(1)', '(100)'], {}), '(4, 1, 100)\n', (316, 327), True, 'import numpy as np\n'), ((566, 636), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {'angles': 'angles_to_test', 'angle_sequence': 'seq'}), '(angles=angles_to_test, angle_sequence=seq)\n', (593, 636), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((645, 699), 'pyomeca.Angles.from_rototrans', 'Angles.from_rototrans', ([], {'rototrans': 'r', 'angle_sequence': 'seq'}), '(rototrans=r, angle_sequence=seq)\n', (666, 699), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((819, 830), 'pyomeca.Rototrans', 'Rototrans', ([], {}), '()\n', (828, 830), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((835, 876), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['eye.time.size', '(1)'], {}), '(eye.time.size, 1)\n', (858, 876), True, 'import numpy as np\n'), ((944, 973), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {}), '()\n', (971, 973), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((978, 1019), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['eye.time.size', '(1)'], {}), '(eye.time.size, 1)\n', (1001, 1019), True, 'import numpy as np\n'), ((1280, 1351), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {'angles': 'random_vector', 'angle_sequence': '"""xyz"""'}), "(angles=random_vector, angle_sequence='xyz')\n", (1307, 1351), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((1370, 1432), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['rt_random_angles.time.size', 'nb_frames'], {}), '(rt_random_angles.time.size, nb_frames)\n', (1393, 1432), True, 'import numpy as np\n'), ((1607, 1662), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {'translations': 'random_vector'}), '(translations=random_vector)\n', (1634, 1662), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((1667, 1734), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['rt_random_translation.time.size', 'nb_frames'], {}), '(rt_random_translation.time.size, nb_frames)\n', (1690, 1734), True, 'import numpy as np\n'), ((1903, 1958), 'numpy.arange', 'np.arange', (['(0)', '(rt_random_angles.time.size / 0.5)', '(1 / 0.5)'], {}), '(0, rt_random_angles.time.size / 0.5, 1 / 0.5)\n', (1912, 1958), True, 'import numpy as np\n'), ((2671, 2697), 'pyomeca.Markers.from_random_data', 'Markers.from_random_data', ([], {}), '()\n', (2695, 2697), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((4559, 4611), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_xy', 'rt_xy_x_recalc'], {}), '(rt_xy, rt_xy_x_recalc)\n', (4588, 4611), True, 'import numpy as np\n'), ((4616, 4659), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_xy', 'rt_yx'], {}), '(rt_xy, rt_yx)\n', (4645, 4659), True, 'import numpy as np\n'), ((4664, 4707), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_yz', 'rt_zy'], {}), '(rt_yz, rt_zy)\n', (4693, 4707), True, 'import numpy as np\n'), ((4712, 4755), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_xz', 'rt_zx'], {}), '(rt_xz, rt_zx)\n', (4741, 4755), True, 'import numpy as np\n'), ((4760, 4811), 'numpy.testing.assert_array_equal', 
'np.testing.assert_array_equal', (['rt_xy', 'rt_xy_from_yz'], {}), '(rt_xy, rt_xy_from_yz)\n', (4789, 4811), True, 'import numpy as np\n'), ((4816, 4867), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_xy', 'rt_xy_from_zx'], {}), '(rt_xy, rt_xy_from_zx)\n', (4845, 4867), True, 'import numpy as np\n'), ((5602, 5667), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['rt_xy_from_known_m', 'rt_xy_expected'], {}), '(rt_xy_from_known_m, rt_xy_expected)\n', (5631, 5667), True, 'import numpy as np\n'), ((7205, 7251), 'pyomeca.Angles.from_random_data', 'Angles.from_random_data', ([], {'size': '(3, 1, n_frames)'}), '(size=(3, 1, n_frames))\n', (7228, 7251), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((7261, 7318), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', (['angles'], {'angle_sequence': '"""xyz"""'}), "(angles, angle_sequence='xyz')\n", (7288, 7318), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((7331, 7370), 'pyomeca.Rototrans.from_transposed_rototrans', 'Rototrans.from_transposed_rototrans', (['rt'], {}), '(rt)\n', (7366, 7370), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((7392, 7418), 'numpy.zeros', 'np.zeros', (['(4, 4, n_frames)'], {}), '((4, 4, n_frames))\n', (7400, 7418), True, 'import numpy as np\n'), ((7795, 7864), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['rt_t', 'rt_t_expected'], {'decimal': '(10)'}), '(rt_t, rt_t_expected, decimal=10)\n', (7831, 7864), True, 'import numpy as np\n'), ((8135, 8175), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', (['angles', 'seq'], {}), '(angles, seq)\n', (8162, 8175), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((8190, 8227), 'pyomeca.Rototrans.from_averaged_rototrans', 'Rototrans.from_averaged_rototrans', (['rt'], {}), '(rt)\n', (8223, 8227), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((8371, 8448), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['angles_mean', 'angles_mean_ref'], {'decimal': '(2)'}), '(angles_mean, angles_mean_ref, decimal=2)\n', (8407, 8448), True, 'import numpy as np\n'), ((922, 931), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (928, 931), True, 'import numpy as np\n'), ((1065, 1074), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1071, 1074), True, 'import numpy as np\n'), ((1205, 1236), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)', 'nb_frames'], {}), '(3, 1, nb_frames)\n', (1219, 1236), True, 'import numpy as np\n'), ((1501, 1528), 'numpy.zeros', 'np.zeros', (['(3, 1, nb_frames)'], {}), '((3, 1, nb_frames))\n', (1509, 1528), True, 'import numpy as np\n'), ((2135, 2160), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2148, 2160), False, 'import pytest\n'), ((2208, 2233), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2221, 2233), False, 'import pytest\n'), ((2243, 2354), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {'angles': 'random_vector[..., :5]', 'translations': 'random_vector', 'angle_sequence': '"""x"""'}), "(angles=random_vector[..., :5], translations=\n random_vector, angle_sequence='x')\n", (2270, 2354), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((2407, 2432), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2420, 2432), False, 'import pytest\n'), ((2442, 2511), 'pyomeca.Rototrans.from_euler_angles', 
'Rototrans.from_euler_angles', ([], {'angles': 'random_vector', 'angle_sequence': '"""x"""'}), "(angles=random_vector, angle_sequence='x')\n", (2469, 2511), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((2522, 2547), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2535, 2547), False, 'import pytest\n'), ((2557, 2628), 'pyomeca.Rototrans.from_euler_angles', 'Rototrans.from_euler_angles', ([], {'angles': 'random_vector', 'angle_sequence': '"""nop"""'}), "(angles=random_vector, angle_sequence='nop')\n", (2584, 2628), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((5277, 5497), 'numpy.array', 'np.array', (['[[0.5773502691896257, 0.7071067811865475, -0.408248290463863, 1.0], [\n 0.5773502691896257, 0.0, 0.816496580927726, 2.0], [0.5773502691896257, \n -0.7071067811865475, -0.408248290463863, 3.0], [0, 0, 0, 1.0]]'], {}), '([[0.5773502691896257, 0.7071067811865475, -0.408248290463863, 1.0],\n [0.5773502691896257, 0.0, 0.816496580927726, 2.0], [0.5773502691896257,\n -0.7071067811865475, -0.408248290463863, 3.0], [0, 0, 0, 1.0]])\n', (5285, 5497), True, 'import numpy as np\n'), ((5904, 5929), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5917, 5929), False, 'import pytest\n'), ((6069, 6094), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6082, 6094), False, 'import pytest\n'), ((6231, 6256), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6244, 6256), False, 'import pytest\n'), ((6393, 6418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6406, 6418), False, 'import pytest\n'), ((6627, 6652), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6640, 6652), False, 'import pytest\n'), ((6752, 6777), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6765, 6777), False, 'import pytest\n'), ((6877, 6902), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6890, 6902), False, 'import pytest\n'), ((7002, 7027), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7015, 7027), False, 'import pytest\n'), ((8082, 8107), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)', '(100)'], {}), '(3, 1, 100)\n', (8096, 8107), True, 'import numpy as np\n'), ((2021, 2076), 'numpy.arange', 'np.arange', (['(0)', '(rt_random_angles.time.size / 100)', '(1 / 100)'], {}), '(0, rt_random_angles.time.size / 100, 1 / 100)\n', (2030, 2076), True, 'import numpy as np\n'), ((8246, 8281), 'pyomeca.Angles.from_rototrans', 'Angles.from_rototrans', (['rt_mean', 'seq'], {}), '(rt_mean, seq)\n', (8267, 8281), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((8318, 8348), 'pyomeca.Angles.from_rototrans', 'Angles.from_rototrans', (['rt', 'seq'], {}), '(rt, seq)\n', (8339, 8348), False, 'from pyomeca import Angles, Rototrans, Markers\n'), ((172, 194), 'itertools.permutations', 'permutations', (['"""xyz"""', 'i'], {}), "('xyz', i)\n", (184, 194), False, 'from itertools import permutations\n'), ((1824, 1833), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1830, 1833), True, 'import numpy as np\n'), ((2185, 2196), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2193, 2196), True, 'import numpy as np\n'), ((4933, 4976), 'numpy.array', 'np.array', (['((1, 2, 3), (4, 5, 6), (6, 5, 4))'], {}), '(((1, 2, 3), (4, 5, 6), (6, 5, 4)))\n', (4941, 4976), True, 'import numpy as np\n')]
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 <NAME>, <NAME>, <NAME>,
# <NAME>. All rights reserved.
# Copyright (C) 2011-2014 <NAME>
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
from casadi import *
import matplotlib.pyplot as plt
import numpy
# Sailboat model based on
#
# [MF2011]:
# <NAME>, <NAME>
# "Tacking Simulation of Sailing Yachts with New Model of Aerodynamic
# Force Variation During Tacking Maneuver"
# Journal of Sailboat Technology, Article 2011-01
#
# <NAME>, UW Madison 2017
#
# Create DaeBuilder instance
dae = DaeBuilder()
# Load file with external functions
from os import path
curr_dir = path.dirname(path.abspath(__file__))
clib = Importer(curr_dir + '/sailboat_c.c', 'none')
# Options for external functions
# NOTE: These options should become redundant once code is more stable
external_opts = dict(enable_jacobian = False, enable_forward = False, \
enable_reverse = False, enable_fd = True)
# Physical constants
g = 9.81 # [m/s^2] gravity
rho = 1027. # [kg/m^3] density of ocean water
# Sailboat model constants. Cf. Table 1 [MF2011]
L = 8.80 # [m] Length of design waterline
D = 2.02 # [m] Design draft, including fin keel
m = 4410. # [kg] Displacement
GM = 1.45 # [m] metacentric height of boat
m_x = 160.; m_y_hull = 2130.; m_y_sail = 280.; m_z = 12000. # [kg] Added masses
Ixx = 17700.; Iyy = 33100.; Izz = 17200. # [kg m^2] Moments of inertia
Jxx_hull = 7200.; Jxx_sail = 8100.; Jyy = 42400.; Jzz = 6700. # [kg m^2] Added moments of inertia
X_pVV = 3.38e-1
X_pPP = 1.40e-3
X_pVVVV = -1.84
X_pT = -1.91e-2
Y_pV = -5.35e-1
Y_pP = -5.89e-3
Y_pVPP = 7.37e-1
Y_pVVP = -5.53e-1
Y_pVVV = 3.07
Y_pP = 2.19e-1
Y_pT = -4.01e-3
K_pV = 2.80e-1
K_pP = 3.36e-3
K_pVPP = -4.07e-1
K_pVVP = 2.24e-1
K_pVVV = -1.38
K_pT = -3.53e-1
N_pV = -3.23e-2
N_pP = -1.52e-2
N_pVPP = 2.71e-4
N_pVVP = -9.06e-2
N_pVVV = -2.98e-2
N_pT = -5.89e-3
C_Xd = -3.79e-2
C_Yd = -1.80e-1
C_Kd = 9.76e-2
C_Nd = 9.74e-2
# States
U = dae.add_x('U') # Velocity along the X axis
V = dae.add_x('V') # Velocity along the Y axis
phi = dae.add_x('phi') # Roll angle
theta = dae.add_x('theta') # Yaw angle
dphi = dae.add_x('dphi') # Time derivative of phi
dtheta = dae.add_x('dtheta') # Time derivative of theta
# Controls
beta = dae.add_u('beta')
# Sum contributions from hull and sail (?)
m_y = m_y_hull + m_y_sail
Jxx = Jxx_hull + Jxx_sail
# Auxiliary variables
# Squared boat velocity
V_B2 = U**2 + V**2
# To avoid duplicate expressions
cos_phi = cos(phi)
sin_phi = sin(phi)
cos2_phi = cos_phi**2
sin2_phi = sin_phi**2
phi2 = phi**2
# Hull resistance in the upright position
X_0_fun = dae.add_fun('hull_resistance', clib, external_opts)
X_0 = X_0_fun(U)
# Calculate hydrodynamic forces
V_p = sin(beta)
V_p2 = V_p**2
V_p3 = V_p2*V_p
V_p4 = V_p2**2
H_fact = 0.5*rho*V_B2*L*D
dae.add_d('X_H', (X_pVV*V_p2 + X_pPP*phi2 + X_pVVVV*V_p4)*H_fact)
dae.add_d('Y_H', (Y_pV*V_p + Y_pP*phi + Y_pVPP*V_p*phi2 + Y_pVVP*V_p2*phi + Y_pVVV*V_p3)*H_fact)
dae.add_d('K_H', (K_pV*V_p + K_pP*phi + K_pVPP*V_p*phi2 + K_pVVP*V_p2*phi + K_pVVV*V_p3)*H_fact*D)
dae.add_d('N_H', (N_pV*V_p + N_pP*phi + N_pVPP*V_p*phi2 + N_pVVP*V_p2*phi + N_pVVV*V_p3)*H_fact*L)
H = dae.add_fun('H', ['phi', 'beta', 'U', 'V'], ['X_H', 'Y_H', 'K_H', 'N_H'])
# Plot it for reference
ngrid_phi = 100; ngrid_beta = 100
U0 = 5.; V0 = 5. # [m/s] Boat speed for simulation
phi0 = numpy.linspace(-pi/4, pi/4, ngrid_phi)
beta0 = numpy.linspace(-pi/4, pi/4, ngrid_beta)
PHI0,BETA0 = numpy.meshgrid(phi0, beta0)
r = H(phi=PHI0, beta=BETA0, U=U0, V=V0)
for i,c in enumerate(['X_H', 'Y_H', 'K_H', 'N_H']):
plt.subplot(2,2,i+1)
CS = plt.contour(PHI0*180/pi, BETA0*180/pi, log10(r[c]))
plt.clabel(CS, inline=1, fontsize=10)
plt.title('log10(' + c + ')')
plt.grid(True)
# Make a function call
X_H, Y_H, K_H, N_H = H(phi, beta, U, V)
# Hydrodynamic derivatives of the hull due to yawing motion
X_VT = 0. # Neglected
Y_T = 0. # Neglected
N_T = 0. # Neglected
# Derivative due to rolling
Y_P = 0. # Neglected
K_P = 0. # Neglected
# Hydrodynamic forces on the rudder
X_R = 0. # Neglected
Y_R = 0. # Neglected
K_R = 0. # Neglected
N_R = 0. # Neglected
# Sail forces
X_S = 0. # Neglected
Y_S = 0. # Neglected
K_S = 0. # Neglected
N_S = 0. # Neglected
# Surge: (m+m_x)*dot(U) = F_X, cf. (3) [MF2011]
F_X = X_0 + X_H + X_R + X_S \
+ (m + m_y*cos2_phi + m_z*sin2_phi + X_VT)*V*dtheta
dae.add_ode("surge", F_X / (m+m_x))
# Sway: (m + m_y*cos2_phi + m_z*sin2_phi)*dot(V) = F_Y
F_Y = Y_H + Y_P*dphi + Y_T*dtheta + Y_R + Y_S \
- (m + m_x)*U*dtheta \
- 2*(m_z - m_y)*sin_phi*cos_phi*V*dphi
dae.add_ode("sway", F_Y / (m + m_y*cos2_phi + m_z*sin2_phi))
# Roll: (Ixx + Jxx)*dot(dphi) = F_K
F_K = K_H + K_P*dphi + K_R + K_S - m*g*GM*sin_phi \
+ ((Iyy+Jyy)-(Izz+Jzz))*sin_phi*cos_phi*dtheta**2
dae.add_ode("roll", F_K / (Ixx + Jxx))
# Yaw: ((Iyy+Jyy)*sin2_phi + (Izz+Jzz)*cos2_phi)*dot(dtheta) = F_N
F_N = N_H + N_T*dtheta + N_R + N_S \
-2*((Iyy+Jyy)-(Izz+Jzz))*sin_phi*cos_phi*dtheta*dphi
dae.add_ode("yaw", F_N / ((Iyy+Jyy)*sin2_phi + (Izz+Jzz)*cos2_phi))
# Roll angle
dae.add_ode("roll_angle", dphi)
# Yaw angle
dae.add_ode("yaw_angle", dtheta)
# Print ODE
print(dae)
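# Note: a minimal, self-contained sketch of time-stepping an ODE with CasADi's
# integrator interface (CasADi 3.x-era API; the toy dynamics are illustrative
# and unrelated to the sailboat model above):
x_demo = MX.sym('x_demo')
F_demo = integrator('F_demo', 'cvodes', dict(x=x_demo, ode=-x_demo), dict(tf=0.1))
print(F_demo(x0=1.0)['xf'])  # one 0.1 s step of dx/dt = -x from x(0) = 1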
# Generate Jacobian of ODE rhs w.r.t. to states and control
Jfcn = dae.create("Jfcn", ['x', 'u'], ['jac_ode_x', 'jac_ode_u'])
Jfcn_file = Jfcn.generate()
print('Jacobian function saved to ' + Jfcn_file)
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"os.path.abspath",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clabel",
"numpy.linspace",
"matplotlib.pyplot.grid"
] |
[((4203, 4245), 'numpy.linspace', 'numpy.linspace', (['(-pi / 4)', '(pi / 4)', 'ngrid_phi'], {}), '(-pi / 4, pi / 4, ngrid_phi)\n', (4217, 4245), False, 'import numpy\n'), ((4250, 4293), 'numpy.linspace', 'numpy.linspace', (['(-pi / 4)', '(pi / 4)', 'ngrid_beta'], {}), '(-pi / 4, pi / 4, ngrid_beta)\n', (4264, 4293), False, 'import numpy\n'), ((4303, 4330), 'numpy.meshgrid', 'numpy.meshgrid', (['phi0', 'beta0'], {}), '(phi0, beta0)\n', (4317, 4330), False, 'import numpy\n'), ((6223, 6233), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6231, 6233), True, 'import matplotlib.pyplot as plt\n'), ((1481, 1503), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (1493, 1503), False, 'from os import path\n'), ((4427, 4451), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(i + 1)'], {}), '(2, 2, i + 1)\n', (4438, 4451), True, 'import matplotlib.pyplot as plt\n'), ((4513, 4550), 'matplotlib.pyplot.clabel', 'plt.clabel', (['CS'], {'inline': '(1)', 'fontsize': '(10)'}), '(CS, inline=1, fontsize=10)\n', (4523, 4550), True, 'import matplotlib.pyplot as plt\n'), ((4555, 4584), 'matplotlib.pyplot.title', 'plt.title', (["('log10(' + c + ')')"], {}), "('log10(' + c + ')')\n", (4564, 4584), True, 'import matplotlib.pyplot as plt\n'), ((4589, 4603), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4597, 4603), True, 'import matplotlib.pyplot as plt\n')]
|
import tensorflow as tf
import numpy as np
import os
import time
import argparse
import imageio
parser = argparse.ArgumentParser()
parser.add_argument("--training", type=int, default=1, help="training or testing")
parser.add_argument("--testdir", type=str, default=None, help="specify log file dir")
parser.add_argument("--testnum", type=int, default=-1, help="specify file name")
parser.add_argument("--modelnum", type=int, default=-1, help="specify model name")
parser.add_argument("--basePath", type=str, default="", help="specify base path")
parser.add_argument("--batchsize", type=int, default=64, help="set batch size")
parser.add_argument("--epochnum", type=int, default=100, help="set training epochs")
parser.add_argument("--learningrate", type=float, default=0.0001, help="set learning rate")
parser.add_argument("--maxsave", type=int, default=5, help="set saving number")
parser.add_argument("--rrfactor", type=float, default=0.0, help="set factor for rr term")
parser.add_argument("--orthofactor", type=float, default=0.0, help="set factor for orthogonal term")
parser.add_argument("--runfile", type=str, default="run.py", help="specify run file for copy")
args = parser.parse_args()
if (not args.training):
if args.testnum < 0 or args.modelnum < 0:
print("specify --testnum and --modelnum for testing!")
exit()
if args.testdir:
folderpre = args.testdir
else:
folderpre = "default"
BATCH_SIZE = 2
if not args.training:
BATCH_SIZE = 1
CLASS_NUM = 10
EPOCHS = args.epochnum
learningratevalue = args.learningrate
maxToKeep = args.maxsave
epsilon = 1e-6
imagewidth = 28
imageheight = 28
def makedir():
count = 0
currentdir = os.getcwd()+"/"
while os.path.exists(args.basePath+folderpre+"/test_%04d/"%count):
count += 1
targetdir = args.basePath+folderpre+"/test_%04d/"%count
os.makedirs(targetdir)
return targetdir
test_path = makedir()
testf = open(test_path + "testaccuracy.txt",'a+')
trainf = open(test_path + "trainloss.txt",'a+')
timef = open(test_path + "elapsedtime.txt",'a+')
os.system("cp %s %s/%s"%(args.runfile,test_path,args.runfile))
os.system("cp %s %s/%s"%(__file__,test_path,__file__))
# training data
num1, num2 = 0,1
x_train0 = np.reshape(imageio.imread("MNIST/%d.png"%num1),[1,imagewidth*imageheight])
x_train1 = np.reshape(imageio.imread("MNIST/%d.png"%num2),[1,imagewidth*imageheight])
y_train0 = np.zeros([1,10])
y_train1 = np.zeros([1,10])
y_train0[0,num1]=1
y_train1[0,num2]=1
x_train = np.concatenate((x_train0,x_train1),axis=0)
y_train = np.concatenate((y_train0,y_train1),axis=0)
# testing data
x_test0 = np.reshape(imageio.imread("MNIST/%d_test.png"%num1),[1,imagewidth*imageheight])
x_test1 = np.reshape(imageio.imread("MNIST/%d_test.png"%num2),[1,imagewidth*imageheight])
x_test = np.concatenate((x_test0,x_test1),axis=0)
y_test = y_train
TOTALWEIGHT = 0
def weight_variable(name, shape):
var = tf.get_variable(name,shape,initializer = tf.glorot_uniform_initializer())
global TOTALWEIGHT
if len(shape) == 4:
print("Convolutional layer: {}".format(shape))
TOTALWEIGHT += shape[0]*shape[1]*shape[2]*shape[3]
if len(shape) == 2:
print("fully connected layer: {}".format(shape))
TOTALWEIGHT += shape[0]*shape[1]
return var
def bias_variable(name, shape):
global TOTALWEIGHT
TOTALWEIGHT += shape[0]
return tf.get_variable(name,shape,initializer = tf.zeros_initializer())
def conv2d(x, W, padding = 'SAME',strides=[1,1,1,1]):
return tf.nn.conv2d(x, W, strides=strides, padding=padding)
def batch_norm(input, reuse=False, is_training=args.training):
return tf.contrib.layers.batch_norm(input, decay=0.9, center=True, scale=True, epsilon=1e-3,
is_training=is_training, updates_collections=None, scope=tf.get_variable_scope(), reuse = reuse)
def l2_reg_ortho(weight):
reg = tf.constant(0.)
Wshape = weight.get_shape()
if np.size(weight.get_shape().as_list()) == 2:
cols = int(Wshape[1])
else:
cols = int(Wshape[1]*Wshape[2]*Wshape[3])
rows = int(Wshape[0])
w1 = tf.reshape(weight,[-1,cols])
wt = tf.transpose(w1)
m = tf.matmul(wt,w1)
ident = tf.eye(cols,num_columns=cols)
w_tmp = (m - ident)
height = w_tmp.get_shape().as_list()[0]
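    # One step of power iteration: u and v estimate the leading singular pair
    # of w_tmp = W^T W - I, so sigma below approximates its spectral norm; the
    # sigma**2 penalty pushes W toward (near-)orthogonal columns.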
u = tf.nn.l2_normalize(tf.random_normal([height,1]),dim=0,epsilon=1e-12)
v = tf.nn.l2_normalize(tf.matmul(tf.transpose(w_tmp), u), dim=0,epsilon=1e-12)
u = tf.nn.l2_normalize(tf.matmul(w_tmp, v), dim=0,epsilon=1e-12)
sigma = tf.norm(tf.reshape(tf.keras.backend.dot(tf.transpose(u), tf.matmul(w_tmp, v)),[-1]))
reg+=sigma**2
return reg
x = tf.placeholder(tf.float32, [None,imagewidth*imageheight])
y = tf.placeholder(tf.float32, [None,CLASS_NUM])
lr = tf.placeholder(tf.float32)
# forward pass
W_conv1 = weight_variable("W_conv1",[imagewidth*imageheight,CLASS_NUM])
b_conv1 = bias_variable("b_conv1",[CLASS_NUM])
fcout = tf.matmul(x, W_conv1) + b_conv1
# backward pass
back_input = tf.matmul((fcout-b_conv1),tf.transpose(W_conv1))
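# Tied-weights reconstruction: decoding reuses W_conv1^T, so the optional
# args.rrfactor term below penalizes ||x @ W @ W^T - x||^2 and nudges W toward
# a frame with W @ W^T ~ I.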
prediction = tf.reshape(tf.nn.softmax(fcout),[-1,CLASS_NUM])
# calculate the loss
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=fcout))  # pass raw logits; 'prediction' is already softmaxed
loss = cross_entropy
if args.orthofactor != 0:
loss = loss + args.orthofactor*l2_reg_ortho(W_conv1)
if args.rrfactor != 0:
loss = loss + args.rrfactor * tf.reduce_mean(tf.nn.l2_loss(back_input - x))
correct_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
train_step = tf.train.AdamOptimizer(lr).minimize(loss)
# init session
sess = tf.Session()
saver = tf.train.Saver(max_to_keep=maxToKeep)
sess.run(tf.global_variables_initializer())
if args.testnum >= 0 and args.modelnum >=0:
loadpath = args.basePath+folderpre+"/test_%04d/model_%04d.ckpt"%(args.testnum,args.modelnum)
saver.restore(sess,loadpath)
print("Model restored from %s."%(loadpath))
Epochnum = int(np.shape(x_train)[0]/BATCH_SIZE)
def saveModel(test_path, save_no):
saver.save(sess, test_path+'model_%04d.ckpt'%save_no)
msg = 'saved Model %04d.'%save_no
return msg
currenttime = time.time()
testindex = 0
if args.training:
for i in range(EPOCHS * Epochnum):
cross_e,_, trainloss = sess.run([cross_entropy , train_step,loss],feed_dict={x: x_train, y: y_train, lr:learningratevalue})
if i % (Epochnum*100) == 0:
epochindex = int(i/(Epochnum*100))
testaccuracy,outputdata= sess.run([accuracy,back_input],feed_dict={x: x_test, y: y_test})
costtime = time.time()-currenttime
print("EPOCHS: %d, train loss:%f, testing accuracy:%f, time consuming:%f"%(epochindex,trainloss,testaccuracy,costtime))
print("cross_e:%f"%cross_e)
testf.write(str(epochindex)+'\t'+str(testaccuracy)+'\r\n')
trainf.write(str(epochindex)+'\t'+str(trainloss)+'\r\n')
timef.write(str(epochindex)+'\t'+str(costtime)+'\r\n')
if (epochindex+1)%2 == 0:
print(saveModel(test_path,epochindex))
# output test image
outputdata = np.reshape(outputdata,[2,28,28])
resultpath = test_path +"backwardtest_img/"
            if not os.path.exists(resultpath):
                os.mkdir(resultpath)
for ind in range(2):
imageio.imwrite(resultpath + 'test%d_%04d.png'%(ind,testindex),outputdata[ind].astype(np.uint8))
testindex += 1
currenttime = time.time()
|
[
"os.mkdir",
"argparse.ArgumentParser",
"tensorflow.reshape",
"tensorflow.get_variable_scope",
"numpy.shape",
"tensorflow.matmul",
"tensorflow.nn.conv2d",
"tensorflow.nn.softmax",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"os.path.exists",
"tensorflow.placeholder",
"tensorflow.cast",
"numpy.reshape",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"imageio.imread",
"tensorflow.eye",
"tensorflow.Session",
"os.system",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.random_normal",
"tensorflow.zeros_initializer",
"numpy.concatenate",
"tensorflow.glorot_uniform_initializer",
"os.makedirs",
"tensorflow.argmax",
"os.getcwd",
"numpy.zeros",
"time.time",
"tensorflow.nn.l2_loss",
"tensorflow.train.AdamOptimizer"
] |
[((106, 131), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (129, 131), False, 'import argparse\n'), ((2063, 2129), 'os.system', 'os.system', (["('cp %s %s/%s' % (args.runfile, test_path, args.runfile))"], {}), "('cp %s %s/%s' % (args.runfile, test_path, args.runfile))\n", (2072, 2129), False, 'import os\n'), ((2126, 2184), 'os.system', 'os.system', (["('cp %s %s/%s' % (__file__, test_path, __file__))"], {}), "('cp %s %s/%s' % (__file__, test_path, __file__))\n", (2135, 2184), False, 'import os\n'), ((2398, 2415), 'numpy.zeros', 'np.zeros', (['[1, 10]'], {}), '([1, 10])\n', (2406, 2415), True, 'import numpy as np\n'), ((2426, 2443), 'numpy.zeros', 'np.zeros', (['[1, 10]'], {}), '([1, 10])\n', (2434, 2443), True, 'import numpy as np\n'), ((2491, 2535), 'numpy.concatenate', 'np.concatenate', (['(x_train0, x_train1)'], {'axis': '(0)'}), '((x_train0, x_train1), axis=0)\n', (2505, 2535), True, 'import numpy as np\n'), ((2544, 2588), 'numpy.concatenate', 'np.concatenate', (['(y_train0, y_train1)'], {'axis': '(0)'}), '((y_train0, y_train1), axis=0)\n', (2558, 2588), True, 'import numpy as np\n'), ((2792, 2834), 'numpy.concatenate', 'np.concatenate', (['(x_test0, x_test1)'], {'axis': '(0)'}), '((x_test0, x_test1), axis=0)\n', (2806, 2834), True, 'import numpy as np\n'), ((4678, 4738), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, imagewidth * imageheight]'], {}), '(tf.float32, [None, imagewidth * imageheight])\n', (4692, 4738), True, 'import tensorflow as tf\n'), ((4740, 4785), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, CLASS_NUM]'], {}), '(tf.float32, [None, CLASS_NUM])\n', (4754, 4785), True, 'import tensorflow as tf\n'), ((4790, 4816), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (4804, 4816), True, 'import tensorflow as tf\n'), ((5678, 5690), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5688, 5690), True, 'import tensorflow as tf\n'), ((5699, 5736), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'maxToKeep'}), '(max_to_keep=maxToKeep)\n', (5713, 5736), True, 'import tensorflow as tf\n'), ((6215, 6226), 'time.time', 'time.time', ([], {}), '()\n', (6224, 6226), False, 'import time\n'), ((1704, 1769), 'os.path.exists', 'os.path.exists', (["(args.basePath + folderpre + '/test_%04d/' % count)"], {}), "(args.basePath + folderpre + '/test_%04d/' % count)\n", (1718, 1769), False, 'import os\n'), ((1848, 1870), 'os.makedirs', 'os.makedirs', (['targetdir'], {}), '(targetdir)\n', (1859, 1870), False, 'import os\n'), ((2237, 2274), 'imageio.imread', 'imageio.imread', (["('MNIST/%d.png' % num1)"], {}), "('MNIST/%d.png' % num1)\n", (2251, 2274), False, 'import imageio\n'), ((2323, 2360), 'imageio.imread', 'imageio.imread', (["('MNIST/%d.png' % num2)"], {}), "('MNIST/%d.png' % num2)\n", (2337, 2360), False, 'import imageio\n'), ((2624, 2666), 'imageio.imread', 'imageio.imread', (["('MNIST/%d_test.png' % num1)"], {}), "('MNIST/%d_test.png' % num1)\n", (2638, 2666), False, 'import imageio\n'), ((2714, 2756), 'imageio.imread', 'imageio.imread', (["('MNIST/%d_test.png' % num2)"], {}), "('MNIST/%d_test.png' % num2)\n", (2728, 2756), False, 'import imageio\n'), ((3509, 3561), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': 'strides', 'padding': 'padding'}), '(x, W, strides=strides, padding=padding)\n', (3521, 3561), True, 'import tensorflow as tf\n'), ((3897, 3913), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (3908, 3913), True, 'import 
tensorflow as tf\n'), ((4121, 4151), 'tensorflow.reshape', 'tf.reshape', (['weight', '[-1, cols]'], {}), '(weight, [-1, cols])\n', (4131, 4151), True, 'import tensorflow as tf\n'), ((4159, 4175), 'tensorflow.transpose', 'tf.transpose', (['w1'], {}), '(w1)\n', (4171, 4175), True, 'import tensorflow as tf\n'), ((4185, 4202), 'tensorflow.matmul', 'tf.matmul', (['wt', 'w1'], {}), '(wt, w1)\n', (4194, 4202), True, 'import tensorflow as tf\n'), ((4214, 4244), 'tensorflow.eye', 'tf.eye', (['cols'], {'num_columns': 'cols'}), '(cols, num_columns=cols)\n', (4220, 4244), True, 'import tensorflow as tf\n'), ((4960, 4981), 'tensorflow.matmul', 'tf.matmul', (['x', 'W_conv1'], {}), '(x, W_conv1)\n', (4969, 4981), True, 'import tensorflow as tf\n'), ((5047, 5068), 'tensorflow.transpose', 'tf.transpose', (['W_conv1'], {}), '(W_conv1)\n', (5059, 5068), True, 'import tensorflow as tf\n'), ((5095, 5115), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['fcout'], {}), '(fcout)\n', (5108, 5115), True, 'import tensorflow as tf\n'), ((5184, 5252), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'prediction'}), '(labels=y, logits=prediction)\n', (5223, 5252), True, 'import tensorflow as tf\n'), ((5492, 5516), 'tensorflow.argmax', 'tf.argmax', (['prediction', '(1)'], {}), '(prediction, 1)\n', (5501, 5516), True, 'import tensorflow as tf\n'), ((5517, 5532), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (5526, 5532), True, 'import tensorflow as tf\n'), ((5559, 5598), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (5566, 5598), True, 'import tensorflow as tf\n'), ((5746, 5779), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5777, 5779), True, 'import tensorflow as tf\n'), ((1678, 1689), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1687, 1689), False, 'import os\n'), ((4340, 4369), 'tensorflow.random_normal', 'tf.random_normal', (['[height, 1]'], {}), '([height, 1])\n', (4356, 4369), True, 'import tensorflow as tf\n'), ((4500, 4519), 'tensorflow.matmul', 'tf.matmul', (['w_tmp', 'v'], {}), '(w_tmp, v)\n', (4509, 4519), True, 'import tensorflow as tf\n'), ((5614, 5640), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (5636, 5640), True, 'import tensorflow as tf\n'), ((2952, 2983), 'tensorflow.glorot_uniform_initializer', 'tf.glorot_uniform_initializer', ([], {}), '()\n', (2981, 2983), True, 'import tensorflow as tf\n'), ((3419, 3441), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (3439, 3441), True, 'import tensorflow as tf\n'), ((3820, 3843), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (3841, 3843), True, 'import tensorflow as tf\n'), ((4427, 4446), 'tensorflow.transpose', 'tf.transpose', (['w_tmp'], {}), '(w_tmp)\n', (4439, 4446), True, 'import tensorflow as tf\n'), ((6020, 6037), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (6028, 6037), True, 'import numpy as np\n'), ((7191, 7226), 'numpy.reshape', 'np.reshape', (['outputdata', '[2, 28, 28]'], {}), '(outputdata, [2, 28, 28])\n', (7201, 7226), True, 'import numpy as np\n'), ((7566, 7577), 'time.time', 'time.time', ([], {}), '()\n', (7575, 7577), False, 'import time\n'), ((4594, 4609), 'tensorflow.transpose', 'tf.transpose', (['u'], {}), '(u)\n', (4606, 4609), True, 'import tensorflow as tf\n'), ((4611, 4630), 'tensorflow.matmul', 'tf.matmul', (['w_tmp', 
'v'], {}), '(w_tmp, v)\n', (4620, 4630), True, 'import tensorflow as tf\n'), ((5430, 5459), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(back_input - x)'], {}), '(back_input - x)\n', (5443, 5459), True, 'import tensorflow as tf\n'), ((6638, 6649), 'time.time', 'time.time', ([], {}), '()\n', (6647, 6649), False, 'import time\n'), ((7302, 7328), 'os.path.exists', 'os.path.exists', (['resultpath'], {}), '(resultpath)\n', (7316, 7328), False, 'import os\n'), ((7346, 7366), 'os.mkdir', 'os.mkdir', (['resultpath'], {}), '(resultpath)\n', (7354, 7366), False, 'import os\n')]
|
#=======================================================================================================================
# An example of how to build special test/training cubes using nh3_testcubes.py
#=======================================================================================================================
import numpy as np
import pyspeckit.spectrum.models.ammonia_constants as nh3con
from pyspeckit.spectrum.units import SpectroscopicAxis as spaxis
from astropy.utils.console import ProgressBar
import sys
import nh3_testcubes as testcubes
def generate_cubes(nCubes=100, nBorder=1, noise_rms=0.1, output_dir='random_cubes', random_seed=None,
linenames=['oneone', 'twotwo'], remove_low_sep=True, noise_class=True):
xarrList = []
lineIDList = []
for linename in linenames:
# generate spectral axis for each ammonia lines
xarr = spaxis((np.linspace(-500, 499, 1000) * 5.72e-6
+ nh3con.freq_dict[linename] / 1e9),
unit='GHz',
refX=nh3con.freq_dict[linename] / 1e9,
velocity_convention='radio', refX_unit='GHz')
xarrList.append(xarr)
# specify the ID fore each line to appear in saved fits files
        if linename == 'oneone':
            lineIDList.append('11')
        elif linename == 'twotwo':
            lineIDList.append('22')
else:
# use line names at it is for lines above (3,3)
lineIDList.append(linename)
# generate random parameters for nCubes
nComps, Temp, Width, Voff, logN = testcubes.generate_parameters(nCubes, random_seed)
gradX, gradY = testcubes.generate_gradients(nCubes, random_seed)
if noise_class:
# Creates a balanced training set with 1comp, noise, and 2comp classes
        nComps = np.concatenate((np.zeros(nCubes // 3).astype(int),
                                 np.ones(nCubes // 3).astype(int),
                                 np.ones(nCubes // 3 + nCubes % 3).astype(int) + 1))
if remove_low_sep:
Voff = remove_low_vsep(Voff, Width)
cubes = []
for xarr, lineID in zip(xarrList, lineIDList):
# generate cubes for each line specified
cubeList = []
print('----------- generating {0} lines ------------'.format(lineID))
for i in ProgressBar(range(nCubes)):
            cube_i = testcubes.make_and_write(nCubes, nComps[i], i, nBorder, xarr, Temp[i], Width[i], Voff[i], logN[i], gradX[i], gradY[i],
                                              noise_rms, lineID, output_dir)
cubeList.append(cube_i)
cubes.append(cubeList)
return cubes
def remove_low_vsep(Voff, Width):
Voff = Voff.swapaxes(0, 1)
Voff1, Voff2 = Voff[0], Voff[1]
Width = Width.swapaxes(0, 1)
Width1, Width2 = Width[0], Width[1]
# Find where centroids are too close
too_close = np.where(np.abs(Voff1 - Voff2) < np.max(np.column_stack((Width1, Width2)), axis=1))
# Move the centroids farther apart by the length of largest line width
min_Voff = np.min(np.column_stack((Voff2[too_close], Voff1[too_close])), axis=1)
max_Voff = np.max(np.column_stack((Voff2[too_close], Voff1[too_close])), axis=1)
Voff1[too_close] = min_Voff - np.max(np.column_stack((Width1[too_close], Width2[too_close])), axis=1) / 2.
Voff2[too_close] = max_Voff + np.max(np.column_stack((Width1[too_close], Width2[too_close])), axis=1) / 2.
Voff = np.array([Voff1, Voff2]).swapaxes(0, 1)
return Voff
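# Worked example: with Voff1 = 0.0, Voff2 = 0.3 and widths 0.5 / 0.4, the pair
# is "too close" (|0.0 - 0.3| < 0.5), so the centroids become 0.0 - 0.5/2 = -0.25
# and 0.3 + 0.5/2 = 0.55, i.e. they are pushed apart by the largest line width.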
if __name__ == '__main__':
print(sys.argv)
if len(sys.argv) > 1:
generate_cubes(nCubes=int(sys.argv[1]))
else:
generate_cubes()
|
[
"numpy.abs",
"nh3_testcubes.generate_gradients",
"nh3_testcubes.generate_parameters",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.linspace",
"numpy.column_stack",
"nh3_testcubes.make_and_write"
] |
[((1611, 1661), 'nh3_testcubes.generate_parameters', 'testcubes.generate_parameters', (['nCubes', 'random_seed'], {}), '(nCubes, random_seed)\n', (1640, 1661), True, 'import nh3_testcubes as testcubes\n'), ((1681, 1730), 'nh3_testcubes.generate_gradients', 'testcubes.generate_gradients', (['nCubes', 'random_seed'], {}), '(nCubes, random_seed)\n', (1709, 1730), True, 'import nh3_testcubes as testcubes\n'), ((3079, 3132), 'numpy.column_stack', 'np.column_stack', (['(Voff2[too_close], Voff1[too_close])'], {}), '((Voff2[too_close], Voff1[too_close]))\n', (3094, 3132), True, 'import numpy as np\n'), ((3164, 3217), 'numpy.column_stack', 'np.column_stack', (['(Voff2[too_close], Voff1[too_close])'], {}), '((Voff2[too_close], Voff1[too_close]))\n', (3179, 3217), True, 'import numpy as np\n'), ((2397, 2554), 'nh3_testcubes.make_and_write', 'testcubes.make_and_write', (['nCubes', 'nComps[i]', 'i', 'nBorder', 'xarr', 'Temp[i]', 'Width[i]', 'Voff[i]', 'logN[i]', 'gradX[i]', 'gradY[i]', 'noise_rms', 'lineID', 'output_dir'], {}), '(nCubes, nComps[i], i, nBorder, xarr, Temp[i],\n Width[i], Voff[i], logN[i], gradX[i], gradY[i], noise_rms, lineID,\n output_dir)\n', (2421, 2554), True, 'import nh3_testcubes as testcubes\n'), ((2907, 2928), 'numpy.abs', 'np.abs', (['(Voff1 - Voff2)'], {}), '(Voff1 - Voff2)\n', (2913, 2928), True, 'import numpy as np\n'), ((3460, 3484), 'numpy.array', 'np.array', (['[Voff1, Voff2]'], {}), '([Voff1, Voff2])\n', (3468, 3484), True, 'import numpy as np\n'), ((2938, 2971), 'numpy.column_stack', 'np.column_stack', (['(Width1, Width2)'], {}), '((Width1, Width2))\n', (2953, 2971), True, 'import numpy as np\n'), ((3268, 3323), 'numpy.column_stack', 'np.column_stack', (['(Width1[too_close], Width2[too_close])'], {}), '((Width1[too_close], Width2[too_close]))\n', (3283, 3323), True, 'import numpy as np\n'), ((3379, 3434), 'numpy.column_stack', 'np.column_stack', (['(Width1[too_close], Width2[too_close])'], {}), '((Width1[too_close], Width2[too_close]))\n', (3394, 3434), True, 'import numpy as np\n'), ((910, 938), 'numpy.linspace', 'np.linspace', (['(-500)', '(499)', '(1000)'], {}), '(-500, 499, 1000)\n', (921, 938), True, 'import numpy as np\n'), ((1864, 1884), 'numpy.zeros', 'np.zeros', (['(nCubes / 3)'], {}), '(nCubes / 3)\n', (1872, 1884), True, 'import numpy as np\n'), ((1931, 1950), 'numpy.ones', 'np.ones', (['(nCubes / 3)'], {}), '(nCubes / 3)\n', (1938, 1950), True, 'import numpy as np\n'), ((1997, 2029), 'numpy.ones', 'np.ones', (['(nCubes / 3 + nCubes % 3)'], {}), '(nCubes / 3 + nCubes % 3)\n', (2004, 2029), True, 'import numpy as np\n')]
|
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import numpy as np
import tqdm
from sklearn.manifold import TSNE
from nnabla import logger
from nnabla.utils.data_iterator import data_iterator_csv_dataset
def func(args):
# Load variable
data_iterator = (lambda: data_iterator_csv_dataset(
uri=args.input,
batch_size=64,
shuffle=False,
normalize=True,
with_memory_cache=False,
with_file_cache=False))
logger.log(99, 'Loading variable...')
dataset = []
with data_iterator() as di:
pbar = tqdm.tqdm(total=di.size)
while len(dataset) < di.size:
data = di.next()
variable = data[di.variables.index(args.variable)]
dataset.extend(variable)
pbar.update(len(variable))
pbar.close()
dataset = np.array(dataset)[:di.size].reshape(di.size, -1)
logger.log(99, 'variable={}, length={}, dim={}'.format(
args.variable, dataset.shape[0], dataset.shape[1]))
# t-SNE
logger.log(99, 'Processing t-SNE...')
dim = int(args.dim)
result = TSNE(n_components=dim, random_state=0).fit_transform(dataset)
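    # n_components sets the embedding dimension (typically 2 or 3);
    # random_state=0 makes the otherwise stochastic embedding reproducible.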
# output
with open(args.input, newline='', encoding='utf-8-sig') as f:
rows = [row for row in csv.reader(f)]
row0 = rows.pop(0)
row0.extend([args.variable + '_tsne__{}'.format(i) for i in range(dim)])
for i, y in enumerate(result):
rows[i].extend(y)
with open(args.output, 'w', encoding='utf-8') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(row0)
writer.writerows(rows)
logger.log(99, 't-SNE completed successfully.')
def main():
parser = argparse.ArgumentParser(
description='t-SNE\n\n' +
'<NAME>, <NAME>. Visualizing Data using t-SNE\n' +
'http://jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf\n\n',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-i',
'--input',
help='path to input csv file (csv) default=output_result.csv',
required=True,
default='output_result.csv')
parser.add_argument(
'-v',
'--variable',
help="Variable to be processed (variable) default=x",
required=True,
default="x")
parser.add_argument(
'-d',
'--dim',
help='dimension of the embedded space (variable) default=2',
default=2)
parser.add_argument(
'-o',
'--output',
help='path to output csv file (csv) default=tsne.csv',
required=True,
default='tsne.csv')
parser.set_defaults(func=func)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
|
[
"nnabla.logger.log",
"tqdm.tqdm",
"csv.reader",
"csv.writer",
"argparse.ArgumentParser",
"sklearn.manifold.TSNE",
"nnabla.utils.data_iterator.data_iterator_csv_dataset",
"numpy.array"
] |
[((1028, 1065), 'nnabla.logger.log', 'logger.log', (['(99)', '"""Loading variable..."""'], {}), "(99, 'Loading variable...')\n", (1038, 1065), False, 'from nnabla import logger\n'), ((1583, 1620), 'nnabla.logger.log', 'logger.log', (['(99)', '"""Processing t-SNE..."""'], {}), "(99, 'Processing t-SNE...')\n", (1593, 1620), False, 'from nnabla import logger\n'), ((2182, 2229), 'nnabla.logger.log', 'logger.log', (['(99)', '"""t-SNE completed successfully."""'], {}), "(99, 't-SNE completed successfully.')\n", (2192, 2229), False, 'from nnabla import logger\n'), ((2257, 2493), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '(\'t-SNE\\n\\n\' + \'<NAME>, <NAME>. Visualizing Data using t-SNE\\n\' +\n """http://jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf\n\n""")', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=\'t-SNE\\n\\n\' +\n """<NAME>, <NAME>. Visualizing Data using t-SNE\n""" +\n \'http://jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf\\n\\n\',\n formatter_class=argparse.RawTextHelpFormatter)\n', (2280, 2493), False, 'import argparse\n'), ((837, 976), 'nnabla.utils.data_iterator.data_iterator_csv_dataset', 'data_iterator_csv_dataset', ([], {'uri': 'args.input', 'batch_size': '(64)', 'shuffle': '(False)', 'normalize': '(True)', 'with_memory_cache': '(False)', 'with_file_cache': '(False)'}), '(uri=args.input, batch_size=64, shuffle=False,\n normalize=True, with_memory_cache=False, with_file_cache=False)\n', (862, 976), False, 'from nnabla.utils.data_iterator import data_iterator_csv_dataset\n'), ((1130, 1154), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': 'di.size'}), '(total=di.size)\n', (1139, 1154), False, 'import tqdm\n'), ((2081, 2115), 'csv.writer', 'csv.writer', (['f'], {'lineterminator': '"""\n"""'}), "(f, lineterminator='\\n')\n", (2091, 2115), False, 'import csv\n'), ((1658, 1696), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': 'dim', 'random_state': '(0)'}), '(n_components=dim, random_state=0)\n', (1662, 1696), False, 'from sklearn.manifold import TSNE\n'), ((1397, 1414), 'numpy.array', 'np.array', (['dataset'], {}), '(dataset)\n', (1405, 1414), True, 'import numpy as np\n'), ((1831, 1844), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1841, 1844), False, 'import csv\n')]
|
from torch.utils.data import DataLoader, Subset
from pathlib import Path
import torch
import torch.nn as nn
import itertools as its
import pandas as pd
import numpy as np
import json
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
import matplotlib.pyplot as plt
from NeuralGraph.dataset import MolData
from NeuralGraph.model import QSAR
from NeuralGraph.util import dev
def tanimoto_distance(x, y):
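    # Continuous Tanimoto distance: 1 - sum(min(x, y)) / sum(max(x, y)); the
    # boolean mask picks element-wise minima/maxima without np.minimum/np.maximum.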
idx = x<=y
return 1 - (x[idx].sum() + y[~idx].sum()) / (x[~idx].sum() + y[idx].sum())
def get_circular_fp(smile, radius=6, fp_len=128):
mol = Chem.MolFromSmiles(smile)
fingerprint = Chem.AllChem.GetMorganFingerprintAsBitVect(mol, radius, fp_len)
arr = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fingerprint, arr)
return arr
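# e.g. get_circular_fp('CCO', radius=2, fp_len=16) returns a 16-element 0/1
# array (ECFP-style Morgan bits for ethanol).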
def get_neural_fp(X, net):
x0, x1, x2 = X
x0, x1, x2 = x0.to(dev), x1.to(dev), x2.to(dev)
x0, x1, x2 = (torch.unsqueeze(x, 0) for x in (x0, x1, x2))
res = net.nfp(x0, x1, x2)
res = res.detach().cpu().numpy()
return res
def mse(x, y):
return ((x-y)**2).mean()
def normalize_array(A):
mean, std = np.mean(A), np.std(A)
A_normed = (A - mean) / std
def restore_function(X):
return X * std + mean
return A_normed, restore_function
def change_net_to_weights(net, lo_bnd, hi_bnd):
for n,m in net.named_children():
if isinstance(m, torch.nn.Linear):
nn.init.uniform_(m.weight, lo_bnd, hi_bnd)
if m.bias is not None:
nn.init.uniform_(m.bias, lo_bnd, hi_bnd)
change_net_to_weights(m, lo_bnd, hi_bnd)
def calc_distance(net, data, smiles, FP_LEN,\
sample_sz=1000, SEED=None):
N, sample_sz = len(data), sample_sz
if SEED: np.random.seed(SEED)
res = [[],[]]
for _ in range(sample_sz):
i, j = np.random.choice(N, 2)
dst0 = tanimoto_distance(get_circular_fp(smiles[i], fp_len=FP_LEN),
get_circular_fp(smiles[j], fp_len=FP_LEN))
dst1 = tanimoto_distance(get_neural_fp(data[i][0], net),
get_neural_fp(data[j][0], net))
res[0].append(dst0)
res[1].append(dst1)
res = np.asarray(res)
return res
def calc_corr(res):
return (np.corrcoef(res[0], res[1])[0,1])
def plot_scatter(net, data, smiles, FP_LEN, filename,\
sample_sz = 1000, SEED=None):
res = calc_distance(net, data, smiles, FP_LEN, \
sample_sz, SEED)
plt.scatter(res[0], res[1], marker='o', facecolors='none', edgecolors='b', alpha=0.3)
plt.xlabel("circular fingerprint distance")
plt.ylabel("neural fingerprint distance")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.title("Correlation = {:.4f}".format(np.corrcoef(res[0], res[1])[0,1]))
plt.savefig(filename, dpi=300, bbox_inches='tight')
if __name__ == '__main__':
# Load Data
DATAFILE = Path('./dataset/solubility/delaney-processed.csv')
df = pd.read_csv(DATAFILE)
target = df['measured log solubility in mols per litre'].values
target, restore = normalize_array(target)
data = MolData(df['smiles'], target)
    # Debug leftover (disabled so the experiments below actually run):
    # print(type(df['smiles'][0]), df['smiles'][0])
    # tmp = df['smiles'][0]
    # print(get_circular_fp(tmp))
    # exit()
# Plot with a random weight and 2048 length as in Figure3Left
gcn_act = ['sigmoid', 'relu', 'tanh']
gop_act = ['sigmoid', 'tanh', 'softmax']
large_weights = [(-1e7, 1e7), (0, 1e7), (-1e3, 1e3), (-10, 10)]
max_degs = [1, 6]
res = {}
for a1, a2, bnds, rd in its.product(gcn_act, gop_act, large_weights,
max_degs):
SEED, FP_LEN = 7, 1<<11
net = QSAR(hid_dim=FP_LEN, n_class=1, max_degree=rd,
gcn_activation=a1,
gop_activation=a2)
print("nbnds", bnds)
change_net_to_weights(net.nfp, *bnds)
tmp = calc_distance(net, data, df['smiles'], FP_LEN, sample_sz=500,
SEED=7)
tmp = calc_corr(tmp)
res[f"gcn-{a1}_gop-{a2}_weights-{bnds}_radius-{rd}"]=tmp
print(f"gcn-{a1}_gop-{a2}_weights-{bnds}_radius-{rd}", tmp)
with open('./output.json', 'w') as fp:
json.dump(res, fp)
exit()
plot_scatter(net,
data,
df['smiles'],
FP_LEN,
"./figs/scatter_nfp_vs_cfp_2048_random_weight.png")
exit()
# Plot with a trained model
OUTPUT = './output/best_delaney.pkl'
net = torch.load(OUTPUT+'.pkg')
SEED, FP_LEN = 7, 1<<11
plot_scatter(net,
data,
df['smiles'],
FP_LEN,
"./figs/scatter_nfp_vs_cfp_128_trained_weight.png")
|
[
"numpy.random.seed",
"pandas.read_csv",
"torch.nn.init.uniform_",
"pathlib.Path",
"numpy.mean",
"numpy.std",
"torch.load",
"itertools.product",
"numpy.random.choice",
"json.dump",
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"matplotlib.pyplot.ylim",
"numpy.corrcoef",
"numpy.asarray",
"torch.unsqueeze",
"matplotlib.pyplot.ylabel",
"NeuralGraph.model.QSAR",
"matplotlib.pyplot.xlim",
"rdkit.DataStructs.ConvertToNumpyArray",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"rdkit.Chem.MolFromSmiles",
"matplotlib.pyplot.savefig",
"NeuralGraph.dataset.MolData"
] |
[((576, 601), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smile'], {}), '(smile)\n', (594, 601), False, 'from rdkit import Chem, DataStructs\n'), ((620, 683), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'Chem.AllChem.GetMorganFingerprintAsBitVect', (['mol', 'radius', 'fp_len'], {}), '(mol, radius, fp_len)\n', (662, 683), False, 'from rdkit import Chem, DataStructs\n'), ((694, 708), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (702, 708), True, 'import numpy as np\n'), ((713, 762), 'rdkit.DataStructs.ConvertToNumpyArray', 'DataStructs.ConvertToNumpyArray', (['fingerprint', 'arr'], {}), '(fingerprint, arr)\n', (744, 762), False, 'from rdkit import Chem, DataStructs\n'), ((2189, 2204), 'numpy.asarray', 'np.asarray', (['res'], {}), '(res)\n', (2199, 2204), True, 'import numpy as np\n'), ((2490, 2579), 'matplotlib.pyplot.scatter', 'plt.scatter', (['res[0]', 'res[1]'], {'marker': '"""o"""', 'facecolors': '"""none"""', 'edgecolors': '"""b"""', 'alpha': '(0.3)'}), "(res[0], res[1], marker='o', facecolors='none', edgecolors='b',\n alpha=0.3)\n", (2501, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2580, 2623), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""circular fingerprint distance"""'], {}), "('circular fingerprint distance')\n", (2590, 2623), True, 'import matplotlib.pyplot as plt\n'), ((2628, 2669), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""neural fingerprint distance"""'], {}), "('neural fingerprint distance')\n", (2638, 2669), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2690), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (2682, 2690), True, 'import matplotlib.pyplot as plt\n'), ((2695, 2711), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (2703, 2711), True, 'import matplotlib.pyplot as plt\n'), ((2795, 2846), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(filename, dpi=300, bbox_inches='tight')\n", (2806, 2846), True, 'import matplotlib.pyplot as plt\n'), ((2906, 2956), 'pathlib.Path', 'Path', (['"""./dataset/solubility/delaney-processed.csv"""'], {}), "('./dataset/solubility/delaney-processed.csv')\n", (2910, 2956), False, 'from pathlib import Path\n'), ((2966, 2987), 'pandas.read_csv', 'pd.read_csv', (['DATAFILE'], {}), '(DATAFILE)\n', (2977, 2987), True, 'import pandas as pd\n'), ((3113, 3142), 'NeuralGraph.dataset.MolData', 'MolData', (["df['smiles']", 'target'], {}), "(df['smiles'], target)\n", (3120, 3142), False, 'from NeuralGraph.dataset import MolData\n'), ((3547, 3601), 'itertools.product', 'its.product', (['gcn_act', 'gop_act', 'large_weights', 'max_degs'], {}), '(gcn_act, gop_act, large_weights, max_degs)\n', (3558, 3601), True, 'import itertools as its\n'), ((4512, 4539), 'torch.load', 'torch.load', (["(OUTPUT + '.pkg')"], {}), "(OUTPUT + '.pkg')\n", (4522, 4539), False, 'import torch\n'), ((895, 916), 'torch.unsqueeze', 'torch.unsqueeze', (['x', '(0)'], {}), '(x, 0)\n', (910, 916), False, 'import torch\n'), ((1108, 1118), 'numpy.mean', 'np.mean', (['A'], {}), '(A)\n', (1115, 1118), True, 'import numpy as np\n'), ((1120, 1129), 'numpy.std', 'np.std', (['A'], {}), '(A)\n', (1126, 1129), True, 'import numpy as np\n'), ((1732, 1752), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (1746, 1752), True, 'import numpy as np\n'), ((1817, 1839), 'numpy.random.choice', 'np.random.choice', (['N', '(2)'], {}), '(N, 2)\n', (1833, 1839), True, 'import numpy as np\n'), ((2253, 2280), 'numpy.corrcoef', 
'np.corrcoef', (['res[0]', 'res[1]'], {}), '(res[0], res[1])\n', (2264, 2280), True, 'import numpy as np\n'), ((3690, 3778), 'NeuralGraph.model.QSAR', 'QSAR', ([], {'hid_dim': 'FP_LEN', 'n_class': '(1)', 'max_degree': 'rd', 'gcn_activation': 'a1', 'gop_activation': 'a2'}), '(hid_dim=FP_LEN, n_class=1, max_degree=rd, gcn_activation=a1,\n gop_activation=a2)\n', (3694, 3778), False, 'from NeuralGraph.model import QSAR\n'), ((4215, 4233), 'json.dump', 'json.dump', (['res', 'fp'], {}), '(res, fp)\n', (4224, 4233), False, 'import json\n'), ((1400, 1442), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['m.weight', 'lo_bnd', 'hi_bnd'], {}), '(m.weight, lo_bnd, hi_bnd)\n', (1416, 1442), True, 'import torch.nn as nn\n'), ((1494, 1534), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['m.bias', 'lo_bnd', 'hi_bnd'], {}), '(m.bias, lo_bnd, hi_bnd)\n', (1510, 1534), True, 'import torch.nn as nn\n'), ((2756, 2783), 'numpy.corrcoef', 'np.corrcoef', (['res[0]', 'res[1]'], {}), '(res[0], res[1])\n', (2767, 2783), True, 'import numpy as np\n')]
|
# File :all.py
# Author :WJ
# Function :
# Time :2021/02/18
# Version :
# Amend :
import numpy as np
import ConvexPolygon as cp
import HierarchicalClustering as hc
import ConPolyProcess as cs
import LaplacianMatrice as lm
import time
from scipy.optimize import linear_sum_assignment
import Visualization as vs
import TransformationMatrix as tf
# <editor-fold desc="Method">
def conpoly_points(data, clusters, clusters_num=2):
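    # For each cluster: take the convex hull of its points, iteratively drop
    # hull vertices when three or more are (nearly) collinear, then merge
    # vertices closer than 3 units, and collect the surviving polygon vertices.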
P = []
for k in range(clusters_num):
        ## Build a True/False mask from whether each value in labels equals k
my_members = clusters == k
        ## X[my_members, 0] selects the x-coordinates at positions where my_members is True
data_tem = data[my_members, :]
indexes = cp.ConvexPolygon(data_tem)
points = np.array(data_tem[indexes, :], dtype=np.float32)
while 1:
max, a0, b0 = cs.maxPoints(points=points)
if max > 2:
points = cs.delete_linepoints(points, a0, b0, 3)
else:
break
points = hc.mergeClosePoints(points, 3)
for i in range(len(points)):
P.append(points[i, :])
return np.array(P)
# </editor-fold>
start0 = time.time()
print('Computing building convex-polygon vertices------------------------------------------------------')
# Load data
data_dlg = np.loadtxt('..\\data\\Polyline_PCB02_500.txt', delimiter=',')
data_dopp = np.loadtxt('..\\data\\PCB_c1_z5_t20.txt', delimiter='\t')
data_dlg0 = data_dlg[:, 0:2]
data_dopp0 = data_dopp[:, 0:2]
# Set the number of building clusters in the point cloud
clusters_num = 2
# Clustering
data_dlg, clusters_dlg = hc.HierarchicalClustering(data_dlg0, clusters_num, 'dlg')
data_dopp, clusters_dopp = hc.HierarchicalClustering(data_dopp0, clusters_num, 'dopp')
# Compute each building's convex polygon (and post-process its vertices)
P_dlg = conpoly_points(data_dlg, clusters_dlg, clusters_num)
P_dopp = conpoly_points(data_dopp, clusters_dopp, clusters_num)
# Visualize the convex-polygon vertices
vs.Visualize2PointClouds(data_dlg, P_dlg, 'ConPoly_dlg', feature1=['blue', 'dlg', '.'], feature2=['red', 'vertex', 'o'])
vs.Visualize2PointClouds(data_dopp, P_dopp, 'ConPoly_dopp', feature1=['blue', 'dopp', '.'],
feature2=['red', 'vertex', 'o'])
start1 = time.time()
TIME = start1 - start0
print('Elapsed: {:.0f} hours {:.0f} minutes {:.0f} seconds'.format(TIME // 3600, TIME % 3600 // 60, TIME % 3600 % 60))
print('Graph matching------------------------------------------------------')
# Compute the Laplacian matrices
B_dlg = lm.LaplacianMatrice(P_dlg)
B_dopp = lm.LaplacianMatrice(P_dopp)
# Spectral decomposition of the Laplacian matrices
U_dlg, Lambda_dlg = lm.LaplacianMatrice_decomposed(B_dlg)
U_dopp, Lambda_dopp = lm.LaplacianMatrice_decomposed(B_dopp)
# Compute the dissimilarity matrix
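# The leading k eigenvectors embed each vertex set; comparing the two
# embeddings yields the pairwise dissimilarity matrix A (smaller = closer).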
k = min(len(P_dlg), len(P_dopp))
A = lm.corrlation(U_dopp, U_dlg, k)
# Bipartite matching on the dissimilarity matrix (drop pairs whose dissimilarity is too large)
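# scipy's linear_sum_assignment solves the linear assignment problem
# (Hungarian algorithm), minimizing the total matching cost in A.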
row_ind, col_ind = linear_sum_assignment(A)
row, col = lm.DeleteLargeValue(A, row_ind, col_ind, 0.9)
# Re-order the point clouds according to the matching result
P_dlg_new = lm.resort_clouds(P_dlg, row)
P_dopp_new = lm.resort_clouds(P_dopp, col)
# Visualize the vertex-matching result
vs.VisualizeMatch(P_dopp, P_dlg, row, col, 'convex polygon vertices')
# Compute the transformation matrix (and apply it to the dopp cloud)
R, T = tf.ca_rt(P_dopp_new, P_dlg_new, 'MatchingByConPolyPoints_result.txt')
data_dopp = tf.transformation(data_dopp0, R, T, 'dopp_transformed.txt')
# Visualize the registration result on the original point clouds
vs.Visualize2PointClouds(data_dopp, data_dlg0, 'Macth_dlg&dopp', feature1=['blue', 'dopp', '.'],
feature2=['red', 'dlg', '.'])
start2 = time.time()
TIME = start2 - start1
print('Elapsed: {:.0f} hours {:.0f} minutes {:.0f} seconds'.format(TIME // 3600, TIME % 3600 // 60, TIME % 3600 % 60))
TIME = time.time() - start0
print('\nTotal elapsed: {:.0f} hours {:.0f} minutes {:.0f} seconds'.format(TIME // 3600, TIME % 3600 // 60, TIME % 3600 % 60))
|
[
"LaplacianMatrice.corrlation",
"HierarchicalClustering.HierarchicalClustering",
"HierarchicalClustering.mergeClosePoints",
"Visualization.VisualizeMatch",
"TransformationMatrix.transformation",
"ConPolyProcess.maxPoints",
"ConPolyProcess.delete_linepoints",
"LaplacianMatrice.resort_clouds",
"LaplacianMatrice.LaplacianMatrice",
"Visualization.Visualize2PointClouds",
"time.time",
"LaplacianMatrice.LaplacianMatrice_decomposed",
"numpy.loadtxt",
"numpy.array",
"LaplacianMatrice.DeleteLargeValue",
"TransformationMatrix.ca_rt",
"ConvexPolygon.ConvexPolygon",
"scipy.optimize.linear_sum_assignment"
] |
[((1201, 1212), 'time.time', 'time.time', ([], {}), '()\n', (1210, 1212), False, 'import time\n'), ((1310, 1371), 'numpy.loadtxt', 'np.loadtxt', (['"""..\\\\data\\\\Polyline_PCB02_500.txt"""'], {'delimiter': '""","""'}), "('..\\\\data\\\\Polyline_PCB02_500.txt', delimiter=',')\n", (1320, 1371), True, 'import numpy as np\n'), ((1385, 1442), 'numpy.loadtxt', 'np.loadtxt', (['"""..\\\\data\\\\PCB_c1_z5_t20.txt"""'], {'delimiter': '"""\t"""'}), "('..\\\\data\\\\PCB_c1_z5_t20.txt', delimiter='\\t')\n", (1395, 1442), True, 'import numpy as np\n'), ((1574, 1631), 'HierarchicalClustering.HierarchicalClustering', 'hc.HierarchicalClustering', (['data_dlg0', 'clusters_num', '"""dlg"""'], {}), "(data_dlg0, clusters_num, 'dlg')\n", (1599, 1631), True, 'import HierarchicalClustering as hc\n'), ((1660, 1719), 'HierarchicalClustering.HierarchicalClustering', 'hc.HierarchicalClustering', (['data_dopp0', 'clusters_num', '"""dopp"""'], {}), "(data_dopp0, clusters_num, 'dopp')\n", (1685, 1719), True, 'import HierarchicalClustering as hc\n'), ((1894, 2018), 'Visualization.Visualize2PointClouds', 'vs.Visualize2PointClouds', (['data_dlg', 'P_dlg', '"""ConPoly_dlg"""'], {'feature1': "['blue', 'dlg', '.']", 'feature2': "['red', 'vertex', 'o']"}), "(data_dlg, P_dlg, 'ConPoly_dlg', feature1=['blue',\n    'dlg', '.'], feature2=['red', 'vertex', 'o'])\n", (1918, 2018), True, 'import Visualization as vs\n'), ((2016, 2145), 'Visualization.Visualize2PointClouds', 'vs.Visualize2PointClouds', (['data_dopp', 'P_dopp', '"""ConPoly_dopp"""'], {'feature1': "['blue', 'dopp', '.']", 'feature2': "['red', 'vertex', 'o']"}), "(data_dopp, P_dopp, 'ConPoly_dopp', feature1=[\n    'blue', 'dopp', '.'], feature2=['red', 'vertex', 'o'])\n", (2040, 2145), True, 'import Visualization as vs\n'), ((2179, 2190), 'time.time', 'time.time', ([], {}), '()\n', (2188, 2190), False, 'import time\n'), ((2420, 2446), 'LaplacianMatrice.LaplacianMatrice', 'lm.LaplacianMatrice', (['P_dlg'], {}), '(P_dlg)\n', (2439, 2446), True, 'import LaplacianMatrice as lm\n'), ((2457, 2484), 'LaplacianMatrice.LaplacianMatrice', 'lm.LaplacianMatrice', (['P_dopp'], {}), '(P_dopp)\n', (2476, 2484), True, 'import LaplacianMatrice as lm\n'), ((2524, 2561), 'LaplacianMatrice.LaplacianMatrice_decomposed', 'lm.LaplacianMatrice_decomposed', (['B_dlg'], {}), '(B_dlg)\n', (2554, 2561), True, 'import LaplacianMatrice as lm\n'), ((2585, 2623), 'LaplacianMatrice.LaplacianMatrice_decomposed', 'lm.LaplacianMatrice_decomposed', (['B_dopp'], {}), '(B_dopp)\n', (2615, 2623), True, 'import LaplacianMatrice as lm\n'), ((2676, 2707), 'LaplacianMatrice.corrlation', 'lm.corrlation', (['U_dopp', 'U_dlg', 'k'], {}), '(U_dopp, U_dlg, k)\n', (2689, 2707), True, 'import LaplacianMatrice as lm\n'), ((2758, 2782), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['A'], {}), '(A)\n', (2779, 2782), False, 'from scipy.optimize import linear_sum_assignment\n'), ((2795, 2840), 'LaplacianMatrice.DeleteLargeValue', 'lm.DeleteLargeValue', (['A', 'row_ind', 'col_ind', '(0.9)'], {}), '(A, row_ind, col_ind, 0.9)\n', (2814, 2840), True, 'import LaplacianMatrice as lm\n'), ((2871, 2899), 'LaplacianMatrice.resort_clouds', 'lm.resort_clouds', (['P_dlg', 'row'], {}), '(P_dlg, row)\n', (2887, 2899), True, 'import LaplacianMatrice as lm\n'), ((2911, 2940), 'LaplacianMatrice.resort_clouds', 'lm.resort_clouds', (['P_dopp', 'col'], {}), '(P_dopp, col)\n', (2927, 2940), True, 'import LaplacianMatrice as lm\n'), ((2960, 3012), 'Visualization.VisualizeMatch', 'vs.VisualizeMatch', (['P_dopp', 'P_dlg', 'row', 'col', '"""convex polygon vertices"""'], {}), "(P_dopp, P_dlg, row, col, 'convex polygon vertices')\n", (2977, 3012), True, 'import Visualization as vs\n'), ((3046, 3115), 'TransformationMatrix.ca_rt', 'tf.ca_rt', (['P_dopp_new', 'P_dlg_new', '"""MatchingByConPolyPoints_result.txt"""'], {}), "(P_dopp_new, P_dlg_new, 'MatchingByConPolyPoints_result.txt')\n", (3054, 3115), True, 'import TransformationMatrix as tf\n'), ((3129, 3188), 'TransformationMatrix.transformation', 'tf.transformation', (['data_dopp0', 'R', 'T', '"""dopp_transformed.txt"""'], {}), "(data_dopp0, R, T, 'dopp_transformed.txt')\n", (3146, 3188), True, 'import TransformationMatrix as tf\n'), ((3207, 3338), 'Visualization.Visualize2PointClouds', 'vs.Visualize2PointClouds', (['data_dopp', 'data_dlg0', '"""Macth_dlg&dopp"""'], {'feature1': "['blue', 'dopp', '.']", 'feature2': "['red', 'dlg', '.']"}), "(data_dopp, data_dlg0, 'Macth_dlg&dopp', feature1=[\n    'blue', 'dopp', '.'], feature2=['red', 'dlg', '.'])\n", (3231, 3338), True, 'import Visualization as vs\n'), ((3372, 3383), 'time.time', 'time.time', ([], {}), '()\n', (3381, 3383), False, 'import time\n'), ((1155, 1166), 'numpy.array', 'np.array', (['P'], {}), '(P)\n', (1163, 1166), True, 'import numpy as np\n'), ((3534, 3545), 'time.time', 'time.time', ([], {}), '()\n', (3543, 3545), False, 'import time\n'), ((716, 742), 'ConvexPolygon.ConvexPolygon', 'cp.ConvexPolygon', (['data_tem'], {}), '(data_tem)\n', (732, 742), True, 'import ConvexPolygon as cp\n'), ((761, 809), 'numpy.array', 'np.array', (['data_tem[indexes, :]'], {'dtype': 'np.float32'}), '(data_tem[indexes, :], dtype=np.float32)\n', (769, 809), True, 'import numpy as np\n'), ((1036, 1066), 'HierarchicalClustering.mergeClosePoints', 'hc.mergeClosePoints', (['points', '(3)'], {}), '(points, 3)\n', (1055, 1066), True, 'import HierarchicalClustering as hc\n'), ((857, 884), 'ConPolyProcess.maxPoints', 'cs.maxPoints', ([], {'points': 'points'}), '(points=points)\n', (869, 884), True, 'import ConPolyProcess as cs\n'), ((936, 975), 'ConPolyProcess.delete_linepoints', 'cs.delete_linepoints', (['points', 'a0', 'b0', '(3)'], {}), '(points, a0, b0, 3)\n', (956, 975), True, 'import ConPolyProcess as cs\n')]
|
import numpy as np
import colorednoise as cn
from automutualinformation import sequential_mutual_information as smi
from automutualinformation import fit_model
beta = 0.5 # the exponent
samples = 10000 # number of samples to generate
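# powerlaw_psd_gaussian draws Gaussian noise whose power spectrum falls off
# as 1/f**beta (beta = 0: white noise, 1: pink, 2: Brownian).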
y = cn.powerlaw_psd_gaussian(beta, samples)
nbins = 10 # how many bins to compute over
bins = np.linspace(np.min(y), np.max(y), nbins)
y_dig = np.digitize(y, bins, right=True)
range_ = np.arange(1, 10)
def test_compute_mi():
(MI, _), (shuff_MI, _) = smi([y_dig], distances=range_, n_jobs=1)
def test_compute_mi_fit_model():
(MI, _), (shuff_MI, _) = smi([y_dig], distances=range_, n_jobs=1)
decay_model, model_y = fit_model(
distances=range_,
sig=MI - shuff_MI,
)
|
[
"colorednoise.powerlaw_psd_gaussian",
"automutualinformation.sequential_mutual_information",
"numpy.max",
"numpy.min",
"numpy.arange",
"automutualinformation.fit_model",
"numpy.digitize"
] |
[((301, 340), 'colorednoise.powerlaw_psd_gaussian', 'cn.powerlaw_psd_gaussian', (['beta', 'samples'], {}), '(beta, samples)\n', (325, 340), True, 'import colorednoise as cn\n'), ((441, 473), 'numpy.digitize', 'np.digitize', (['y', 'bins'], {'right': '(True)'}), '(y, bins, right=True)\n', (452, 473), True, 'import numpy as np\n'), ((483, 499), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (492, 499), True, 'import numpy as np\n'), ((404, 413), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (410, 413), True, 'import numpy as np\n'), ((415, 424), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (421, 424), True, 'import numpy as np\n'), ((554, 594), 'automutualinformation.sequential_mutual_information', 'smi', (['[y_dig]'], {'distances': 'range_', 'n_jobs': '(1)'}), '([y_dig], distances=range_, n_jobs=1)\n', (557, 594), True, 'from automutualinformation import sequential_mutual_information as smi\n'), ((659, 699), 'automutualinformation.sequential_mutual_information', 'smi', (['[y_dig]'], {'distances': 'range_', 'n_jobs': '(1)'}), '([y_dig], distances=range_, n_jobs=1)\n', (662, 699), True, 'from automutualinformation import sequential_mutual_information as smi\n'), ((728, 774), 'automutualinformation.fit_model', 'fit_model', ([], {'distances': 'range_', 'sig': '(MI - shuff_MI)'}), '(distances=range_, sig=MI - shuff_MI)\n', (737, 774), False, 'from automutualinformation import fit_model\n')]
|
import logging
import sys
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras import layers
from tensorflow.keras.layers import (
AveragePooling2D,
BatchNormalization,
Conv2D,
MaxPooling2D,
SeparableConv2D,
)
from tensorflow.keras.models import Model
sys.setrecursionlimit(2 ** 20)
np.random.seed(2 ** 10)
class SSR_net:
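    """SSR-Net-style age regressor: two parallel conv streams (ReLU and tanh),
    fused at several scales and decoded by three soft stagewise regression
    stages into a single age value (output range V = 101)."""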
def __init__(self, image_size, stage_num, lambda_local, lambda_d):
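        # NOTE: K.image_dim_ordering() ("th"/"tf") is a legacy Keras-1 API;
        # newer tf.keras versions expose K.image_data_format() instead.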
if K.image_dim_ordering() == "th":
logging.debug("image_dim_ordering = 'th'")
self._channel_axis = 1
self._input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.stage_num = stage_num
self.lambda_local = lambda_local
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
inputs = layers.Input(shape=self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = Conv2D(32, (3, 3))(inputs)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer1 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer1)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer2 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer2)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer3 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer3)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = Conv2D(16, (3, 3))(inputs)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer1 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer1)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer2 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer2)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer3 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer3)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = Conv2D(10, (1, 1), activation="relu")(s)
s_layer4 = layers.Flatten()(s_layer4)
s_layer4_mix = layers.Dropout(0.2)(s_layer4)
s_layer4_mix = layers.Dense(units=self.stage_num[0], activation="relu")(
s_layer4_mix
)
x_layer4 = Conv2D(10, (1, 1), activation="relu")(x)
x_layer4 = layers.Flatten()(x_layer4)
x_layer4_mix = layers.Dropout(0.2)(x_layer4)
x_layer4_mix = layers.Dense(units=self.stage_num[0], activation="relu")(
x_layer4_mix
)
feat_a_s1_pre = layers.Multiply()([s_layer4, x_layer4])
delta_s1 = layers.Dense(1, activation="tanh", name="delta_s1")(feat_a_s1_pre)
feat_a_s1 = layers.Multiply()([s_layer4_mix, x_layer4_mix])
feat_a_s1 = layers.Dense(2 * self.stage_num[0], activation="relu")(feat_a_s1)
pred_a_s1 = layers.Dense(
units=self.stage_num[0], activation="relu", name="pred_age_stage1"
)(feat_a_s1)
# feat_local_s1 = layers.Lambda(lambda x: x/10)(feat_a_s1)
# feat_a_s1_local = Dropout(0.2)(pred_a_s1)
local_s1 = layers.Dense(
units=self.stage_num[0], activation="tanh", name="local_delta_stage1",
)(feat_a_s1)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = Conv2D(10, (1, 1), activation="relu")(s_layer2)
s_layer2 = MaxPooling2D(4, 4)(s_layer2)
s_layer2 = layers.Flatten()(s_layer2)
s_layer2_mix = layers.Dropout(0.2)(s_layer2)
s_layer2_mix = layers.Dense(self.stage_num[1], activation="relu")(s_layer2_mix)
x_layer2 = Conv2D(10, (1, 1), activation="relu")(x_layer2)
x_layer2 = AveragePooling2D(4, 4)(x_layer2)
x_layer2 = layers.Flatten()(x_layer2)
x_layer2_mix = layers.Dropout(0.2)(x_layer2)
x_layer2_mix = layers.Dense(self.stage_num[1], activation="relu")(x_layer2_mix)
feat_a_s2_pre = layers.Multiply()([s_layer2, x_layer2])
delta_s2 = layers.Dense(1, activation="tanh", name="delta_s2")(feat_a_s2_pre)
feat_a_s2 = layers.Multiply()([s_layer2_mix, x_layer2_mix])
feat_a_s2 = layers.Dense(2 * self.stage_num[1], activation="relu")(feat_a_s2)
pred_a_s2 = layers.Dense(
units=self.stage_num[1], activation="relu", name="pred_age_stage2"
)(feat_a_s2)
# feat_local_s2 = layers.Lambda(lambda x: x/10)(feat_a_s2)
# feat_a_s2_local = Dropout(0.2)(pred_a_s2)
local_s2 = layers.Dense(
units=self.stage_num[1], activation="tanh", name="local_delta_stage2",
)(feat_a_s2)
# -------------------------------------------------------------------------------------------------------------------------
s_layer1 = Conv2D(10, (1, 1), activation="relu")(s_layer1)
s_layer1 = MaxPooling2D(8, 8)(s_layer1)
s_layer1 = layers.Flatten()(s_layer1)
s_layer1_mix = layers.Dropout(0.2)(s_layer1)
s_layer1_mix = layers.Dense(self.stage_num[2], activation="relu")(s_layer1_mix)
x_layer1 = Conv2D(10, (1, 1), activation="relu")(x_layer1)
x_layer1 = AveragePooling2D(8, 8)(x_layer1)
x_layer1 = layers.Flatten()(x_layer1)
x_layer1_mix = layers.Dropout(0.2)(x_layer1)
x_layer1_mix = layers.Dense(self.stage_num[2], activation="relu")(x_layer1_mix)
feat_a_s3_pre = layers.Multiply()([s_layer1, x_layer1])
delta_s3 = layers.Dense(1, activation="tanh", name="delta_s3")(feat_a_s3_pre)
feat_a_s3 = layers.Multiply()([s_layer1_mix, x_layer1_mix])
feat_a_s3 = layers.Dense(2 * self.stage_num[2], activation="relu")(feat_a_s3)
pred_a_s3 = layers.Dense(
units=self.stage_num[2], activation="relu", name="pred_age_stage3"
)(feat_a_s3)
# feat_local_s3 = layers.Lambda(lambda x: x/10)(feat_a_s3)
# feat_a_s3_local = Dropout(0.2)(pred_a_s3)
local_s3 = layers.Dense(
units=self.stage_num[2], activation="tanh", name="local_delta_stage3",
)(feat_a_s3)
# -------------------------------------------------------------------------------------------------------------------------
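        # Soft stagewise regression: each stage's softmax-like vector x[0..2]
        # is turned into a soft bin index (bin position i shifted by the
        # learned local offsets lambda_local * x[6..8]), normalised by the
        # stage width widened via the dynamic deltas x[3..5]; the three terms
        # are summed and scaled to the output range V = 101.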
def merge_age(x, s1, s2, s3, lambda_local, lambda_d):
a = x[0][:, 0] * 0
b = x[0][:, 0] * 0
c = x[0][:, 0] * 0
# A = s1 * s2 * s3
V = 101
for i in range(0, s1):
a = a + (i + lambda_local * x[6][:, i]) * x[0][:, i]
a = K.expand_dims(a, -1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j + lambda_local * x[7][:, j]) * x[1][:, j]
b = K.expand_dims(b, -1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k + lambda_local * x[8][:, k]) * x[2][:, k]
c = K.expand_dims(c, -1)
c = (
c
/ (s1 * (1 + lambda_d * x[3]))
/ (s2 * (1 + lambda_d * x[4]))
/ (s3 * (1 + lambda_d * x[5]))
)
age = (a + b + c) * V
return age
pred_a = layers.Lambda(
merge_age,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_local": self.lambda_local,
"lambda_d": self.lambda_d,
},
name="pred_a",
)(
[
pred_a_s1,
pred_a_s2,
pred_a_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = Model(inputs=inputs, outputs=pred_a)
return model
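# A minimal usage sketch (hypothetical sizes): build a 64x64 SSR-Net with
# three 3-bin stages and run a dummy batch through it:
#   model = SSR_net(64, [3, 3, 3], 1.0, 1.0)()
#   age = model.predict(np.zeros((1, 64, 64, 3)))  # -> shape (1, 1)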
class SSR_net_general:
def __init__(self, image_size, stage_num, lambda_local, lambda_d):
if K.image_dim_ordering() == "th":
logging.debug("image_dim_ordering = 'th'")
self._channel_axis = 1
self._input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.stage_num = stage_num
self.lambda_local = lambda_local
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
inputs = layers.Input(shape=self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = Conv2D(32, (3, 3))(inputs)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer1 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer1)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer2 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer2)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer3 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer3)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = Conv2D(16, (3, 3))(inputs)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer1 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer1)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer2 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer2)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer3 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer3)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = Conv2D(10, (1, 1), activation="relu")(s)
s_layer4 = layers.Flatten()(s_layer4)
s_layer4_mix = layers.Dropout(0.2)(s_layer4)
s_layer4_mix = layers.Dense(units=self.stage_num[0], activation="relu")(
s_layer4_mix
)
x_layer4 = Conv2D(10, (1, 1), activation="relu")(x)
x_layer4 = layers.Flatten()(x_layer4)
x_layer4_mix = layers.Dropout(0.2)(x_layer4)
x_layer4_mix = layers.Dense(units=self.stage_num[0], activation="relu")(
x_layer4_mix
)
feat_s1_pre = layers.Multiply()([s_layer4, x_layer4])
delta_s1 = layers.Dense(1, activation="tanh", name="delta_s1")(feat_s1_pre)
feat_s1 = layers.Multiply()([s_layer4_mix, x_layer4_mix])
feat_s1 = layers.Dense(2 * self.stage_num[0], activation="relu")(feat_s1)
pred_s1 = layers.Dense(
units=self.stage_num[0], activation="relu", name="pred_stage1"
)(feat_s1)
local_s1 = layers.Dense(
units=self.stage_num[0], activation="tanh", name="local_delta_stage1",
)(feat_s1)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = Conv2D(10, (1, 1), activation="relu")(s_layer2)
s_layer2 = MaxPooling2D(4, 4)(s_layer2)
s_layer2 = layers.Flatten()(s_layer2)
s_layer2_mix = layers.Dropout(0.2)(s_layer2)
s_layer2_mix = layers.Dense(self.stage_num[1], activation="relu")(s_layer2_mix)
x_layer2 = Conv2D(10, (1, 1), activation="relu")(x_layer2)
x_layer2 = AveragePooling2D(4, 4)(x_layer2)
x_layer2 = layers.Flatten()(x_layer2)
x_layer2_mix = layers.Dropout(0.2)(x_layer2)
x_layer2_mix = layers.Dense(self.stage_num[1], activation="relu")(x_layer2_mix)
feat_s2_pre = layers.Multiply()([s_layer2, x_layer2])
delta_s2 = layers.Dense(1, activation="tanh", name="delta_s2")(feat_s2_pre)
feat_s2 = layers.Multiply()([s_layer2_mix, x_layer2_mix])
feat_s2 = layers.Dense(2 * self.stage_num[1], activation="relu")(feat_s2)
pred_s2 = layers.Dense(
units=self.stage_num[1], activation="relu", name="pred_stage2"
)(feat_s2)
local_s2 = layers.Dense(
units=self.stage_num[1], activation="tanh", name="local_delta_stage2",
)(feat_s2)
# -------------------------------------------------------------------------------------------------------------------------
s_layer1 = Conv2D(10, (1, 1), activation="relu")(s_layer1)
s_layer1 = MaxPooling2D(8, 8)(s_layer1)
s_layer1 = layers.Flatten()(s_layer1)
s_layer1_mix = layers.Dropout(0.2)(s_layer1)
s_layer1_mix = layers.Dense(self.stage_num[2], activation="relu")(s_layer1_mix)
x_layer1 = Conv2D(10, (1, 1), activation="relu")(x_layer1)
x_layer1 = AveragePooling2D(8, 8)(x_layer1)
x_layer1 = layers.Flatten()(x_layer1)
x_layer1_mix = layers.Dropout(0.2)(x_layer1)
x_layer1_mix = layers.Dense(self.stage_num[2], activation="relu")(x_layer1_mix)
feat_s3_pre = layers.Multiply()([s_layer1, x_layer1])
delta_s3 = layers.Dense(1, activation="tanh", name="delta_s3")(feat_s3_pre)
feat_s3 = layers.Multiply()([s_layer1_mix, x_layer1_mix])
feat_s3 = layers.Dense(2 * self.stage_num[2], activation="relu")(feat_s3)
pred_s3 = layers.Dense(
units=self.stage_num[2], activation="relu", name="pred_stage3"
)(feat_s3)
local_s3 = layers.Dense(
units=self.stage_num[2], activation="tanh", name="local_delta_stage3",
)(feat_s3)
# -------------------------------------------------------------------------------------------------------------------------
def SSR_module(x, s1, s2, s3, lambda_local, lambda_d):
a = x[0][:, 0] * 0
b = x[0][:, 0] * 0
c = x[0][:, 0] * 0
V = 1
for i in range(0, s1):
a = a + (i + lambda_local * x[6][:, i]) * x[0][:, i]
a = K.expand_dims(a, -1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j + lambda_local * x[7][:, j]) * x[1][:, j]
b = K.expand_dims(b, -1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k + lambda_local * x[8][:, k]) * x[2][:, k]
c = K.expand_dims(c, -1)
c = (
c
/ (s1 * (1 + lambda_d * x[3]))
/ (s2 * (1 + lambda_d * x[4]))
/ (s3 * (1 + lambda_d * x[5]))
)
out = (a + b + c) * V
return out
pred = layers.Lambda(
SSR_module,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_local": self.lambda_local,
"lambda_d": self.lambda_d,
},
name="pred",
)(
[
pred_s1,
pred_s2,
pred_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = Model(inputs=inputs, outputs=pred)
return model
class SSR_net_MT:
def __init__(self, image_size, num_classes, stage_num, lambda_d):
if K.image_dim_ordering() == "th":
logging.debug("image_dim_ordering = 'th'")
self._channel_axis = 1
self._input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.num_classes = num_classes
self.stage_num = stage_num
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
img_inputs = layers.Input(self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = SeparableConv2D(16, (3, 3), padding="same")(img_inputs)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x_layer1 = AveragePooling2D((2, 2))(x)
x = SeparableConv2D(32, (3, 3), padding="same")(x_layer1)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x = SeparableConv2D(32, (3, 3), padding="same")(x)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x_layer2 = AveragePooling2D((2, 2))(x)
x = SeparableConv2D(64, (3, 3), padding="same")(x_layer2)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x = SeparableConv2D(64, (3, 3), padding="same")(x)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x_layer3 = AveragePooling2D((2, 2))(x)
x = SeparableConv2D(128, (3, 3), padding="same")(x_layer3)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x = SeparableConv2D(128, (3, 3), padding="same")(x)
x = BatchNormalization(axis=-1)(x)
x_layer4 = layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = SeparableConv2D(16, (3, 3), padding="same")(img_inputs)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s_layer1 = MaxPooling2D((2, 2))(s)
s = SeparableConv2D(32, (3, 3), padding="same")(s_layer1)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s = SeparableConv2D(32, (3, 3), padding="same")(s)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s_layer2 = MaxPooling2D((2, 2))(s)
s = SeparableConv2D(64, (3, 3), padding="same")(s_layer2)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s = SeparableConv2D(64, (3, 3), padding="same")(s)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s_layer3 = MaxPooling2D((2, 2))(s)
s = SeparableConv2D(128, (3, 3), padding="same")(s_layer3)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s = SeparableConv2D(128, (3, 3), padding="same")(s)
s = BatchNormalization(axis=-1)(s)
s_layer4 = layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = Conv2D(64, (1, 1), activation="tanh")(s_layer4)
s_layer4 = MaxPooling2D((2, 2))(s_layer4)
x_layer4 = Conv2D(64, (1, 1), activation="relu")(x_layer4)
x_layer4 = AveragePooling2D((2, 2))(x_layer4)
feat_s1_pre = layers.Multiply()([s_layer4, x_layer4])
feat_s1_pre = layers.Flatten()(feat_s1_pre)
feat_delta_s1 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
delta_s1 = layers.Dense(self.num_classes, activation="tanh", name="delta_s1")(
feat_delta_s1
)
feat_local_s1 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
local_s1 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage1"
)(feat_local_s1)
feat_pred_s1 = layers.Dense(
self.stage_num[0] * self.num_classes, activation="relu"
)(feat_s1_pre)
pred_a_s1 = layers.Reshape((self.num_classes, self.stage_num[0]))(feat_pred_s1)
# -------------------------------------------------------------------------------------------------------------------------
s_layer3 = Conv2D(64, (1, 1), activation="tanh")(s_layer3)
s_layer3 = MaxPooling2D((2, 2))(s_layer3)
x_layer3 = Conv2D(64, (1, 1), activation="relu")(x_layer3)
x_layer3 = AveragePooling2D((2, 2))(x_layer3)
feat_s2_pre = layers.Multiply()([s_layer3, x_layer3])
feat_s2_pre = layers.Flatten()(feat_s2_pre)
feat_delta_s2 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
delta_s2 = layers.Dense(self.num_classes, activation="tanh", name="delta_s2")(
feat_delta_s2
)
feat_local_s2 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
local_s2 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage2"
)(feat_local_s2)
feat_pred_s2 = layers.Dense(
self.stage_num[1] * self.num_classes, activation="relu"
)(feat_s2_pre)
pred_a_s2 = layers.Reshape((self.num_classes, self.stage_num[1]))(feat_pred_s2)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = Conv2D(64, (1, 1), activation="tanh")(s_layer2)
s_layer2 = MaxPooling2D((2, 2))(s_layer2)
x_layer2 = Conv2D(64, (1, 1), activation="relu")(x_layer2)
x_layer2 = AveragePooling2D((2, 2))(x_layer2)
feat_s3_pre = layers.Multiply()([s_layer2, x_layer2])
feat_s3_pre = layers.Flatten()(feat_s3_pre)
feat_delta_s3 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
delta_s3 = layers.Dense(self.num_classes, activation="tanh", name="delta_s3")(
feat_delta_s3
)
feat_local_s3 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
local_s3 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage3"
)(feat_local_s3)
feat_pred_s3 = layers.Dense(
self.stage_num[2] * self.num_classes, activation="relu"
)(feat_s3_pre)
pred_a_s3 = layers.Reshape((self.num_classes, self.stage_num[2]))(feat_pred_s3)
# -------------------------------------------------------------------------------------------------------------------------
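        # Multi-task variant of the soft stagewise regression above: bins are
        # centred (i - s//2), the offsets x[6..8] act per output class, and
        # the sum is scaled to V = 99 (the "pred_pose" head regresses one
        # value per class, e.g. per pose angle).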
def SSR_module(x, s1, s2, s3, lambda_d):
a = x[0][:, :, 0] * 0
b = x[0][:, :, 0] * 0
c = x[0][:, :, 0] * 0
di = s1 // 2
dj = s2 // 2
dk = s3 // 2
V = 99
# lambda_d = 0.9
for i in range(0, s1):
a = a + (i - di + x[6]) * x[0][:, :, i]
# a = K.expand_dims(a,-1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j - dj + x[7]) * x[1][:, :, j]
# b = K.expand_dims(b,-1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k - dk + x[8]) * x[2][:, :, k]
# c = K.expand_dims(c,-1)
c = (
c
/ (s1 * (1 + lambda_d * x[3]))
/ (s2 * (1 + lambda_d * x[4]))
/ (s3 * (1 + lambda_d * x[5]))
)
pred = (a + b + c) * V
return pred
pred_pose = layers.Lambda(
SSR_module,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_d": self.lambda_d,
},
name="pred_pose",
)(
[
pred_a_s1,
pred_a_s2,
pred_a_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = Model(inputs=img_inputs, outputs=pred_pose)
return model
class SSR_net_ori_MT:
def __init__(self, image_size, num_classes, stage_num, lambda_d):
if K.image_dim_ordering() == "th":
logging.debug("image_dim_ordering = 'th'")
self._channel_axis = 1
self._input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.num_classes = num_classes
self.stage_num = stage_num
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
img_inputs = layers.Input(self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = Conv2D(32, (3, 3), padding="same")(img_inputs)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer1 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3), padding="same")(x_layer1)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer2 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3), padding="same")(x_layer2)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer3 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3), padding="same")(x_layer3)
x = BatchNormalization(axis=self._channel_axis)(x)
x_layer4 = layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = Conv2D(16, (3, 3), padding="same")(img_inputs)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer1 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3), padding="same")(s_layer1)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer2 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3), padding="same")(s_layer2)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer3 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3), padding="same")(s_layer3)
s = BatchNormalization(axis=self._channel_axis)(s)
s_layer4 = layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = Conv2D(64, (1, 1), activation="tanh")(s_layer4)
s_layer4 = MaxPooling2D((2, 2))(s_layer4)
x_layer4 = Conv2D(64, (1, 1), activation="relu")(x_layer4)
x_layer4 = AveragePooling2D((2, 2))(x_layer4)
feat_s1_pre = layers.Multiply()([s_layer4, x_layer4])
feat_s1_pre = layers.Flatten()(feat_s1_pre)
feat_delta_s1 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
delta_s1 = layers.Dense(self.num_classes, activation="tanh", name="delta_s1")(
feat_delta_s1
)
feat_local_s1 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
local_s1 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage1"
)(feat_local_s1)
feat_pred_s1 = layers.Dense(
self.stage_num[0] * self.num_classes, activation="relu"
)(feat_s1_pre)
pred_a_s1 = layers.Reshape((self.num_classes, self.stage_num[0]))(feat_pred_s1)
# -------------------------------------------------------------------------------------------------------------------------
s_layer3 = Conv2D(64, (1, 1), activation="tanh")(s_layer3)
s_layer3 = MaxPooling2D((2, 2))(s_layer3)
x_layer3 = Conv2D(64, (1, 1), activation="relu")(x_layer3)
x_layer3 = AveragePooling2D((2, 2))(x_layer3)
feat_s2_pre = layers.Multiply()([s_layer3, x_layer3])
feat_s2_pre = layers.Flatten()(feat_s2_pre)
feat_delta_s2 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
delta_s2 = layers.Dense(self.num_classes, activation="tanh", name="delta_s2")(
feat_delta_s2
)
feat_local_s2 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
local_s2 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage2"
)(feat_local_s2)
feat_pred_s2 = layers.Dense(
self.stage_num[1] * self.num_classes, activation="relu"
)(feat_s2_pre)
pred_a_s2 = layers.Reshape((self.num_classes, self.stage_num[1]))(feat_pred_s2)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = Conv2D(64, (1, 1), activation="tanh")(s_layer2)
s_layer2 = MaxPooling2D((2, 2))(s_layer2)
x_layer2 = Conv2D(64, (1, 1), activation="relu")(x_layer2)
x_layer2 = AveragePooling2D((2, 2))(x_layer2)
feat_s3_pre = layers.Multiply()([s_layer2, x_layer2])
feat_s3_pre = layers.Flatten()(feat_s3_pre)
feat_delta_s3 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
delta_s3 = layers.Dense(self.num_classes, activation="tanh", name="delta_s3")(
feat_delta_s3
)
feat_local_s3 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
local_s3 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage3"
)(feat_local_s3)
feat_pred_s3 = layers.Dense(
self.stage_num[2] * self.num_classes, activation="relu"
)(feat_s3_pre)
pred_a_s3 = layers.Reshape((self.num_classes, self.stage_num[2]))(feat_pred_s3)
# -------------------------------------------------------------------------------------------------------------------------
def SSR_module(x, s1, s2, s3, lambda_d):
a = x[0][:, :, 0] * 0
b = x[0][:, :, 0] * 0
c = x[0][:, :, 0] * 0
di = s1 // 2
dj = s2 // 2
dk = s3 // 2
V = 99
# lambda_d = 0.9
for i in range(0, s1):
a = a + (i - di + x[6]) * x[0][:, :, i]
# a = K.expand_dims(a,-1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j - dj + x[7]) * x[1][:, :, j]
# b = K.expand_dims(b,-1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k - dk + x[8]) * x[2][:, :, k]
# c = K.expand_dims(c,-1)
c = (
c
/ (s1 * (1 + lambda_d * x[3]))
/ (s2 * (1 + lambda_d * x[4]))
/ (s3 * (1 + lambda_d * x[5]))
)
pred = (a + b + c) * V
return pred
pred_pose = layers.Lambda(
SSR_module,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_d": self.lambda_d,
},
name="pred_pose",
)(
[
pred_a_s1,
pred_a_s2,
pred_a_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = Model(inputs=img_inputs, outputs=pred_pose)
return model
|
[
"numpy.random.seed",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Multiply",
"sys.setrecursionlimit",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.backend.expand_dims",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.SeparableConv2D",
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.models.Model",
"logging.debug",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.backend.image_dim_ordering",
"tensorflow.keras.layers.Lambda"
] |
[((304, 334), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(2 ** 20)'], {}), '(2 ** 20)\n', (325, 334), False, 'import sys\n'), ((335, 358), 'numpy.random.seed', 'np.random.seed', (['(2 ** 10)'], {}), '(2 ** 10)\n', (349, 358), True, 'import numpy as np\n'), ((949, 983), 'logging.debug', 'logging.debug', (['"""Creating model..."""'], {}), "('Creating model...')\n", (962, 983), False, 'import logging\n'), ((1002, 1039), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'self._input_shape'}), '(shape=self._input_shape)\n', (1014, 1039), False, 'from tensorflow.keras import layers\n'), ((8753, 8789), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'pred_a'}), '(inputs=inputs, outputs=pred_a)\n', (8758, 8789), False, 'from tensorflow.keras.models import Model\n'), ((9410, 9444), 'logging.debug', 'logging.debug', (['"""Creating model..."""'], {}), "('Creating model...')\n", (9423, 9444), False, 'import logging\n'), ((9463, 9500), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'self._input_shape'}), '(shape=self._input_shape)\n', (9475, 9500), False, 'from tensorflow.keras import layers\n'), ((16756, 16790), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'pred'}), '(inputs=inputs, outputs=pred)\n', (16761, 16790), False, 'from tensorflow.keras.models import Model\n'), ((17403, 17437), 'logging.debug', 'logging.debug', (['"""Creating model..."""'], {}), "('Creating model...')\n", (17416, 17437), False, 'import logging\n'), ((17460, 17491), 'tensorflow.keras.layers.Input', 'layers.Input', (['self._input_shape'], {}), '(self._input_shape)\n', (17472, 17491), False, 'from tensorflow.keras import layers\n'), ((25488, 25531), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'img_inputs', 'outputs': 'pred_pose'}), '(inputs=img_inputs, outputs=pred_pose)\n', (25493, 25531), False, 'from tensorflow.keras.models import Model\n'), ((26148, 26182), 'logging.debug', 'logging.debug', (['"""Creating model..."""'], {}), "('Creating model...')\n", (26161, 26182), False, 'import logging\n'), ((26205, 26236), 'tensorflow.keras.layers.Input', 'layers.Input', (['self._input_shape'], {}), '(self._input_shape)\n', (26217, 26236), False, 'from tensorflow.keras import layers\n'), ((33415, 33458), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'img_inputs', 'outputs': 'pred_pose'}), '(inputs=img_inputs, outputs=pred_pose)\n', (33420, 33458), False, 'from tensorflow.keras.models import Model\n'), ((459, 481), 'tensorflow.keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (479, 481), True, 'from tensorflow.keras import backend as K\n'), ((503, 545), 'logging.debug', 'logging.debug', (['"""image_dim_ordering = \'th\'"""'], {}), '("image_dim_ordering = \'th\'")\n', (516, 545), False, 'import logging\n'), ((667, 709), 'logging.debug', 'logging.debug', (['"""image_dim_ordering = \'tf\'"""'], {}), '("image_dim_ordering = \'tf\'")\n', (680, 709), False, 'import logging\n'), ((1185, 1203), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (1191, 1203), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1224, 1267), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (1242, 1267), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1283, 1308), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1300, 1308), False, 'from tensorflow.keras import layers\n'), ((1331, 1353), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1347, 1353), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1369, 1387), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (1375, 1387), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1410, 1453), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (1428, 1453), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1469, 1494), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1486, 1494), False, 'from tensorflow.keras import layers\n'), ((1517, 1539), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1533, 1539), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1555, 1573), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (1561, 1573), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1596, 1639), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (1614, 1639), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1655, 1680), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1672, 1680), False, 'from tensorflow.keras import layers\n'), ((1703, 1725), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1719, 1725), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1741, 1759), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (1747, 1759), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1782, 1825), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (1800, 1825), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((1841, 1866), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1858, 1866), False, 'from tensorflow.keras import layers\n'), ((2014, 2032), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {}), '(16, (3, 3))\n', (2020, 2032), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2053, 2096), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (2071, 2096), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2112, 2137), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (2129, 2137), False, 'from tensorflow.keras import layers\n'), ((2160, 2178), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (2172, 2178), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2194, 2212), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {}), '(16, (3, 3))\n', (2200, 2212), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2235, 2278), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (2253, 2278), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2294, 2319), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (2311, 2319), False, 'from tensorflow.keras import layers\n'), ((2342, 2360), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (2354, 2360), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2376, 2394), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {}), '(16, (3, 3))\n', (2382, 2394), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2417, 2460), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (2435, 2460), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2476, 2501), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (2493, 2501), False, 'from tensorflow.keras import layers\n'), ((2524, 2542), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (2536, 2542), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2558, 2576), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {}), '(16, (3, 3))\n', (2564, 2576), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2599, 2642), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (2617, 2642), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2658, 2683), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (2675, 2683), False, 'from tensorflow.keras import layers\n'), ((2866, 2903), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (2872, 2903), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((2926, 2942), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2940, 2942), False, 'from tensorflow.keras import layers\n'), ((2976, 2995), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (2990, 2995), False, 'from tensorflow.keras import layers\n'), ((3029, 3085), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[0]', 'activation': '"""relu"""'}), "(units=self.stage_num[0], activation='relu')\n", (3041, 3085), False, 'from tensorflow.keras import layers\n'), ((3142, 3179), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (3148, 3179), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((3202, 3218), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (3216, 3218), False, 'from tensorflow.keras import layers\n'), ((3252, 3271), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (3266, 3271), False, 'from tensorflow.keras import layers\n'), ((3305, 3361), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[0]', 'activation': '"""relu"""'}), "(units=self.stage_num[0], activation='relu')\n", (3317, 3361), False, 'from tensorflow.keras import layers\n'), ((3423, 3440), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (3438, 3440), False, 'from tensorflow.keras import layers\n'), ((3482, 3533), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""tanh"""', 'name': '"""delta_s1"""'}), "(1, activation='tanh', name='delta_s1')\n", (3494, 3533), False, 'from tensorflow.keras import layers\n'), ((3570, 3587), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (3585, 3587), False, 'from tensorflow.keras import layers\n'), ((3638, 3692), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.stage_num[0])'], {'activation': '"""relu"""'}), "(2 * self.stage_num[0], activation='relu')\n", (3650, 3692), False, 'from tensorflow.keras import layers\n'), ((3724, 3809), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[0]', 'activation': '"""relu"""', 'name': '"""pred_age_stage1"""'}), "(units=self.stage_num[0], activation='relu', name='pred_age_stage1'\n    )\n", (3736, 3809), False, 'from tensorflow.keras import layers\n'), ((3976, 4064), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[0]', 'activation': '"""tanh"""', 'name': '"""local_delta_stage1"""'}), "(units=self.stage_num[0], activation='tanh', name=\n    'local_delta_stage1')\n", (3988, 4064), False, 'from tensorflow.keras import layers\n'), ((4245, 4282), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (4251, 4282), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((4312, 4330), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(4)', '(4)'], {}), '(4, 4)\n', (4324, 4330), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((4360, 4376), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (4374, 4376), False, 'from tensorflow.keras import layers\n'), ((4410, 4429), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (4424, 4429), False, 'from tensorflow.keras import layers\n'), ((4463, 4513), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.stage_num[1]'], {'activation': '"""relu"""'}), "(self.stage_num[1], activation='relu')\n", (4475, 4513), False, 'from tensorflow.keras import layers\n'), ((4548, 4585), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (4554, 4585), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((4615, 4637), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(4)', '(4)'], {}), '(4, 4)\n', (4631, 4637), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((4667, 4683), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (4681, 4683), False, 'from tensorflow.keras import layers\n'), ((4717, 4736), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (4731, 4736), False, 'from tensorflow.keras import layers\n'), ((4770, 4820), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.stage_num[1]'], {'activation': '"""relu"""'}), "(self.stage_num[1], activation='relu')\n", (4782, 4820), False, 'from tensorflow.keras import layers\n'), ((4860, 4877), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (4875, 4877), False, 'from tensorflow.keras import layers\n'), ((4919, 4970), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""tanh"""', 'name': '"""delta_s2"""'}), "(1, activation='tanh', name='delta_s2')\n", (4931, 4970), False, 'from tensorflow.keras import layers\n'), ((5007, 5024), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (5022, 5024), False, 'from tensorflow.keras import layers\n'), ((5075, 5129), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.stage_num[1])'], {'activation': '"""relu"""'}), "(2 * self.stage_num[1], activation='relu')\n", (5087, 5129), False, 'from tensorflow.keras import layers\n'), ((5161, 5246), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[1]', 'activation': '"""relu"""', 'name': '"""pred_age_stage2"""'}), "(units=self.stage_num[1], activation='relu', name='pred_age_stage2'\n    )\n", (5173, 5246), False, 'from tensorflow.keras import layers\n'), ((5413, 5501), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[1]', 'activation': '"""tanh"""', 'name': '"""local_delta_stage2"""'}), "(units=self.stage_num[1], activation='tanh', name=\n    'local_delta_stage2')\n", (5425, 5501), False, 'from tensorflow.keras import layers\n'), ((5682, 5719), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (5688, 5719), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((5749, 5767), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(8)', '(8)'], {}), '(8, 8)\n', (5761, 5767), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((5797, 5813), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (5811, 5813), False, 'from tensorflow.keras import layers\n'), ((5847, 5866), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (5861, 5866), False, 'from tensorflow.keras import layers\n'), ((5900, 5950), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.stage_num[2]'], {'activation': '"""relu"""'}), "(self.stage_num[2], activation='relu')\n", (5912, 5950), False, 'from tensorflow.keras import layers\n'), ((5985, 6022), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (5991, 6022), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((6052, 6074), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(8)', '(8)'], {}), '(8, 8)\n', (6068, 6074), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((6104, 6120), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (6118, 6120), False, 'from tensorflow.keras import layers\n'), ((6154, 6173), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (6168, 6173), False, 'from tensorflow.keras import layers\n'), ((6207, 6257), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.stage_num[2]'], {'activation': '"""relu"""'}), "(self.stage_num[2], activation='relu')\n", (6219, 6257), False, 'from tensorflow.keras import layers\n'), ((6297, 6314), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (6312, 6314), False, 'from tensorflow.keras import layers\n'), ((6356, 6407), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""tanh"""', 'name': '"""delta_s3"""'}), "(1, activation='tanh', name='delta_s3')\n", (6368, 6407), False, 'from tensorflow.keras import layers\n'), ((6444, 6461), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (6459, 6461), False, 'from tensorflow.keras import layers\n'), ((6512, 6566), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.stage_num[2])'], {'activation': '"""relu"""'}), "(2 * self.stage_num[2], activation='relu')\n", (6524, 6566), False, 'from tensorflow.keras import layers\n'), ((6598, 6683), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[2]', 'activation': '"""relu"""', 'name': '"""pred_age_stage3"""'}), "(units=self.stage_num[2], activation='relu', name='pred_age_stage3'\n    )\n", (6610, 6683), False, 'from tensorflow.keras import layers\n'), ((6850, 6938), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[2]', 'activation': '"""tanh"""', 'name': '"""local_delta_stage3"""'}), "(units=self.stage_num[2], activation='tanh', name=\n    'local_delta_stage3')\n", (6862, 6938), False, 'from tensorflow.keras import layers\n'), ((7428, 7448), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['a', '(-1)'], {}), '(a, -1)\n', (7441, 7448), True, 'from tensorflow.keras import backend as K\n'), ((7619, 7639), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['b', '(-1)'], {}), '(b, -1)\n', (7632, 7639), True, 'from tensorflow.keras import backend as K\n'), ((7841, 7861), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['c', '(-1)'], {}), '(c, -1)\n', (7854, 7861), True, 'from tensorflow.keras import backend as K\n'), ((8129, 8327), 'tensorflow.keras.layers.Lambda', 'layers.Lambda', (['merge_age'], {'arguments': "{'s1': self.stage_num[0], 's2': self.stage_num[1], 's3': self.stage_num[2],\n    'lambda_local': self.lambda_local, 'lambda_d': self.lambda_d}", 'name': '"""pred_a"""'}), "(merge_age, arguments={'s1': self.stage_num[0], 's2': self.\n    stage_num[1], 's3': self.stage_num[2], 'lambda_local': self.\n    lambda_local, 'lambda_d': self.lambda_d}, name='pred_a')\n", (8142, 8327), False, 'from tensorflow.keras import layers\n'), ((8920, 8942), 'tensorflow.keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (8940, 8960), True, 'from tensorflow.keras import backend as K\n'), ((8964, 9006), 'logging.debug', 'logging.debug', (['"""image_dim_ordering = \'th\'"""'], {}), '("image_dim_ordering = \'th\'")\n', (8977, 9006), False, 'import logging\n'), ((9128, 9170), 'logging.debug', 'logging.debug', (['"""image_dim_ordering = \'tf\'"""'], {}), '("image_dim_ordering = \'tf\'")\n', (9141, 9170), False, 'import logging\n'), ((9646, 9664), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (9652, 9664), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((9685, 9728), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (9703, 9728), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((9744, 9769), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (9761, 9769), False, 'from tensorflow.keras import layers\n'), ((9792, 9814), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (9808, 9814), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((9830, 9848), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (9836, 9848), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((9871, 9914), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (9889, 9914), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((9930, 9955), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (9947, 9955), False, 'from tensorflow.keras import layers\n'), ((9978, 10000), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (9994, 10000), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10016, 10034), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (10022, 10034), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10057, 10100), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (10075, 10100), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10116, 10141), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (10133, 10141), False, 'from tensorflow.keras import layers\n'), ((10164, 10186), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (10180, 10186), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10202, 10220), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (10208, 10220), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10243, 10286), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (10261, 10286), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10302, 10327), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (10319, 10327), False, 'from tensorflow.keras import layers\n'), ((10475, 10493), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {}), '(16, (3, 3))\n', (10481, 10493), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10514, 10557), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (10532, 10557), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10573, 10598), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (10590, 10598), False, 'from tensorflow.keras import layers\n'), ((10621, 10639), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (10633, 10639), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10655, 10673), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {}), '(16, (3, 3))\n', (10661, 10673), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10696, 10739), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (10714, 10739), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10755, 10780), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (10772, 10780), False, 'from tensorflow.keras import layers\n'), ((10803, 10821), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (10815, 10821), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10837, 10855), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {}), '(16, (3, 3))\n', (10843, 10855), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10878, 10921), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (10896, 10921), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((10937, 10962), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (10954, 10962), False, 'from tensorflow.keras import layers\n'), ((10985, 11003), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (10997, 11003), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'),
((11019, 11037), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {}), '(16, (3, 3))\n', (11025, 11037), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((11060, 11103), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (11078, 11103), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((11119, 11144), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (11136, 11144), False, 'from tensorflow.keras import layers\n'), ((11327, 11364), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (11333, 11364), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((11387, 11403), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (11401, 11403), False, 'from tensorflow.keras import layers\n'), ((11437, 11456), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (11451, 11456), False, 'from tensorflow.keras import layers\n'), ((11490, 11546), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[0]', 'activation': '"""relu"""'}), "(units=self.stage_num[0], activation='relu')\n", (11502, 11546), False, 'from tensorflow.keras import layers\n'), ((11603, 11640), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (11609, 11640), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((11663, 11679), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (11677, 11679), False, 'from tensorflow.keras import layers\n'), ((11713, 11732), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (11727, 11732), False, 'from tensorflow.keras import layers\n'), ((11766, 11822), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[0]', 'activation': '"""relu"""'}), "(units=self.stage_num[0], activation='relu')\n", (11778, 11822), False, 'from tensorflow.keras import layers\n'), ((11882, 11899), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (11897, 11899), False, 'from tensorflow.keras import layers\n'), ((11941, 11992), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""tanh"""', 'name': '"""delta_s1"""'}), "(1, activation='tanh', name='delta_s1')\n", (11953, 11992), False, 'from tensorflow.keras import layers\n'), ((12025, 12042), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (12040, 12042), False, 'from tensorflow.keras import layers\n'), ((12091, 12145), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.stage_num[0])'], {'activation': '"""relu"""'}), "(2 * self.stage_num[0], activation='relu')\n", (12103, 12145), False, 'from tensorflow.keras import layers\n'), ((12173, 12249), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[0]', 'activation': '"""relu"""', 'name': '"""pred_stage1"""'}), "(units=self.stage_num[0], activation='relu', name='pred_stage1')\n", (12185, 12249), False, 'from tensorflow.keras import layers\n'), ((12300, 12388), 
'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[0]', 'activation': '"""tanh"""', 'name': '"""local_delta_stage1"""'}), "(units=self.stage_num[0], activation='tanh', name=\n 'local_delta_stage1')\n", (12312, 12388), False, 'from tensorflow.keras import layers\n'), ((12567, 12604), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (12573, 12604), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((12634, 12652), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(4)', '(4)'], {}), '(4, 4)\n', (12646, 12652), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((12682, 12698), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (12696, 12698), False, 'from tensorflow.keras import layers\n'), ((12732, 12751), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (12746, 12751), False, 'from tensorflow.keras import layers\n'), ((12785, 12835), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.stage_num[1]'], {'activation': '"""relu"""'}), "(self.stage_num[1], activation='relu')\n", (12797, 12835), False, 'from tensorflow.keras import layers\n'), ((12870, 12907), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (12876, 12907), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((12937, 12959), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(4)', '(4)'], {}), '(4, 4)\n', (12953, 12959), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((12989, 13005), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (13003, 13005), False, 'from tensorflow.keras import layers\n'), ((13039, 13058), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (13053, 13058), False, 'from tensorflow.keras import layers\n'), ((13092, 13142), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.stage_num[1]'], {'activation': '"""relu"""'}), "(self.stage_num[1], activation='relu')\n", (13104, 13142), False, 'from tensorflow.keras import layers\n'), ((13180, 13197), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (13195, 13197), False, 'from tensorflow.keras import layers\n'), ((13239, 13290), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""tanh"""', 'name': '"""delta_s2"""'}), "(1, activation='tanh', name='delta_s2')\n", (13251, 13290), False, 'from tensorflow.keras import layers\n'), ((13323, 13340), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (13338, 13340), False, 'from tensorflow.keras import layers\n'), ((13389, 13443), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.stage_num[1])'], {'activation': '"""relu"""'}), "(2 * self.stage_num[1], activation='relu')\n", (13401, 13443), False, 'from tensorflow.keras import layers\n'), ((13471, 13547), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[1]', 'activation': '"""relu"""', 'name': '"""pred_stage2"""'}), "(units=self.stage_num[1], activation='relu', name='pred_stage2')\n", (13483, 13547), False, 'from tensorflow.keras import 
layers\n'), ((13598, 13686), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[1]', 'activation': '"""tanh"""', 'name': '"""local_delta_stage2"""'}), "(units=self.stage_num[1], activation='tanh', name=\n 'local_delta_stage2')\n", (13610, 13686), False, 'from tensorflow.keras import layers\n'), ((13865, 13902), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (13871, 13902), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((13932, 13950), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(8)', '(8)'], {}), '(8, 8)\n', (13944, 13950), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((13980, 13996), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (13994, 13996), False, 'from tensorflow.keras import layers\n'), ((14030, 14049), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (14044, 14049), False, 'from tensorflow.keras import layers\n'), ((14083, 14133), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.stage_num[2]'], {'activation': '"""relu"""'}), "(self.stage_num[2], activation='relu')\n", (14095, 14133), False, 'from tensorflow.keras import layers\n'), ((14168, 14205), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(10)', '(1, 1)'], {'activation': '"""relu"""'}), "(10, (1, 1), activation='relu')\n", (14174, 14205), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((14235, 14257), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(8)', '(8)'], {}), '(8, 8)\n', (14251, 14257), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((14287, 14303), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (14301, 14303), False, 'from tensorflow.keras import layers\n'), ((14337, 14356), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (14351, 14356), False, 'from tensorflow.keras import layers\n'), ((14390, 14440), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.stage_num[2]'], {'activation': '"""relu"""'}), "(self.stage_num[2], activation='relu')\n", (14402, 14440), False, 'from tensorflow.keras import layers\n'), ((14478, 14495), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (14493, 14495), False, 'from tensorflow.keras import layers\n'), ((14537, 14588), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""tanh"""', 'name': '"""delta_s3"""'}), "(1, activation='tanh', name='delta_s3')\n", (14549, 14588), False, 'from tensorflow.keras import layers\n'), ((14621, 14638), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (14636, 14638), False, 'from tensorflow.keras import layers\n'), ((14687, 14741), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.stage_num[2])'], {'activation': '"""relu"""'}), "(2 * self.stage_num[2], activation='relu')\n", (14699, 14741), False, 'from tensorflow.keras import layers\n'), ((14769, 14845), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[2]', 'activation': '"""relu"""', 'name': '"""pred_stage3"""'}), "(units=self.stage_num[2], activation='relu', name='pred_stage3')\n", (14781, 14845), False, 
'from tensorflow.keras import layers\n'), ((14896, 14984), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.stage_num[2]', 'activation': '"""tanh"""', 'name': '"""local_delta_stage3"""'}), "(units=self.stage_num[2], activation='tanh', name=\n 'local_delta_stage3')\n", (14908, 14984), False, 'from tensorflow.keras import layers\n'), ((15440, 15460), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['a', '(-1)'], {}), '(a, -1)\n', (15453, 15460), True, 'from tensorflow.keras import backend as K\n'), ((15631, 15651), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['b', '(-1)'], {}), '(b, -1)\n', (15644, 15651), True, 'from tensorflow.keras import backend as K\n'), ((15853, 15873), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['c', '(-1)'], {}), '(c, -1)\n', (15866, 15873), True, 'from tensorflow.keras import backend as K\n'), ((16139, 16336), 'tensorflow.keras.layers.Lambda', 'layers.Lambda', (['SSR_module'], {'arguments': "{'s1': self.stage_num[0], 's2': self.stage_num[1], 's3': self.stage_num[2],\n 'lambda_local': self.lambda_local, 'lambda_d': self.lambda_d}", 'name': '"""pred"""'}), "(SSR_module, arguments={'s1': self.stage_num[0], 's2': self.\n stage_num[1], 's3': self.stage_num[2], 'lambda_local': self.\n lambda_local, 'lambda_d': self.lambda_d}, name='pred')\n", (16152, 16336), False, 'from tensorflow.keras import layers\n'), ((16915, 16937), 'tensorflow.keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (16935, 16937), True, 'from tensorflow.keras import backend as K\n'), ((16959, 17001), 'logging.debug', 'logging.debug', (['"""image_dim_ordering = \'th\'"""'], {}), '("image_dim_ordering = \'th\'")\n', (16972, 17001), False, 'import logging\n'), ((17123, 17165), 'logging.debug', 'logging.debug', (['"""image_dim_ordering = \'tf\'"""'], {}), '("image_dim_ordering = \'tf\'")\n', (17136, 17165), False, 'import logging\n'), ((17636, 17679), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(16)', '(3, 3)'], {'padding': '"""same"""'}), "(16, (3, 3), padding='same')\n", (17651, 17679), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((17704, 17731), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (17722, 17731), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((17747, 17772), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (17764, 17772), False, 'from tensorflow.keras import layers\n'), ((17795, 17819), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2, 2)'], {}), '((2, 2))\n', (17811, 17819), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((17835, 17878), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (17850, 17878), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((17901, 17928), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (17919, 17928), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((17944, 17969), 
'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (17961, 17969), False, 'from tensorflow.keras import layers\n'), ((17985, 18028), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (18000, 18028), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18044, 18071), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (18062, 18071), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18087, 18112), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (18104, 18112), False, 'from tensorflow.keras import layers\n'), ((18135, 18159), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2, 2)'], {}), '((2, 2))\n', (18151, 18159), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18175, 18218), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (18190, 18218), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18241, 18268), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (18259, 18268), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18284, 18309), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (18301, 18309), False, 'from tensorflow.keras import layers\n'), ((18325, 18368), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (18340, 18368), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18384, 18411), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (18402, 18411), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18427, 18452), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (18444, 18452), False, 'from tensorflow.keras import layers\n'), ((18475, 18499), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2, 2)'], {}), '((2, 2))\n', (18491, 18499), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18515, 18559), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (18530, 18559), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18582, 18609), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (18600, 18609), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18625, 18650), 'tensorflow.keras.layers.Activation', 'layers.Activation', 
(['"""relu"""'], {}), "('relu')\n", (18642, 18650), False, 'from tensorflow.keras import layers\n'), ((18666, 18710), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (18681, 18710), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18726, 18753), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (18744, 18753), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((18776, 18801), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (18793, 18801), False, 'from tensorflow.keras import layers\n'), ((18949, 18992), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(16)', '(3, 3)'], {'padding': '"""same"""'}), "(16, (3, 3), padding='same')\n", (18964, 18992), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19017, 19044), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (19035, 19044), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19060, 19085), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (19077, 19085), False, 'from tensorflow.keras import layers\n'), ((19108, 19128), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (19120, 19128), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19144, 19187), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (19159, 19187), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19210, 19237), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (19228, 19237), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19253, 19278), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (19270, 19278), False, 'from tensorflow.keras import layers\n'), ((19294, 19337), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (19309, 19337), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19353, 19380), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (19371, 19380), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19396, 19421), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (19413, 19421), False, 'from tensorflow.keras import layers\n'), ((19444, 19464), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (19456, 19464), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, 
SeparableConv2D\n'), ((19480, 19523), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (19495, 19523), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19546, 19573), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (19564, 19573), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19589, 19614), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (19606, 19614), False, 'from tensorflow.keras import layers\n'), ((19630, 19673), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (19645, 19673), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19689, 19716), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (19707, 19716), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19732, 19757), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (19749, 19757), False, 'from tensorflow.keras import layers\n'), ((19780, 19800), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (19792, 19800), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19816, 19860), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (19831, 19860), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19883, 19910), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (19901, 19910), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((19926, 19951), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (19943, 19951), False, 'from tensorflow.keras import layers\n'), ((19967, 20011), 'tensorflow.keras.layers.SeparableConv2D', 'SeparableConv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (19982, 20011), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((20027, 20054), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (20045, 20054), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((20077, 20102), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (20094, 20102), False, 'from tensorflow.keras import layers\n'), ((20285, 20322), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""tanh"""'}), "(64, (1, 1), activation='tanh')\n", (20291, 20322), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((20352, 
20372), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (20364, 20372), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((20403, 20440), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""relu"""'}), "(64, (1, 1), activation='relu')\n", (20409, 20440), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((20470, 20494), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2, 2)'], {}), '((2, 2))\n', (20486, 20494), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((20528, 20545), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (20543, 20545), False, 'from tensorflow.keras import layers\n'), ((20590, 20606), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (20604, 20606), False, 'from tensorflow.keras import layers\n'), ((20644, 20697), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (20656, 20697), False, 'from tensorflow.keras import layers\n'), ((20752, 20818), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.num_classes'], {'activation': '"""tanh"""', 'name': '"""delta_s1"""'}), "(self.num_classes, activation='tanh', name='delta_s1')\n", (20764, 20818), False, 'from tensorflow.keras import layers\n'), ((20881, 20934), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (20893, 20934), False, 'from tensorflow.keras import layers\n'), ((20989, 21076), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.num_classes', 'activation': '"""tanh"""', 'name': '"""local_delta_stage1"""'}), "(units=self.num_classes, activation='tanh', name=\n 'local_delta_stage1')\n", (21001, 21076), False, 'from tensorflow.keras import layers\n'), ((21133, 21202), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(self.stage_num[0] * self.num_classes)'], {'activation': '"""relu"""'}), "(self.stage_num[0] * self.num_classes, activation='relu')\n", (21145, 21202), False, 'from tensorflow.keras import layers\n'), ((21258, 21311), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(self.num_classes, self.stage_num[0])'], {}), '((self.num_classes, self.stage_num[0]))\n', (21272, 21311), False, 'from tensorflow.keras import layers\n'), ((21477, 21514), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""tanh"""'}), "(64, (1, 1), activation='tanh')\n", (21483, 21514), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((21544, 21564), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (21556, 21564), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((21595, 21632), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""relu"""'}), "(64, (1, 1), activation='relu')\n", (21601, 21632), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((21662, 21686), 'tensorflow.keras.layers.AveragePooling2D', 
'AveragePooling2D', (['(2, 2)'], {}), '((2, 2))\n', (21678, 21686), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((21720, 21737), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (21735, 21737), False, 'from tensorflow.keras import layers\n'), ((21782, 21798), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (21796, 21798), False, 'from tensorflow.keras import layers\n'), ((21836, 21889), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (21848, 21889), False, 'from tensorflow.keras import layers\n'), ((21944, 22010), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.num_classes'], {'activation': '"""tanh"""', 'name': '"""delta_s2"""'}), "(self.num_classes, activation='tanh', name='delta_s2')\n", (21956, 22010), False, 'from tensorflow.keras import layers\n'), ((22073, 22126), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (22085, 22126), False, 'from tensorflow.keras import layers\n'), ((22181, 22268), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.num_classes', 'activation': '"""tanh"""', 'name': '"""local_delta_stage2"""'}), "(units=self.num_classes, activation='tanh', name=\n 'local_delta_stage2')\n", (22193, 22268), False, 'from tensorflow.keras import layers\n'), ((22325, 22394), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(self.stage_num[1] * self.num_classes)'], {'activation': '"""relu"""'}), "(self.stage_num[1] * self.num_classes, activation='relu')\n", (22337, 22394), False, 'from tensorflow.keras import layers\n'), ((22450, 22503), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(self.num_classes, self.stage_num[1])'], {}), '((self.num_classes, self.stage_num[1]))\n', (22464, 22503), False, 'from tensorflow.keras import layers\n'), ((22669, 22706), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""tanh"""'}), "(64, (1, 1), activation='tanh')\n", (22675, 22706), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((22736, 22756), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (22748, 22756), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((22787, 22824), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""relu"""'}), "(64, (1, 1), activation='relu')\n", (22793, 22824), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((22854, 22878), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2, 2)'], {}), '((2, 2))\n', (22870, 22878), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((22912, 22929), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (22927, 22929), False, 'from tensorflow.keras import layers\n'), ((22974, 22990), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (22988, 22990), False, 'from tensorflow.keras import layers\n'), ((23028, 23081), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], 
{'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (23040, 23081), False, 'from tensorflow.keras import layers\n'), ((23136, 23202), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.num_classes'], {'activation': '"""tanh"""', 'name': '"""delta_s3"""'}), "(self.num_classes, activation='tanh', name='delta_s3')\n", (23148, 23202), False, 'from tensorflow.keras import layers\n'), ((23265, 23318), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (23277, 23318), False, 'from tensorflow.keras import layers\n'), ((23373, 23460), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.num_classes', 'activation': '"""tanh"""', 'name': '"""local_delta_stage3"""'}), "(units=self.num_classes, activation='tanh', name=\n 'local_delta_stage3')\n", (23385, 23460), False, 'from tensorflow.keras import layers\n'), ((23517, 23586), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(self.stage_num[2] * self.num_classes)'], {'activation': '"""relu"""'}), "(self.stage_num[2] * self.num_classes, activation='relu')\n", (23529, 23586), False, 'from tensorflow.keras import layers\n'), ((23642, 23695), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(self.num_classes, self.stage_num[2])'], {}), '((self.num_classes, self.stage_num[2]))\n', (23656, 23695), False, 'from tensorflow.keras import layers\n'), ((24911, 25078), 'tensorflow.keras.layers.Lambda', 'layers.Lambda', (['SSR_module'], {'arguments': "{'s1': self.stage_num[0], 's2': self.stage_num[1], 's3': self.stage_num[2],\n 'lambda_d': self.lambda_d}", 'name': '"""pred_pose"""'}), "(SSR_module, arguments={'s1': self.stage_num[0], 's2': self.\n stage_num[1], 's3': self.stage_num[2], 'lambda_d': self.lambda_d}, name\n ='pred_pose')\n", (24924, 25078), False, 'from tensorflow.keras import layers\n'), ((25660, 25682), 'tensorflow.keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (25680, 25682), True, 'from tensorflow.keras import backend as K\n'), ((25704, 25746), 'logging.debug', 'logging.debug', (['"""image_dim_ordering = \'th\'"""'], {}), '("image_dim_ordering = \'th\'")\n', (25717, 25746), False, 'import logging\n'), ((25868, 25910), 'logging.debug', 'logging.debug', (['"""image_dim_ordering = \'tf\'"""'], {}), '("image_dim_ordering = \'tf\'")\n', (25881, 25910), False, 'import logging\n'), ((26381, 26415), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (26387, 26415), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((26440, 26483), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (26458, 26483), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((26499, 26524), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (26516, 26524), False, 'from tensorflow.keras import layers\n'), ((26547, 26569), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (26563, 26569), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((26585, 26619), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], 
{'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (26591, 26619), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((26642, 26685), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (26660, 26685), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((26701, 26726), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (26718, 26726), False, 'from tensorflow.keras import layers\n'), ((26749, 26771), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (26765, 26771), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((26787, 26821), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (26793, 26821), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((26844, 26887), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (26862, 26887), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((26903, 26928), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (26920, 26928), False, 'from tensorflow.keras import layers\n'), ((26951, 26973), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (26967, 26973), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((26989, 27023), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (26995, 27023), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27046, 27089), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (27064, 27089), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27112, 27137), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (27129, 27137), False, 'from tensorflow.keras import layers\n'), ((27285, 27319), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""same"""'}), "(16, (3, 3), padding='same')\n", (27291, 27319), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27344, 27387), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (27362, 27387), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27403, 27428), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (27420, 27428), False, 'from tensorflow.keras import layers\n'), ((27451, 27469), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), 
'(2, 2)\n', (27463, 27469), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27485, 27519), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""same"""'}), "(16, (3, 3), padding='same')\n", (27491, 27519), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27542, 27585), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (27560, 27585), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27601, 27626), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (27618, 27626), False, 'from tensorflow.keras import layers\n'), ((27649, 27667), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (27661, 27667), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27683, 27717), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""same"""'}), "(16, (3, 3), padding='same')\n", (27689, 27717), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27740, 27783), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (27758, 27783), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27799, 27824), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (27816, 27824), False, 'from tensorflow.keras import layers\n'), ((27847, 27865), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (27859, 27865), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27881, 27915), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""same"""'}), "(16, (3, 3), padding='same')\n", (27887, 27915), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((27938, 27981), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'self._channel_axis'}), '(axis=self._channel_axis)\n', (27956, 27981), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((28004, 28029), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""tanh"""'], {}), "('tanh')\n", (28021, 28029), False, 'from tensorflow.keras import layers\n'), ((28212, 28249), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""tanh"""'}), "(64, (1, 1), activation='tanh')\n", (28218, 28249), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((28279, 28299), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (28291, 28299), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((28330, 28367), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': 
'"""relu"""'}), "(64, (1, 1), activation='relu')\n", (28336, 28367), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((28397, 28421), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2, 2)'], {}), '((2, 2))\n', (28413, 28421), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((28455, 28472), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (28470, 28472), False, 'from tensorflow.keras import layers\n'), ((28517, 28533), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (28531, 28533), False, 'from tensorflow.keras import layers\n'), ((28571, 28624), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (28583, 28624), False, 'from tensorflow.keras import layers\n'), ((28679, 28745), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.num_classes'], {'activation': '"""tanh"""', 'name': '"""delta_s1"""'}), "(self.num_classes, activation='tanh', name='delta_s1')\n", (28691, 28745), False, 'from tensorflow.keras import layers\n'), ((28808, 28861), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (28820, 28861), False, 'from tensorflow.keras import layers\n'), ((28916, 29003), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.num_classes', 'activation': '"""tanh"""', 'name': '"""local_delta_stage1"""'}), "(units=self.num_classes, activation='tanh', name=\n 'local_delta_stage1')\n", (28928, 29003), False, 'from tensorflow.keras import layers\n'), ((29060, 29129), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(self.stage_num[0] * self.num_classes)'], {'activation': '"""relu"""'}), "(self.stage_num[0] * self.num_classes, activation='relu')\n", (29072, 29129), False, 'from tensorflow.keras import layers\n'), ((29185, 29238), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(self.num_classes, self.stage_num[0])'], {}), '((self.num_classes, self.stage_num[0]))\n', (29199, 29238), False, 'from tensorflow.keras import layers\n'), ((29404, 29441), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""tanh"""'}), "(64, (1, 1), activation='tanh')\n", (29410, 29441), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((29471, 29491), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (29483, 29491), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((29522, 29559), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""relu"""'}), "(64, (1, 1), activation='relu')\n", (29528, 29559), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((29589, 29613), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2, 2)'], {}), '((2, 2))\n', (29605, 29613), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((29647, 29664), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (29662, 29664), False, 'from tensorflow.keras import layers\n'), 
((29709, 29725), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (29723, 29725), False, 'from tensorflow.keras import layers\n'), ((29763, 29816), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (29775, 29816), False, 'from tensorflow.keras import layers\n'), ((29871, 29937), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.num_classes'], {'activation': '"""tanh"""', 'name': '"""delta_s2"""'}), "(self.num_classes, activation='tanh', name='delta_s2')\n", (29883, 29937), False, 'from tensorflow.keras import layers\n'), ((30000, 30053), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (30012, 30053), False, 'from tensorflow.keras import layers\n'), ((30108, 30195), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.num_classes', 'activation': '"""tanh"""', 'name': '"""local_delta_stage2"""'}), "(units=self.num_classes, activation='tanh', name=\n 'local_delta_stage2')\n", (30120, 30195), False, 'from tensorflow.keras import layers\n'), ((30252, 30321), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(self.stage_num[1] * self.num_classes)'], {'activation': '"""relu"""'}), "(self.stage_num[1] * self.num_classes, activation='relu')\n", (30264, 30321), False, 'from tensorflow.keras import layers\n'), ((30377, 30430), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(self.num_classes, self.stage_num[1])'], {}), '((self.num_classes, self.stage_num[1]))\n', (30391, 30430), False, 'from tensorflow.keras import layers\n'), ((30596, 30633), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""tanh"""'}), "(64, (1, 1), activation='tanh')\n", (30602, 30633), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((30663, 30683), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (30675, 30683), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((30714, 30751), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(1, 1)'], {'activation': '"""relu"""'}), "(64, (1, 1), activation='relu')\n", (30720, 30751), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((30781, 30805), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', (['(2, 2)'], {}), '((2, 2))\n', (30797, 30805), False, 'from tensorflow.keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D, SeparableConv2D\n'), ((30839, 30856), 'tensorflow.keras.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (30854, 30856), False, 'from tensorflow.keras import layers\n'), ((30901, 30917), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (30915, 30917), False, 'from tensorflow.keras import layers\n'), ((30955, 31008), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (30967, 31008), False, 'from tensorflow.keras import layers\n'), ((31063, 31129), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.num_classes'], {'activation': '"""tanh"""', 'name': '"""delta_s3"""'}), "(self.num_classes, activation='tanh', name='delta_s3')\n", (31075, 
31129), False, 'from tensorflow.keras import layers\n'), ((31192, 31245), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2 * self.num_classes)'], {'activation': '"""tanh"""'}), "(2 * self.num_classes, activation='tanh')\n", (31204, 31245), False, 'from tensorflow.keras import layers\n'), ((31300, 31387), 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': 'self.num_classes', 'activation': '"""tanh"""', 'name': '"""local_delta_stage3"""'}), "(units=self.num_classes, activation='tanh', name=\n 'local_delta_stage3')\n", (31312, 31387), False, 'from tensorflow.keras import layers\n'), ((31444, 31513), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(self.stage_num[2] * self.num_classes)'], {'activation': '"""relu"""'}), "(self.stage_num[2] * self.num_classes, activation='relu')\n", (31456, 31513), False, 'from tensorflow.keras import layers\n'), ((31569, 31622), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(self.num_classes, self.stage_num[2])'], {}), '((self.num_classes, self.stage_num[2]))\n', (31583, 31622), False, 'from tensorflow.keras import layers\n'), ((32838, 33005), 'tensorflow.keras.layers.Lambda', 'layers.Lambda', (['SSR_module'], {'arguments': "{'s1': self.stage_num[0], 's2': self.stage_num[1], 's3': self.stage_num[2],\n 'lambda_d': self.lambda_d}", 'name': '"""pred_pose"""'}), "(SSR_module, arguments={'s1': self.stage_num[0], 's2': self.\n stage_num[1], 's3': self.stage_num[2], 'lambda_d': self.lambda_d}, name\n ='pred_pose')\n", (32851, 33005), False, 'from tensorflow.keras import layers\n')]
|
"""
This script gathers functions related to the Sunyaev-Zel'dovich (SZ) spectrum
"""
import numpy as np
import astropy.units as u
from astropy import constants as const
from astropy.cosmology import Planck15 as cosmo
#===================================================
#========== CMB intensity
#===================================================
def get_I0_CMB():
"""
Compute the CMB intensity
Parameters
----------
Outputs
--------
- I0 (quantity): the CMB intensity (homogeneous to MJy/sr)
"""
I0 = 2*(const.k_B*cosmo.Tcmb0)**3/(const.h*const.c)**2*u.sr**-1
return I0.to('MJy sr-1')
#===================================================
#========== Non-relativistic tSZ spectrum
#===================================================
def tsz_spec(frequency):
"""
    Compute the non-relativistic SZ spectrum, f(nu),
as in delta I_nu = I0 f(nu) y
Parameters
----------
- frequency (quantity): frequency array homogeneous to GHz
Outputs
--------
- SZ spectrum: f(nu)
"""
x = const.h * frequency / (const.k_B * cosmo.Tcmb0)
f_nu = x**4 * np.exp(x) / (np.exp(x)-1)**2 * (x*(np.exp(x)+1)/(np.exp(x)-1) - 4)
return f_nu
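# Illustrative usage (a minimal sketch; the example frequencies are assumptions):
#   freq = np.array([100.0, 143.0, 353.0]) * u.GHz   # typical mm-wave bands
#   f_nu = tsz_spec(freq)                            # dimensionless spectral shape
#   dI_nu = get_I0_CMB() * f_nu * 1e-5               # delta I_nu for Compton y = 1e-5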
#===================================================
#========== Relativistic tSZ spectrum
#===================================================
def tsz_spec_relativistic(frequency, kBT):
"""
Compute the relativistic SZ spectrum, f(nu, T)
as in delta I_nu = I0 f(nu, T) y
Parameters
----------
- frequency (quantity): frequency array homogeneous to GHz
    - kBT (quantity): electron temperature array homogeneous to keV
Outputs
--------
- SZ spectrum: f(nu, T)
"""
    #========== Make sure that frequency and temperature are arrays
    if frequency.isscalar:
        frequency = u.Quantity([frequency])
    if kBT.isscalar:
        kBT = u.Quantity([kBT])
#========== Replicate to work with grids
f_grid = (np.tile(frequency, [len(kBT),1])).T
t_grid = (np.tile(kBT, [len(frequency),1]))
#========== Function variable
theta = t_grid.to_value('keV')/(const.m_e*const.c**2).to_value('keV')
x = (const.h*f_grid/(const.k_B*cosmo.Tcmb0)).to_value('')
#========== Region where x < 1.2
f1 = x**4 * np.exp(x)/(np.exp(x)-1)**2
xtil = x*(np.exp(x)+1)/(np.exp(x)-1)
s = 2*x/(np.exp(x/2)-np.exp(-x/2))
y0 = xtil-4.0
y1a = -10.+47./2.*xtil-42./5.*xtil**(2.)
y1b = 0.7*xtil**(3.)+s**(2.)*(-21./5.+7./5.*xtil)
y1 = y1a+y1b
y2a = -15/2.+1023./8.*xtil-868./5.*xtil**(2.)
y2b = 329./5.*xtil**(3.)-44./5.*xtil**(4.)
y2c = 11./30.*xtil**(5.)
y2d = -434./5.+658/5.*xtil-242./5.*xtil**(2.)+143./30.*xtil**(3.)
y2e = -44./5.+187./60.*xtil
y2 = y2a+y2b+y2c+s**(2.)*y2d+s**(4.)*y2e
y3a = 15./2.+2505./8.*xtil-7098./5.*xtil**(2.)
y3b = 1425.3*xtil**(3.)-18594./35.*xtil**(4.)
y3c = 12059./140.*xtil**(5.)-128./21.*xtil**(6.)+16./105.*xtil**(7.)
y3d1 = -709.8+14253/5.*xtil-102267./35.*xtil**(2.)
y3d2 = 156767./140.*xtil**(3.)-1216./7.*xtil**(4.)+64./7.*xtil**(5.)
y3d = s**(2.)*(y3d1+y3d2)
y3e1 = -18594./35.+205003./280.*xtil
y3e2 = -1920./7.*xtil**(2.)+1024./35.*xtil**(3.)
y3e = s**(4.)*(y3e1+y3e2)
y3f = s**(6.)*(-544./21.+922./105.*xtil)
y3 = y3a+y3b+y3c+y3d+y3e+y3f
y4a = -135./32.+30375./128.*xtil-6239.1*xtil**(2.)
y4b = 61472.7/4.*xtil**(3.)-12438.9*xtil**(4.)
y4c = 35570.3/8.*xtil**(5.)-16568./21.*xtil**(6.)
y4d = 7516./105.*xtil**(7.)-22./7.*xtil**(8.)+11./210.*xtil**(9.)
y4e1 = -62391./20.+614727./20.*xtil
y4e2 = -1368279./20.*xtil**(2.)+4624139./80.*xtil**(3.)
y4e3 = -157396./7.*xtil**(4.)+30064./7.*xtil**(5.)
y4e4 = -2717./7.*xtil**(6.)+2761./210.*xtil**(7.)
y4e = s**(2.)*(y4e1+y4e2+y4e3+y4e4)
y4f1 = -12438.9+6046951./160.*xtil
y4f2 = -248520./7.*xtil**(2.)+481024./35.*xtil**(3.)
y4f3 = -15972./7.*xtil**(4.)+18689./140.*xtil**(5.)
y4f = s**(4.)*(y4f1+y4f2+y4f3)
y4g1 = -70414./21.+465992./105.*xtil
y4g2 = -11792./7.*xtil**(2.)+19778./105.*xtil**(3.)
y4g = s**(6.)*(y4g1+y4g2)
y4h = s**(8.)*(-682./7.+7601./210.*xtil)
y4 = y4a+y4b+y4c+y4d+y4e+y4f+y4g+y4h
DI_over_tau_over_theta_lt12 = f1*(y0+theta*y1+theta**(2.)*y2+theta**(3.)*y3+theta**(4.)*y4)
#========== Region where x > 1.2 if T > 20.0 keV
    Tlim = 20.0  # keV; above this temperature the x > 1.2 fit is used
x_0 = 3.830 * (1.0 + 1.1674*theta - 0.8533*theta**2.)
a_ij = np.array([
[[-1.81317E+1+x*0],[ 9.97038E+1+x*0],[-6.07438E+1+x*0],[ 1.05143E+3+x*0],[-2.86734E+3+x*0],[ 7.73353E+3+x*0],[-8.16644E+3+x*0],[-5.37712E+3+x*0],[ 1.52226E+4+x*0],[ 7.18726E+3+x*0],[-1.39548E+4+x*0],[-2.08464E+4+x*0],[ 1.79040E+4+x*0]],
[[ 1.68733E+2+x*0],[-6.07829E+2+x*0],[ 1.14933E+3+x*0],[-2.42382E+2+x*0],[-7.73030E+2+x*0],[ 5.33993E+3+x*0],[-4.03443E+3+x*0],[ 3.00692E+3+x*0],[ 9.58809E+3+x*0],[ 8.16574E+3+x*0],[-6.13322E+3+x*0],[-1.48117E+4+x*0],[ 3.43816E+4+x*0]],
[[-6.69883E+2+x*0],[ 1.59654E+3+x*0],[-3.33375E+3+x*0],[-2.13234E+3+x*0],[-1.80812E+2+x*0],[ 3.75605E+3+x*0],[-4.75180E+3+x*0],[-4.50495E+3+x*0],[ 5.38753E+3+x*0],[ 5.03355E+3+x*0],[-1.18396E+4+x*0],[-8.58473E+3+x*0],[ 3.96316E+4+x*0]],
[[ 1.56222E+3+x*0],[-1.78598E+3+x*0],[ 5.13747E+3+x*0],[ 4.10404E+3+x*0],[ 5.54775E+2+x*0],[-3.89994E+3+x*0],[-1.22455E+3+x*0],[ 1.03747E+3+x*0],[ 4.32237E+3+x*0],[ 1.03805E+3+x*0],[-1.47172E+4+x*0],[-1.23591E+4+x*0],[ 1.77290E+4+x*0]],
[[-2.34712E+3+x*0],[ 2.78197E+2+x*0],[-5.49648E+3+x*0],[-5.94988E+2+x*0],[-1.47060E+3+x*0],[-2.84032E+2+x*0],[-1.15352E+3+x*0],[-1.17893E+3+x*0],[ 7.01209E+3+x*0],[ 4.75631E+3+x*0],[-5.13807E+3+x*0],[-8.73615E+3+x*0],[ 9.41580E+3+x*0]],
[[ 1.92894E+3+x*0],[ 1.17970E+3+x*0],[ 3.13650E+3+x*0],[-2.91121E+2+x*0],[-1.15006E+3+x*0],[ 4.17375E+3+x*0],[-3.31788E+2+x*0],[ 1.37973E+3+x*0],[-2.48966E+3+x*0],[ 4.82005E+3+x*0],[-1.06121E+4+x*0],[-1.19394E+4+x*0],[ 1.34908E+4+x*0]],
[[ 6.40881E+2+x*0],[-6.81789E+2+x*0],[ 1.20037E+3+x*0],[-3.27298E+3+x*0],[ 1.02988E+2+x*0],[ 2.03514E+3+x*0],[-2.80502E+3+x*0],[ 8.83880E+2+x*0],[ 1.68409E+3+x*0],[ 4.26227E+3+x*0],[-6.37868E+3+x*0],[-1.11597E+4+x*0],[ 1.46861E+4+x*0]],
[[-4.02494E+3+x*0],[-1.37983E+3+x*0],[-1.65623E+3+x*0],[ 7.36120E+1+x*0],[ 2.66656E+3+x*0],[-2.30516E+3+x*0],[ 5.22182E+3+x*0],[-8.53317E+3+x*0],[ 3.75800E+2+x*0],[ 8.49249E+2+x*0],[-6.88736E+3+x*0],[-1.01475E+4+x*0],[ 4.75820E+3+x*0]],
[[ 4.59247E+3+x*0],[ 3.04203E+3+x*0],[-2.11039E+3+x*0],[ 1.32383E+3+x*0],[ 1.10646E+3+x*0],[-3.53827E+3+x*0],[-1.12073E+3+x*0],[-5.47633E+3+x*0],[ 9.85745E+3+x*0],[ 5.72138E+3+x*0],[ 6.86444E+3+x*0],[-5.72696E+3+x*0],[ 1.29053E+3+x*0]],
[[-1.61848E+3+x*0],[-1.83704E+3+x*0],[ 2.06738E+3+x*0],[ 4.00292E+3+x*0],[-3.72824E+1+x*0],[ 9.10086E+2+x*0],[ 3.72526E+3+x*0],[ 3.41895E+3+x*0],[ 1.31241E+3+x*0],[ 6.68089E+3+x*0],[-4.34269E+3+x*0],[-5.42296E+3+x*0],[ 2.83445E+3+x*0]],
[[-1.00239E+3+x*0],[-1.24281E+3+x*0],[ 2.46998E+3+x*0],[-4.25837E+3+x*0],[-1.83515E+2+x*0],[-6.47138E+2+x*0],[-7.35806E+3+x*0],[-1.50866E+3+x*0],[-2.47275E+3+x*0],[ 9.09399E+3+x*0],[-2.75851E+3+x*0],[-6.75104E+3+x*0],[ 7.00899E+2+x*0]],
[[ 1.04911E+3+x*0],[ 2.07475E+3+x*0],[-3.83953E+3+x*0],[ 7.79924E+2+x*0],[-4.08658E+3+x*0],[ 4.43432E+3+x*0],[ 3.23015E+2+x*0],[ 6.16180E+3+x*0],[-1.00851E+4+x*0],[ 7.65063E+3+x*0],[ 1.52880E+3+x*0],[-6.08330E+3+x*0],[ 1.23369E+3+x*0]],
[[-2.61041E+2+x*0],[-7.22803E+2+x*0],[ 1.34581E+3+x*0],[ 5.90851E+2+x*0],[ 3.32198E+2+x*0],[ 2.58340E+3+x*0],[-5.97604E+2+x*0],[-4.34018E+3+x*0],[-3.58925E+3+x*0],[ 2.59165E+3+x*0],[ 6.76140E+3+x*0],[-6.22138E+3+x*0],[ 4.40668E+3+x*0]]
])[:,:,0,:]
theta_ei = np.array([
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]]
])[:,:,0,:,:]
theta_ei = np.transpose(theta_ei, (1,0,2,3))
Zj = np.array([
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]]
])[:,:,0,:,:]
G_theta_x = np.sum(np.sum(a_ij*theta_ei*Zj, 1), 0)
DI_over_tau_over_theta_gt12 = x**2.0 * np.exp(-x) * (x-x_0) * G_theta_x
#========== Pick the region
f_nu = DI_over_tau_over_theta_lt12
    w_gt12 = (x > 1.2) * (t_grid > Tlim*u.keV)
f_nu[w_gt12] = DI_over_tau_over_theta_gt12[w_gt12]
return f_nu
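#===================================================
#========== Illustrative usage
#===================================================
# A minimal runnable sketch (the frequency grid and temperature are assumptions,
# not part of the original module):
if __name__ == "__main__":
    freq = np.linspace(50.0, 500.0, 10) * u.GHz           # hypothetical frequency grid
    print(tsz_spec(freq))                                 # non-relativistic shape f(nu)
    print(tsz_spec_relativistic(freq, 10.0 * u.keV))      # relativistic shape for kBT = 10 keV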
|
[
"numpy.array",
"numpy.exp",
"numpy.transpose",
"numpy.sum"
] |
[((11551, 11587), 'numpy.transpose', 'np.transpose', (['theta_ei', '(1, 0, 2, 3)'], {}), '(theta_ei, (1, 0, 2, 3))\n', (11563, 11587), True, 'import numpy as np\n'), ((4546, 8066), 'numpy.array', 'np.array', (['[[[-18.1317 + x * 0], [99.7038 + x * 0], [-60.7438 + x * 0], [1051.43 + x *\n 0], [-2867.34 + x * 0], [7733.53 + x * 0], [-8166.44 + x * 0], [-\n 5377.12 + x * 0], [15222.6 + x * 0], [7187.26 + x * 0], [-13954.8 + x *\n 0], [-20846.4 + x * 0], [17904.0 + x * 0]], [[168.733 + x * 0], [-\n 607.829 + x * 0], [1149.33 + x * 0], [-242.382 + x * 0], [-773.03 + x *\n 0], [5339.93 + x * 0], [-4034.43 + x * 0], [3006.92 + x * 0], [9588.09 +\n x * 0], [8165.74 + x * 0], [-6133.22 + x * 0], [-14811.7 + x * 0], [\n 34381.6 + x * 0]], [[-669.883 + x * 0], [1596.54 + x * 0], [-3333.75 + \n x * 0], [-2132.34 + x * 0], [-180.812 + x * 0], [3756.05 + x * 0], [-\n 4751.8 + x * 0], [-4504.95 + x * 0], [5387.53 + x * 0], [5033.55 + x * \n 0], [-11839.6 + x * 0], [-8584.73 + x * 0], [39631.6 + x * 0]], [[\n 1562.22 + x * 0], [-1785.98 + x * 0], [5137.47 + x * 0], [4104.04 + x *\n 0], [554.775 + x * 0], [-3899.94 + x * 0], [-1224.55 + x * 0], [1037.47 +\n x * 0], [4322.37 + x * 0], [1038.05 + x * 0], [-14717.2 + x * 0], [-\n 12359.1 + x * 0], [17729.0 + x * 0]], [[-2347.12 + x * 0], [278.197 + x *\n 0], [-5496.48 + x * 0], [-594.988 + x * 0], [-1470.6 + x * 0], [-\n 284.032 + x * 0], [-1153.52 + x * 0], [-1178.93 + x * 0], [7012.09 + x *\n 0], [4756.31 + x * 0], [-5138.07 + x * 0], [-8736.15 + x * 0], [9415.8 +\n x * 0]], [[1928.94 + x * 0], [1179.7 + x * 0], [3136.5 + x * 0], [-\n 291.121 + x * 0], [-1150.06 + x * 0], [4173.75 + x * 0], [-331.788 + x *\n 0], [1379.73 + x * 0], [-2489.66 + x * 0], [4820.05 + x * 0], [-10612.1 +\n x * 0], [-11939.4 + x * 0], [13490.8 + x * 0]], [[640.881 + x * 0], [-\n 681.789 + x * 0], [1200.37 + x * 0], [-3272.98 + x * 0], [102.988 + x *\n 0], [2035.14 + x * 0], [-2805.02 + x * 0], [883.88 + x * 0], [1684.09 +\n x * 0], [4262.27 + x * 0], [-6378.68 + x * 0], [-11159.7 + x * 0], [\n 14686.1 + x * 0]], [[-4024.94 + x * 0], [-1379.83 + x * 0], [-1656.23 +\n x * 0], [73.612 + x * 0], [2666.56 + x * 0], [-2305.16 + x * 0], [\n 5221.82 + x * 0], [-8533.17 + x * 0], [375.8 + x * 0], [849.249 + x * 0\n ], [-6887.36 + x * 0], [-10147.5 + x * 0], [4758.2 + x * 0]], [[4592.47 +\n x * 0], [3042.03 + x * 0], [-2110.39 + x * 0], [1323.83 + x * 0], [\n 1106.46 + x * 0], [-3538.27 + x * 0], [-1120.73 + x * 0], [-5476.33 + x *\n 0], [9857.45 + x * 0], [5721.38 + x * 0], [6864.44 + x * 0], [-5726.96 +\n x * 0], [1290.53 + x * 0]], [[-1618.48 + x * 0], [-1837.04 + x * 0], [\n 2067.38 + x * 0], [4002.92 + x * 0], [-37.2824 + x * 0], [910.086 + x *\n 0], [3725.26 + x * 0], [3418.95 + x * 0], [1312.41 + x * 0], [6680.89 +\n x * 0], [-4342.69 + x * 0], [-5422.96 + x * 0], [2834.45 + x * 0]], [[-\n 1002.39 + x * 0], [-1242.81 + x * 0], [2469.98 + x * 0], [-4258.37 + x *\n 0], [-183.515 + x * 0], [-647.138 + x * 0], [-7358.06 + x * 0], [-\n 1508.66 + x * 0], [-2472.75 + x * 0], [9093.99 + x * 0], [-2758.51 + x *\n 0], [-6751.04 + x * 0], [700.899 + x * 0]], [[1049.11 + x * 0], [\n 2074.75 + x * 0], [-3839.53 + x * 0], [779.924 + x * 0], [-4086.58 + x *\n 0], [4434.32 + x * 0], [323.015 + x * 0], [6161.8 + x * 0], [-10085.1 +\n x * 0], [7650.63 + x * 0], [1528.8 + x * 0], [-6083.3 + x * 0], [\n 1233.69 + x * 0]], [[-261.041 + x * 0], [-722.803 + x * 0], [1345.81 + \n x * 0], [590.851 + x * 0], [332.198 + x * 0], [2583.4 + x * 0], [-\n 597.604 + x * 0], [-4340.18 + x * 0], [-3589.25 + x * 0], 
[2591.65 + x *\n 0], [6761.4 + x * 0], [-6221.38 + x * 0], [4406.68 + x * 0]]]'], {}), '([[[-18.1317 + x * 0], [99.7038 + x * 0], [-60.7438 + x * 0], [\n 1051.43 + x * 0], [-2867.34 + x * 0], [7733.53 + x * 0], [-8166.44 + x *\n 0], [-5377.12 + x * 0], [15222.6 + x * 0], [7187.26 + x * 0], [-13954.8 +\n x * 0], [-20846.4 + x * 0], [17904.0 + x * 0]], [[168.733 + x * 0], [-\n 607.829 + x * 0], [1149.33 + x * 0], [-242.382 + x * 0], [-773.03 + x *\n 0], [5339.93 + x * 0], [-4034.43 + x * 0], [3006.92 + x * 0], [9588.09 +\n x * 0], [8165.74 + x * 0], [-6133.22 + x * 0], [-14811.7 + x * 0], [\n 34381.6 + x * 0]], [[-669.883 + x * 0], [1596.54 + x * 0], [-3333.75 + \n x * 0], [-2132.34 + x * 0], [-180.812 + x * 0], [3756.05 + x * 0], [-\n 4751.8 + x * 0], [-4504.95 + x * 0], [5387.53 + x * 0], [5033.55 + x * \n 0], [-11839.6 + x * 0], [-8584.73 + x * 0], [39631.6 + x * 0]], [[\n 1562.22 + x * 0], [-1785.98 + x * 0], [5137.47 + x * 0], [4104.04 + x *\n 0], [554.775 + x * 0], [-3899.94 + x * 0], [-1224.55 + x * 0], [1037.47 +\n x * 0], [4322.37 + x * 0], [1038.05 + x * 0], [-14717.2 + x * 0], [-\n 12359.1 + x * 0], [17729.0 + x * 0]], [[-2347.12 + x * 0], [278.197 + x *\n 0], [-5496.48 + x * 0], [-594.988 + x * 0], [-1470.6 + x * 0], [-\n 284.032 + x * 0], [-1153.52 + x * 0], [-1178.93 + x * 0], [7012.09 + x *\n 0], [4756.31 + x * 0], [-5138.07 + x * 0], [-8736.15 + x * 0], [9415.8 +\n x * 0]], [[1928.94 + x * 0], [1179.7 + x * 0], [3136.5 + x * 0], [-\n 291.121 + x * 0], [-1150.06 + x * 0], [4173.75 + x * 0], [-331.788 + x *\n 0], [1379.73 + x * 0], [-2489.66 + x * 0], [4820.05 + x * 0], [-10612.1 +\n x * 0], [-11939.4 + x * 0], [13490.8 + x * 0]], [[640.881 + x * 0], [-\n 681.789 + x * 0], [1200.37 + x * 0], [-3272.98 + x * 0], [102.988 + x *\n 0], [2035.14 + x * 0], [-2805.02 + x * 0], [883.88 + x * 0], [1684.09 +\n x * 0], [4262.27 + x * 0], [-6378.68 + x * 0], [-11159.7 + x * 0], [\n 14686.1 + x * 0]], [[-4024.94 + x * 0], [-1379.83 + x * 0], [-1656.23 +\n x * 0], [73.612 + x * 0], [2666.56 + x * 0], [-2305.16 + x * 0], [\n 5221.82 + x * 0], [-8533.17 + x * 0], [375.8 + x * 0], [849.249 + x * 0\n ], [-6887.36 + x * 0], [-10147.5 + x * 0], [4758.2 + x * 0]], [[4592.47 +\n x * 0], [3042.03 + x * 0], [-2110.39 + x * 0], [1323.83 + x * 0], [\n 1106.46 + x * 0], [-3538.27 + x * 0], [-1120.73 + x * 0], [-5476.33 + x *\n 0], [9857.45 + x * 0], [5721.38 + x * 0], [6864.44 + x * 0], [-5726.96 +\n x * 0], [1290.53 + x * 0]], [[-1618.48 + x * 0], [-1837.04 + x * 0], [\n 2067.38 + x * 0], [4002.92 + x * 0], [-37.2824 + x * 0], [910.086 + x *\n 0], [3725.26 + x * 0], [3418.95 + x * 0], [1312.41 + x * 0], [6680.89 +\n x * 0], [-4342.69 + x * 0], [-5422.96 + x * 0], [2834.45 + x * 0]], [[-\n 1002.39 + x * 0], [-1242.81 + x * 0], [2469.98 + x * 0], [-4258.37 + x *\n 0], [-183.515 + x * 0], [-647.138 + x * 0], [-7358.06 + x * 0], [-\n 1508.66 + x * 0], [-2472.75 + x * 0], [9093.99 + x * 0], [-2758.51 + x *\n 0], [-6751.04 + x * 0], [700.899 + x * 0]], [[1049.11 + x * 0], [\n 2074.75 + x * 0], [-3839.53 + x * 0], [779.924 + x * 0], [-4086.58 + x *\n 0], [4434.32 + x * 0], [323.015 + x * 0], [6161.8 + x * 0], [-10085.1 +\n x * 0], [7650.63 + x * 0], [1528.8 + x * 0], [-6083.3 + x * 0], [\n 1233.69 + x * 0]], [[-261.041 + x * 0], [-722.803 + x * 0], [1345.81 + \n x * 0], [590.851 + x * 0], [332.198 + x * 0], [2583.4 + x * 0], [-\n 597.604 + x * 0], [-4340.18 + x * 0], [-3589.25 + x * 0], [2591.65 + x *\n 0], [6761.4 + x * 0], [-6221.38 + x * 0], [4406.68 + x * 0]]])\n', (4554, 8066), True, 'import numpy as 
np\n'), ((7777, 13393), 'numpy.array', 'np.array', (['[[[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (\n 10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 *\n theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) **\n 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x *\n 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 *\n theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta\n ) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0],\n [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + \n (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 *\n theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) **\n 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [\n x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 +\n (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 *\n theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) **\n 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x *\n 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 *\n theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) **\n 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0],\n [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + \n (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 
+ (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]]]'], {}), '([[[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x *\n 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 *\n theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) **\n 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x *\n 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 *\n theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta\n ) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0],\n [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + \n (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 *\n theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) **\n 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [\n x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 +\n (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 *\n theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) **\n 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x *\n 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 *\n theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) **\n 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0],\n [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + \n (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], 
[x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (\n 10 * theta) ** 0.0], [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 *\n theta) ** 2.0], [x * 0 + (10 * theta) ** 3.0], [x * 0 + (10 * theta) **\n 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x *\n 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 *\n theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) **\n 11.0], [x * 0 + (10 * theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0],\n [x * 0 + (10 * theta) ** 1.0], [x * 0 + (10 * theta) ** 2.0], [x * 0 + \n (10 * theta) ** 3.0], [x * 0 + (10 * theta) ** 4.0], [x * 0 + (10 *\n theta) ** 5.0], [x * 0 + (10 * theta) ** 6.0], [x * 0 + (10 * theta) **\n 7.0], [x * 0 + (10 * theta) ** 8.0], [x * 0 + (10 * theta) ** 9.0], [x *\n 0 + (10 * theta) ** 10.0], [x * 0 + (10 * theta) ** 11.0], [x * 0 + (10 *\n theta) ** 12.0]], [[x * 0 + (10 * theta) ** 0.0], [x * 0 + (10 * theta) **\n 1.0], [x * 0 + (10 * theta) ** 
2.0], [x * 0 + (10 * theta) ** 3.0], [x *\n 0 + (10 * theta) ** 4.0], [x * 0 + (10 * theta) ** 5.0], [x * 0 + (10 *\n theta) ** 6.0], [x * 0 + (10 * theta) ** 7.0], [x * 0 + (10 * theta) **\n 8.0], [x * 0 + (10 * theta) ** 9.0], [x * 0 + (10 * theta) ** 10.0], [x *\n 0 + (10 * theta) ** 11.0], [x * 0 + (10 * theta) ** 12.0]]])\n', (7785, 13393), True, 'import numpy as np\n'), ((11599, 15475), 'numpy.array', 'np.array', (['[[[(0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 
* x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]]]'], {}), '([[[(0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [\n (0.05 * x) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 *\n x) ** 6.0], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** \n 9.0], [(0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]],\n [[(0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 *\n x) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** \n 6.0], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 
0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]], [[(\n 0.05 * x) ** 0.0], [(0.05 * x) ** 1.0], [(0.05 * x) ** 2.0], [(0.05 * x\n ) ** 3.0], [(0.05 * x) ** 4.0], [(0.05 * x) ** 5.0], [(0.05 * x) ** 6.0\n ], [(0.05 * x) ** 7.0], [(0.05 * x) ** 8.0], [(0.05 * x) ** 9.0], [(\n 0.05 * x) ** 10.0], [(0.05 * x) ** 11.0], [(0.05 * x) ** 12.0]]])\n', (11607, 15475), True, 'import numpy as np\n'), ((14384, 14415), 'numpy.sum', 'np.sum', (['(a_ij * theta_ei * Zj)', '(1)'], {}), '(a_ij * theta_ei * Zj, 1)\n', (14390, 14415), True, 'import numpy as np\n'), ((2365, 2374), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2371, 2374), True, 'import numpy as np\n'), ((2420, 2429), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2426, 2429), True, 'import numpy as np\n'), ((2446, 2459), 'numpy.exp', 'np.exp', (['(x / 2)'], {}), '(x / 2)\n', (2452, 2459), True, 'import numpy as np\n'), ((2458, 2472), 'numpy.exp', 'np.exp', (['(-x / 2)'], {}), '(-x / 2)\n', (2464, 2472), True, 'import numpy as np\n'), ((1118, 1127), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1124, 1127), True, 'import numpy as np\n'), ((2376, 2385), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2382, 2385), True, 'import numpy as np\n'), ((2406, 2415), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2412, 2415), True, 'import numpy as np\n'), ((14460, 14470), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (14466, 14470), True, 'import numpy as np\n'), ((1131, 1140), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1137, 1140), True, 'import numpy as np\n'), ((1167, 1176), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1173, 1176), True, 'import numpy as np\n'), ((1153, 1162), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1159, 1162), True, 'import numpy as np\n')]
|
# PhysiBoSS Tab
import os
from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, \
FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output
from collections import deque, Counter
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
import matplotlib.colors as mplc
import numpy as np
import glob
import platform
import csv
import itertools
import copy
import scipy.io
# from debug import debug_view
class PhysiBoSSTab(object):
def __init__(self):
# tab_height = '520px'
# tab_layout = Layout(width='900px', # border='2px solid black',
# height=tab_height, overflow_y='scroll')
self.output_dir = '.'
self.figsize_width = 15.0 # allow extra for colormap
self.figsize_height = 8
constWidth = '180px'
# self.fig = plt.figure(figsize=(6, 6))
# self.fig = plt.figure(figsize=(7, 7))
config_file = "data/PhysiCell_settings.xml"
self.cell_lines = {}
self.cell_lines_by_name = {}
self.cell_lines_array = ["All"]
if os.path.isfile(config_file):
try:
tree = ET.parse(config_file)
            except ET.ParseError:
                print("Cannot parse", config_file, "- check its XML syntax.")
                return
root = tree.getroot()
uep = root.find('.//cell_definitions') # find unique entry point (uep)
for child in uep.findall('cell_definition'):
self.cell_lines[int(child.attrib["ID"])] = child.attrib["name"]
self.cell_lines_by_name[child.attrib["name"]] = int(child.attrib["ID"])
self.cell_lines_array.append(child.attrib["name"])
# print(child.attrib['name'])
else:
print("config.xml does not exist")
max_frames = 0
self.svg_plot = interactive(self.create_area_chart, frame=(0, max_frames), percentage=(0.0, 10.0), total=False, cell_line=self.cell_lines_array, continuous_update=False)
        plot_size = '750px'  # controls the size of the tab height, not the plot (cf. figsize for that)
self.svg_plot.layout.width = '1000px'
self.svg_plot.layout.height = '700px'
self.use_defaults = True
self.axes_min = 0.0
self.axes_max = 2000 # hmm, this can change (TODO?)
self.max_frames = BoundedIntText(
min=0, max=99999, value=max_frames,
description='Max',
layout=Layout(width='160px'),
# layout=Layout(flex='1 1 auto', width='auto'), #Layout(width='160px'),
)
self.max_frames.observe(self.update_max_frames)
items_auto = [Label('select slider: drag or left/right arrows'),
self.max_frames,
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
width='900px')
row1 = Box(children=items_auto, layout=box_layout)
self.tab = VBox([row1, self.svg_plot])
self.count_dict = {}
self.file_dict = {}
        self.cells_indexes = np.zeros(0)
self.up_to_frame = 0
def update(self, rdir=''):
# with debug_view:
# print("SVG: update rdir=", rdir)
if rdir:
self.output_dir = rdir
all_files = sorted(glob.glob(os.path.join(self.output_dir, 'snapshot*.svg')))
if len(all_files) > 0:
last_file = all_files[-1]
self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: "snapshot%08d.svg"
# self.create_dict(self.max_frames.value, self.output_dir)
# self.state_counter(self.max_frames.value)
# with debug_view:
# print("SVG: added %s files" % len(all_files))
def update_max_frames(self,_b):
self.svg_plot.children[0].max = self.max_frames.value
def create_dict(self, number_of_files, folder):
"create a dictionary with the states file in the folder 'output', half of the dict is used to calculate the percentage of the node, the other half is for the states"
if number_of_files > 0:
            for i in range(0, number_of_files):
if "state_step{0}".format(i) not in self.file_dict.keys():
states_dict = {}
with open(os.path.join(self.output_dir, 'states_%08u.csv' % i), newline='') as csvfile:
states_reader = csv.reader(csvfile, delimiter=',')
for row in states_reader:
if row[0] != 'ID':
states_dict[int(row[0])] = row[1]
self.file_dict["state_step{0}".format(i)] = states_dict
def state_counter(self, number_of_files, percentage, cell_indexes, cell_line):
"create a dict with the states of the network, it can be used to print states pie chart"
self.count_dict = {}
temp_dict = {}
max_cell = 0
if number_of_files > 0:
            for i in range(0, number_of_files):
state_list = []
for key in self.file_dict["state_step{0}".format(i)]:
if cell_line == 'All' or self.cells_indexes[key] == self.cell_lines_by_name[cell_line]:
state_list.append(self.file_dict["state_step{0}".format(i)][key])
state_counts = Counter(state_list)
max_cell = max_cell + sum(state_counts.values())
temp_dict["state_count{0}".format(i)] = state_counts
self.count_dict = self.filter_states(max_cell, temp_dict, percentage)
def create_cell_indexes(self, frame, cell_line):
for i in range(self.up_to_frame, frame):
fname = "output%08d_cells_physicell.mat" % i
full_fname = os.path.join(self.output_dir, fname)
if not os.path.isfile(full_fname):
print("Once output files are generated, click the slider.") # No: output00000000_microenvironment0.mat
return
info_dict = {}
scipy.io.loadmat(full_fname, info_dict)
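            # Assumed PhysiCell .mat layout: in the 'cells' matrix, row 0 holds
            # the cell IDs and row 5 the cell-definition (cell line) ID.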
M = info_dict['cells'][[0,5], :].astype(int)
self.cells_indexes.resize((max(self.cells_indexes.shape[0], M[0, :].max(axis=0)+1)))
self.cells_indexes[M[0, :]] = M[1, :]
self.up_to_frame = frame
return self.cells_indexes
def create_area_chart(self, frame=None, total=False, percentage=(0.0, 100.0), cell_line="All"):
"plot an area chart with the evolution of the network states during the simulation"
cells_indexes = None
if cell_line != "All":
cells_indexes = self.create_cell_indexes(frame, cell_line)
if np.sum(cells_indexes == self.cell_lines_by_name[cell_line]) == 0:
print("There are no %s cells." % cell_line)
return
self.create_dict(frame, self.output_dir)
self.state_counter(frame, percentage, cells_indexes, cell_line)
state_list = []
all_state = []
a = []
for k in self.count_dict:
state_list.append([key for key, value in self.count_dict[k].items() if value > 0])
for l in state_list:
for state in l:
all_state.append(state)
all_state = list(dict.fromkeys(all_state))
for state_count in self.count_dict:
b = []
for states in all_state:
                b.append(self.count_dict[state_count].get(states, 0))
a.append(b)
a = np.array(a)
#print(a)
a = np.transpose(a)
if not total:
percent = a / a.sum(axis=0).astype(float) * 100
else:
percent = a
x = np.arange(len(self.count_dict))
self.fig = plt.figure(figsize=(self.figsize_width, self.figsize_height))
ax = self.fig.add_subplot(111)
ax.stackplot(x, percent, labels=all_state)
ax.legend(labels=all_state, loc='upper center', bbox_to_anchor=(0.5, -0.05),shadow=True, ncol=2)
# ax.legend(labels=all_state, bbox_to_anchor=(1.05, 1), loc='lower center', borderaxespad=0.)
if not total:
ax.set_ylabel('Percent (%)')
else:
ax.set_ylabel("Total")
ax.margins(0, 0) # Set margins to avoid "whitespace"
# plt.show()
def filter_states(self, max_cell, all_counts, percentage):
"""max_cell = 0
all_counts = {}
for i in range (0, number_of_files):
state_list = []
for key in file_dict["state_step{0}".format(i)]:
state_list.append(file_dict["state_step{0}".format(i)][key])
state_counts = Counter(state_list)
max_cell = max_cell + sum(state_counts.values())
all_counts[i] = state_counts"""
copy_all_counts = copy.deepcopy(all_counts)
state_list = []
all_state = []
for k in all_counts:
state_list.append(list(all_counts[k].keys()))
for l in state_list:
for state in l:
all_state.append(state)
all_state = list(dict.fromkeys(all_state))
banned_list = []
for state in all_state:
a = 0
            for i in all_counts.keys():
                a = a + all_counts[i].get(state, 0)
if (a < (percentage/100) * max_cell):
banned_list.append(state)
for i in all_counts.keys():
del all_counts[i][state]
for i in all_counts.keys():
b = 0
            for state in banned_list:
                b = b + copy_all_counts[i].get(state, 0)
all_counts[i]["others"] = b
return all_counts
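# Illustrative usage (a minimal sketch; assumes a Jupyter notebook with ipywidgets
# enabled and PhysiCell/PhysiBoSS output files under 'output/'):
#   physiboss_tab = PhysiBoSSTab()
#   physiboss_tab.update('output')
#   physiboss_tab.tab   # display the widget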
|
[
"ipywidgets.interactive",
"copy.deepcopy",
"xml.etree.ElementTree.parse",
"numpy.sum",
"csv.reader",
"scipy.io.loadmat",
"collections.Counter",
"numpy.zeros",
"numpy.transpose",
"ipywidgets.Box",
"os.path.isfile",
"matplotlib.pyplot.figure",
"numpy.array",
"ipywidgets.Label",
"ipywidgets.Layout",
"ipywidgets.VBox",
"os.path.join"
] |
[((1211, 1238), 'os.path.isfile', 'os.path.isfile', (['config_file'], {}), '(config_file)\n', (1225, 1238), False, 'import os\n'), ((2007, 2169), 'ipywidgets.interactive', 'interactive', (['self.create_area_chart'], {'frame': '(0, max_frames)', 'percentage': '(0.0, 10.0)', 'total': '(False)', 'cell_line': 'self.cell_lines_array', 'continuous_update': '(False)'}), '(self.create_area_chart, frame=(0, max_frames), percentage=(0.0,\n 10.0), total=False, cell_line=self.cell_lines_array, continuous_update=\n False)\n', (2018, 2169), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output\n'), ((3023, 3100), 'ipywidgets.Layout', 'Layout', ([], {'display': '"""flex"""', 'flex_flow': '"""row"""', 'align_items': '"""stretch"""', 'width': '"""900px"""'}), "(display='flex', flex_flow='row', align_items='stretch', width='900px')\n", (3029, 3100), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output\n'), ((3176, 3219), 'ipywidgets.Box', 'Box', ([], {'children': 'items_auto', 'layout': 'box_layout'}), '(children=items_auto, layout=box_layout)\n', (3179, 3219), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output\n'), ((3240, 3267), 'ipywidgets.VBox', 'VBox', (['[row1, self.svg_plot]'], {}), '([row1, self.svg_plot])\n', (3244, 3267), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output\n'), ((3354, 3365), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3362, 3365), True, 'import numpy as np\n'), ((8050, 8061), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (8058, 8061), True, 'import numpy as np\n'), ((8092, 8107), 'numpy.transpose', 'np.transpose', (['a'], {}), '(a)\n', (8104, 8107), True, 'import numpy as np\n'), ((8292, 8353), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(self.figsize_width, self.figsize_height)'}), '(figsize=(self.figsize_width, self.figsize_height))\n', (8302, 8353), True, 'import matplotlib.pyplot as plt\n'), ((9348, 9373), 'copy.deepcopy', 'copy.deepcopy', (['all_counts'], {}), '(all_counts)\n', (9361, 9373), False, 'import copy\n'), ((2909, 2958), 'ipywidgets.Label', 'Label', (['"""select slider: drag or left/right arrows"""'], {}), "('select slider: drag or left/right arrows')\n", (2914, 2958), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output\n'), ((6144, 6180), 'os.path.join', 'os.path.join', (['self.output_dir', 'fname'], {}), '(self.output_dir, fname)\n', (6156, 6180), False, 'import os\n'), ((6425, 6464), 'scipy.io.loadmat', 'scipy.io.loadmat', (['full_fname', 'info_dict'], {}), '(full_fname, info_dict)\n', (6441, 6464), False, 'import scipy\n'), ((1293, 1314), 'xml.etree.ElementTree.parse', 'ET.parse', (['config_file'], {}), '(config_file)\n', (1301, 1314), True, 'import xml.etree.ElementTree as ET\n'), ((2713, 2734), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""160px"""'}), "(width='160px')\n", (2719, 2734), False, 'from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, 
interactive, Output\n'), ((3602, 3648), 'os.path.join', 'os.path.join', (['self.output_dir', '"""snapshot*.svg"""'], {}), "(self.output_dir, 'snapshot*.svg')\n", (3614, 3648), False, 'import os\n'), ((5686, 5705), 'collections.Counter', 'Counter', (['state_list'], {}), '(state_list)\n', (5693, 5705), False, 'from collections import deque, Counter\n'), ((6213, 6239), 'os.path.isfile', 'os.path.isfile', (['full_fname'], {}), '(full_fname)\n', (6227, 6239), False, 'import os\n'), ((7114, 7173), 'numpy.sum', 'np.sum', (['(cells_indexes == self.cell_lines_by_name[cell_line])'], {}), '(cells_indexes == self.cell_lines_by_name[cell_line])\n', (7120, 7173), True, 'import numpy as np\n'), ((4710, 4744), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (4720, 4744), False, 'import csv\n'), ((4592, 4644), 'os.path.join', 'os.path.join', (['self.output_dir', "('states_%08u.csv' % i)"], {}), "(self.output_dir, 'states_%08u.csv' % i)\n", (4604, 4644), False, 'import os\n')]
|