code (string, lengths 31 to 1.05M) | apis (list) | extract_api (string, lengths 97 to 1.91M)
---|---|---|
import numpy as np
import torch
import torch.nn as nn
from modules.envelope import Envelope
from modules.initializers import GlorotOrthogonal
class EmbeddingBlock(nn.Module):
    def __init__(self,
                 emb_size,
                 num_radial,
                 bessel_funcs,
                 cutoff,
                 envelope_exponent,
                 num_atom_types=95,
                 activation=None):
        super(EmbeddingBlock, self).__init__()

        self.bessel_funcs = bessel_funcs
        self.cutoff = cutoff
        self.activation = activation
        self.envelope = Envelope(envelope_exponent)
        self.embedding = nn.Embedding(num_atom_types, emb_size)
        self.dense_rbf = nn.Linear(num_radial, emb_size)
        self.dense = nn.Linear(emb_size * 3, emb_size)
        self.reset_params()

    def reset_params(self):
        nn.init.uniform_(self.embedding.weight, a=-np.sqrt(3), b=np.sqrt(3))
        GlorotOrthogonal(self.dense_rbf.weight)
        GlorotOrthogonal(self.dense.weight)

    def edge_init(self, edges):
        """ msg emb init """
        # m init
        rbf = self.dense_rbf(edges.data['rbf'])
        if self.activation is not None:
            rbf = self.activation(rbf)

        m = torch.cat([edges.src['h'], edges.dst['h'], rbf], dim=-1)
        m = self.dense(m)
        if self.activation is not None:
            m = self.activation(m)

        # rbf_env init
        d_scaled = edges.data['d'] / self.cutoff
        rbf_env = [f(d_scaled) for f in self.bessel_funcs]
        rbf_env = torch.stack(rbf_env, dim=1)

        d_cutoff = self.envelope(d_scaled)
        rbf_env = d_cutoff[:, None] * rbf_env

        return {'m': m, 'rbf_env': rbf_env}

    def forward(self, g):
        g.ndata['h'] = self.embedding(g.ndata['Z'])
        g.apply_edges(self.edge_init)
        return g
|
[
"torch.stack",
"torch.nn.Embedding",
"torch.cat",
"modules.envelope.Envelope",
"torch.nn.Linear",
"numpy.sqrt",
"modules.initializers.GlorotOrthogonal"
] |
[((598, 625), 'modules.envelope.Envelope', 'Envelope', (['envelope_exponent'], {}), '(envelope_exponent)\n', (606, 625), False, 'from modules.envelope import Envelope\n'), ((651, 689), 'torch.nn.Embedding', 'nn.Embedding', (['num_atom_types', 'emb_size'], {}), '(num_atom_types, emb_size)\n', (663, 689), True, 'import torch.nn as nn\n'), ((715, 746), 'torch.nn.Linear', 'nn.Linear', (['num_radial', 'emb_size'], {}), '(num_radial, emb_size)\n', (724, 746), True, 'import torch.nn as nn\n'), ((768, 801), 'torch.nn.Linear', 'nn.Linear', (['(emb_size * 3)', 'emb_size'], {}), '(emb_size * 3, emb_size)\n', (777, 801), True, 'import torch.nn as nn\n'), ((948, 987), 'modules.initializers.GlorotOrthogonal', 'GlorotOrthogonal', (['self.dense_rbf.weight'], {}), '(self.dense_rbf.weight)\n', (964, 987), False, 'from modules.initializers import GlorotOrthogonal\n'), ((996, 1031), 'modules.initializers.GlorotOrthogonal', 'GlorotOrthogonal', (['self.dense.weight'], {}), '(self.dense.weight)\n', (1012, 1031), False, 'from modules.initializers import GlorotOrthogonal\n'), ((1251, 1307), 'torch.cat', 'torch.cat', (["[edges.src['h'], edges.dst['h'], rbf]"], {'dim': '(-1)'}), "([edges.src['h'], edges.dst['h'], rbf], dim=-1)\n", (1260, 1307), False, 'import torch\n'), ((1567, 1594), 'torch.stack', 'torch.stack', (['rbf_env'], {'dim': '(1)'}), '(rbf_env, dim=1)\n', (1578, 1594), False, 'import torch\n'), ((928, 938), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (935, 938), True, 'import numpy as np\n'), ((914, 924), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (921, 924), True, 'import numpy as np\n')]
|
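The row above is the atom/edge embedding stage of a DimeNet-style model written for DGL. Below is a minimal usage sketch; it assumes the `modules` package from the same repository is importable, and the Bessel basis functions, atomic numbers, and tensor shapes are illustrative stand-ins rather than the real radial basis.

import dgl
import torch
import torch.nn.functional as F

emb_size, num_radial = 16, 6
# Hypothetical stand-ins for the spherical Bessel basis functions.
bessel_funcs = [lambda d, n=n: torch.sin((n + 1) * d) for n in range(num_radial)]

block = EmbeddingBlock(emb_size, num_radial, bessel_funcs,
                       cutoff=5.0, envelope_exponent=5, activation=F.silu)

g = dgl.graph(([0, 1], [1, 0]))                # two atoms, two directed edges
g.ndata['Z'] = torch.tensor([1, 6])           # atomic numbers
g.edata['rbf'] = torch.rand(g.num_edges(), num_radial)
g.edata['d'] = torch.tensor([1.1, 1.1])       # pairwise distances

g = block(g)
print(g.edata['m'].shape)                      # (num_edges, emb_size)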
# -*- coding: utf-8 -*-
""" Various registration routines to reduce duplication. """
import numpy as np
import sksurgerycore.transforms.matrix as mt
import sksurgerysurfacematch.interfaces.rigid_registration as rr
def do_rigid_registration(reconstructed_cloud,
                          reference_cloud,
                          rigid_registration: rr.RigidRegistration,
                          initial_ref2recon: np.ndarray = None,
                          ):
    """
    Triggers a rigid body registration using rigid_registration.

    :param reconstructed_cloud: [Nx3] point cloud, e.g. from video.
    :param reference_cloud: [Mx3] point cloud, e.g. from CT/MR.
    :param rigid_registration: Object that implements a rigid registration.
    :param initial_ref2recon: [4x4] ndarray representing an initial \
        estimate.
    :return: residual (float), [4x4] transform
    """
    if initial_ref2recon is not None:
        reference_cloud = \
            np.matmul(
                initial_ref2recon[0:3, 0:3], np.transpose(reference_cloud)) \
            + initial_ref2recon[0:3, 3].reshape((3, 1))
        reference_cloud = np.transpose(reference_cloud)

    # Do registration. Best to register recon points to
    # the provided model (likely from CT or MR), and then invert.
    residual, transform = \
        rigid_registration.register(reconstructed_cloud,
                                    reference_cloud
                                    )
    transform = np.linalg.inv(transform)

    # Combine initial, if we have one.
    if initial_ref2recon is not None:
        init_mat = \
            mt.construct_rigid_transformation(
                initial_ref2recon[0:3, 0:3],
                initial_ref2recon[0:3, 3]
            )
        transform = np.matmul(transform, init_mat)

    return residual, transform
|
[
"numpy.transpose",
"numpy.linalg.inv",
"numpy.matmul",
"sksurgerycore.transforms.matrix.construct_rigid_transformation"
] |
[((1492, 1516), 'numpy.linalg.inv', 'np.linalg.inv', (['transform'], {}), '(transform)\n', (1505, 1516), True, 'import numpy as np\n'), ((1148, 1177), 'numpy.transpose', 'np.transpose', (['reference_cloud'], {}), '(reference_cloud)\n', (1160, 1177), True, 'import numpy as np\n'), ((1628, 1721), 'sksurgerycore.transforms.matrix.construct_rigid_transformation', 'mt.construct_rigid_transformation', (['initial_ref2recon[0:3, 0:3]', 'initial_ref2recon[0:3, 3]'], {}), '(initial_ref2recon[0:3, 0:3],\n initial_ref2recon[0:3, 3])\n', (1661, 1721), True, 'import sksurgerycore.transforms.matrix as mt\n'), ((1784, 1814), 'numpy.matmul', 'np.matmul', (['transform', 'init_mat'], {}), '(transform, init_mat)\n', (1793, 1814), True, 'import numpy as np\n'), ((1033, 1062), 'numpy.transpose', 'np.transpose', (['reference_cloud'], {}), '(reference_cloud)\n', (1045, 1062), True, 'import numpy as np\n')]
|
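A small usage sketch of `do_rigid_registration` follows, assuming the scikit-surgery packages imported at the top of the module are installed. `IdentityRegistration` is a hypothetical stand-in for a real `rr.RigidRegistration` implementation (for example an ICP wrapper), and the random point clouds only illustrate the expected shapes.

import numpy as np

class IdentityRegistration:
    """Stand-in for rr.RigidRegistration: returns a zero residual and an identity pose."""
    def register(self, source_cloud, target_cloud):
        return 0.0, np.eye(4)

recon_cloud = np.random.rand(100, 3)       # e.g. from video reconstruction
reference_cloud = np.random.rand(120, 3)   # e.g. from CT/MR

residual, ref2recon = do_rigid_registration(recon_cloud,
                                            reference_cloud,
                                            IdentityRegistration(),
                                            initial_ref2recon=np.eye(4))
print(residual, ref2recon.shape)              # scalar residual and a (4, 4) transform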
# From https://groups.google.com/forum/#!topic/networkx-discuss/FwYk0ixLDuY
# Plot weighted directed positive/negative network graph
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch, Circle
import numpy as np
def draw_curvy_network(G, pos, ax, node_radius=0.02, node_color='b', node_edge_color='b', node_alpha=0.5, edge_color=None, edge_alpha=0.5, edge_width=None):
    assert isinstance(G, nx.Graph), "G must be a NetworkX graph!"

    # Broadcast per-node / per-edge properties to lists
    def _to_list(x, N):
        if isinstance(x, list):
            assert len(x) == N
            return x
        else:
            return [x] * N

    node_radius = _to_list(node_radius, len(G.nodes()))
    node_color = _to_list(node_color, len(G.nodes()))
    node_edge_color = _to_list(node_edge_color, len(G.nodes()))
    node_alpha = _to_list(node_alpha, len(G.nodes()))

    if edge_color is None:
        edge_color = _to_list('k', len(G.edges()))
    edge_alpha = _to_list(edge_alpha, len(G.edges()))

    # Fall back to a default width if the user did not specify edge widths
    if edge_width is None:
        edge_width = 2
    edge_width = _to_list(edge_width, len(G.edges()))

    # Plot the nodes
    for n, r, a, fc, ec in zip(G, node_radius, node_alpha, node_color, node_edge_color):
        c = Circle(pos[n], radius=r, alpha=a, fc=fc, ec=ec)
        ax.add_patch(c)
        G.nodes[n]['patch'] = c

    # Plot the edges
    seen = {}
    for (u, v, d), a, lw, ec in zip(G.edges(data=True), edge_alpha, edge_width, edge_color):
        n1 = G.nodes[u]['patch']
        n2 = G.nodes[v]['patch']
        rad = -0.1
        if (u, v) in seen:
            rad = seen.get((u, v))
            rad = (rad + np.sign(rad) * 0.1) * -1
        e = FancyArrowPatch(n1.center, n2.center, patchA=n1, patchB=n2, arrowstyle='-|>',
                            connectionstyle='arc3,rad=%s' % rad, mutation_scale=10.0, lw=lw, alpha=a, color=ec)
        seen[(u, v)] = rad
        ax.add_patch(e)

    return e


if __name__ == "__main__":
    from hips.plotting.colormaps import harvard_colors
    color = harvard_colors()[0:10]

    G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 1), (2, 3), (3, 4), (2, 4), (3, 2)])
    pos = nx.spring_layout(G)
    ax = plt.gca()

    edge_width = [5, 0.9, 0.8, 2, 2, 1, 5]
    edge_color = [color[0], color[0], color[0], color[0], color[1], color[1], color[1]]
    draw_curvy_network(G, pos, ax, node_color='k', node_edge_color='k', edge_width=edge_width, edge_color=edge_color)

    ax.autoscale()
    plt.axis('equal')
    plt.axis('off')
    # plt.savefig("graph.pdf")
    plt.show()
|
[
"matplotlib.pyplot.show",
"networkx.MultiDiGraph",
"hips.plotting.colormaps.harvard_colors",
"matplotlib.patches.FancyArrowPatch",
"matplotlib.pyplot.axis",
"matplotlib.patches.Circle",
"numpy.sign",
"networkx.spring_layout",
"matplotlib.pyplot.gca"
] |
[((2141, 2214), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', (['[(1, 1), (1, 2), (2, 1), (2, 3), (3, 4), (2, 4), (3, 2)]'], {}), '([(1, 1), (1, 2), (2, 1), (2, 3), (3, 4), (2, 4), (3, 2)])\n', (2156, 2214), True, 'import networkx as nx\n'), ((2226, 2245), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (2242, 2245), True, 'import networkx as nx\n'), ((2255, 2264), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2262, 2264), True, 'import matplotlib.pyplot as plt\n'), ((2562, 2579), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2570, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2599), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2592, 2599), True, 'import matplotlib.pyplot as plt\n'), ((2635, 2645), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2643, 2645), True, 'import matplotlib.pyplot as plt\n'), ((1319, 1366), 'matplotlib.patches.Circle', 'Circle', (['pos[n]'], {'radius': 'r', 'alpha': 'a', 'fc': 'fc', 'ec': 'ec'}), '(pos[n], radius=r, alpha=a, fc=fc, ec=ec)\n', (1325, 1366), False, 'from matplotlib.patches import FancyArrowPatch, Circle\n'), ((1759, 1929), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['n1.center', 'n2.center'], {'patchA': 'n1', 'patchB': 'n2', 'arrowstyle': '"""-|>"""', 'connectionstyle': "('arc3,rad=%s' % rad)", 'mutation_scale': '(10.0)', 'lw': 'lw', 'alpha': 'a', 'color': 'ec'}), "(n1.center, n2.center, patchA=n1, patchB=n2, arrowstyle=\n '-|>', connectionstyle='arc3,rad=%s' % rad, mutation_scale=10.0, lw=lw,\n alpha=a, color=ec)\n", (1774, 1929), False, 'from matplotlib.patches import FancyArrowPatch, Circle\n'), ((2109, 2125), 'hips.plotting.colormaps.harvard_colors', 'harvard_colors', ([], {}), '()\n', (2123, 2125), False, 'from hips.plotting.colormaps import harvard_colors\n'), ((1721, 1733), 'numpy.sign', 'np.sign', (['rad'], {}), '(rad)\n', (1728, 1733), True, 'import numpy as np\n')]
|
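The `__main__` demo above pulls its colours from the hips package. The sketch below shows the same call pattern with nothing beyond networkx and matplotlib; the edge list, widths, and styling are made up purely for illustration.

import networkx as nx
import matplotlib.pyplot as plt

G = nx.MultiDiGraph([(0, 1), (1, 0), (1, 2), (2, 0)])
pos = nx.spring_layout(G, seed=42)
ax = plt.gca()
draw_curvy_network(G, pos, ax, node_color='w', node_edge_color='k',
                   edge_color=['k'] * len(G.edges()), edge_width=[1, 2, 1, 2])
ax.autoscale()
plt.axis('equal')
plt.axis('off')
plt.show()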
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import scipy.io
# Exercise 5 | Regularized Linear Regression and Bias-Variance
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# exercise. You will need to complete the following functions:
#
# linearRegCostFunction.m
# learningCurve.m
# validationCurve.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
class test_ex5_regularized_linear_regressionand_bias_vs_variance(unittest.TestCase):

    @classmethod
    def setUp(cls):
        # Load Training Data
        print('Loading and Visualizing Data ...')
        data_file = "resource/ex5data1.mat"
        # Load
        # You will have X, y, Xval, yval, Xtest, ytest in your environment
        mat = scipy.io.loadmat(data_file)
        cls.X = mat["X"]
        cls.y = mat["y"]
        cls.Xval = mat["Xval"]
        cls.yval = mat["yval"]
        cls.Xtest = mat["Xtest"]
        cls.ytest = mat["ytest"]
        cls.m = np.shape(cls.X)[0]

    # =========== Part 1: Loading and Visualizing Data =============
    # We start the exercise by first loading and visualizing the dataset.
    # The following code will load the dataset into your environment and plot
    # the data.
    #
    def test_load_and_visualizing_data(self):
        import matplotlib.pyplot as plt
        # print("point_end_y: {max_y}".format(max_y = point_end_y))
        plt.figure(1)
        plt.xlabel('Change in water level (x)')
        plt.ylabel('Water flowing out of the dam (y)')
        plt.scatter(self.X, self.y, marker='o', color='k', s=10)
        plt.show()
        # Plot training data
        print('Program paused. Press enter to continue.')

    # =========== Part 2: Regularized Linear Regression Cost =============
    # You should now implement the cost function for regularized linear
    # regression.
    def test_regularized_linear_regression_cost_and_grad(self):
        # m = Number of examples
        theta = np.array([[1], [1]])
        X_padded = np.column_stack((np.ones((self.m, 1)), self.X))
        from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction
        J, grad = linearRegCostFunction(X_padded, self.y, theta, 1)
        self.assertAlmostEqual(J, 303.993, delta=0.001)
        print('Cost at theta = [1 ; 1]: {cost} \n'
              '(this value should be about 303.993192)'.format(cost=J))

        # =========== Part 3: Regularized Linear Regression Gradient =============
        # You should now implement the gradient for regularized linear
        # regression.
        self.assertAlmostEqual(grad[0], -15.303016, delta=0.0001)
        self.assertAlmostEqual(grad[1], 598.250744, delta=0.0001)
        print('Gradient at theta = [1 ; 1]: [{grad_0}; {grad_1}] \n'
              '(this value should be about [-15.303016; 598.250744])\n'.format(grad_0=grad[0], grad_1=grad[1]))

    # =========== Part 4: Train Linear Regression =============
    # Once you have implemented the cost and gradient correctly, the
    # trainLinearReg function will use your cost function to train
    # regularized linear regression.
    #
    # Write Up Note: The data is non-linear, so this will not give a great
    # fit.
    #
    def test_train_linear_reg(self):
        from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg
        # Train linear regression with lambda = 0
        _lambda = 0
        x_with_bias = np.column_stack((np.ones(self.m), self.X))
        cost, theta = trainLinearReg(x_with_bias, self.y, _lambda)
        ret = x_with_bias.dot(theta)

        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.xlabel('Change in water level (x)')
        plt.ylabel('Water flowing out of the dam (y)')
        plt.scatter(self.X, self.y, marker='x', c='r', s=30, linewidth=2)
        plt.plot(self.X, ret, linewidth=2)
        plt.show()

    # =========== Part 5: Learning Curve for Linear Regression =============
    # Next, you should implement the learningCurve function.
    #
    # Write Up Note: Since the model is underfitting the data, we expect to
    # see a graph with "high bias" -- slide 8 in ML-advice.pdf
    #
    def test_learning_curve_for_linear_regression(self):
        _lambda = 0
        from ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve import learningCurve
        x_with_bias = np.column_stack((np.ones(self.m), self.X))
        x_val_with_bias = np.column_stack((np.ones(np.shape(self.Xval)[0]), self.Xval))
        error_train, error_val = learningCurve(x_with_bias, self.y, x_val_with_bias, self.yval, 0)
        print('# Training Examples\tTrain Error\tCross Validation Error')
        for i in range(self.m):
            print(' \t{index}\t\t{error_train}\t{error_val}\n'.format(index=i,
                                                                      error_train=error_train[i],
                                                                      error_val=error_val[i]))
        import matplotlib.pyplot as plt
        temp = np.array([x for x in range(1, self.m + 1)])
        # plt.plot(1:m, error_train, 1:m, error_val);
        plt.title('Learning curve for linear regression')
        plt.xlabel('Number of training examples')
        plt.ylabel('Error')
        plt.plot(temp, np.array(error_train), color='b', linewidth=2, label='Train')
        plt.plot(temp, np.array(error_val), color='y', linewidth=2, label='Cross Validation')
        plt.legend()
        plt.show(block=True)

    # =========== Part 6: Feature Mapping for Polynomial Regression =============
    # One solution to this is to use polynomial regression. You should now
    # complete polyFeatures to map each example into its powers
    #
    def test_feature_mapping_for_polynomial_regression(self):
        p = 8
        # Map X onto Polynomial Features and Normalize
        from ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures import polyFeatures
        X_poly = polyFeatures(self.X, p)
        X_poly_m, X_poly_n = np.shape(X_poly)
        self.assertEqual(X_poly_m, self.m)
        self.assertEqual(X_poly_n, p)
        from ex5_regularized_linear_regressionand_bias_vs_variance.featureNormalize import featureNormalize
        X_poly, mu, sigma = featureNormalize(X_poly)
        X_poly = np.column_stack((np.ones((self.m, 1)), X_poly))

        X_poly_test = polyFeatures(self.Xtest, p)
        X_poly_test_m, X_poly_test_n = np.shape(X_poly_test)
        self.assertEqual(X_poly_test_m, np.shape(self.Xtest)[0])
        self.assertEqual(X_poly_test_n, p)
        X_poly_test = X_poly_test - mu
        X_poly_test = X_poly_test / sigma
        X_poly_test = np.column_stack((np.ones((X_poly_test.shape[0], 1)), X_poly_test))

        X_poly_val = polyFeatures(self.Xval, p)
        X_poly_val_m, X_poly_val_n = np.shape(X_poly_val)
        self.assertEqual(X_poly_val_m, np.shape(self.Xval)[0])
        self.assertEqual(X_poly_val_n, p)
        X_poly_val = X_poly_val - mu
        X_poly_val = X_poly_val / sigma
        X_poly_val = np.column_stack((np.ones((X_poly_val.shape[0], 1)), X_poly_val))
        print('Normalized Training Example 1:\n'
              ' {X_poly} '.format(X_poly=X_poly))

        # =========== Part 7: Learning Curve for Polynomial Regression =============
        # Now, you will get to experiment with polynomial regression with multiple
        # values of lambda. The code below runs polynomial regression with
        # lambda = 0. You should try running the code with different values of
        # lambda to see how the fit and learning curve change.
        #
        _lambda = 0
        from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg
        cost, theta = trainLinearReg(X_poly, self.y, _lambda)
        self.assertIsNotNone(cost)
        self.assertIsNotNone(theta)

        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.scatter(self.X, self.y, marker='x', c='r', s=30, linewidth=2)
        plt.xlim([-80, 80])
        plt.ylim([-20, 60])
        plt.xlabel('Change in water level(x)')
        plt.ylabel('Water flowing out of the dam(y)')
        plt.title('Polynomial Regression Fit (lambda = {:f})'.format(_lambda))
        # plt.plot(self.X, self.y, 'rx', markersize=10, linewidth=1.5)
        from ex5_regularized_linear_regressionand_bias_vs_variance.plotFit import plotFit
        plotFit(min(self.X), max(self.X), mu, sigma, theta, p)
        plt.show(block=False)

        plt.figure(2)
        from ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve import learningCurve
        error_train, error_val = learningCurve(X_poly, self.y, X_poly_val, self.yval, 0)
        p1, p2 = plt.plot(range(1, self.m + 1), error_train, range(1, self.m + 1), error_val)
        plt.legend((p1, p2), ('Train', 'Cross Validation'))
        plt.show(block=False)
        print('Polynomial Regression (lambda = {_lambda})'.format(_lambda=_lambda))
        print('# Training Examples\tTrain Error\tCross Validation Error')
        for i in range(0, self.m):
            print('\t{i}\t\t{error_train}\t{error_val}'.format(i=i, error_train=error_train[i], error_val=error_val[i]))

        # =========== Part 8: Validation for Selecting Lambda =============
        # You will now implement validationCurve to test various values of
        # lambda on a validation set. You will then use this to select the
        # "best" lambda value.
        #
        from ex5_regularized_linear_regressionand_bias_vs_variance.validationCurve import validationCurve
        lambda_vec, error_train, error_val = validationCurve(X_poly, self.y, X_poly_val, self.yval)
        self.assertEqual(len(error_train), len(lambda_vec))
        self.assertEqual(len(error_val), len(lambda_vec))
        plt.close('all')
        p1, p2, = plt.plot(lambda_vec, error_train, lambda_vec, error_val)
        plt.legend((p1, p2), ('Train', 'Cross Validation'))
        plt.xlabel('lambda')
        plt.ylabel('Error')
        plt.show(block=False)
        print('lambda\t\tTrain Error\tValidation Error')
        for i in range(len(lambda_vec)):
            print(
                '{lambda_vec}\t{error_train}\t{error_val}'.format(lambda_vec=lambda_vec[i], error_train=error_train[i],
                                                                  error_val=error_val[i]))

        # =========== Part 9: Computing test set error and Plotting learning curves with randomly selected examples
        # ============= best lambda value from previous step
        lambda_val = 3
        # note that we're using X_poly - polynomial linear regression with polynomial features
        from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg
        _, theta = trainLinearReg(X_poly, self.y, lambda_val)
        # because we're using X_poly, we also have to use X_poly_test with polynomial features
        from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction
        error_test, _ = linearRegCostFunction(X_poly_test, self.ytest, theta, 0)
        print('Test set error: {error_test}'.format(error_test=error_test))  # expected 3.859
        # why? something wrong
        # self.assertAlmostEqual(error_test, 3.859, delta=0.01)

        # =========== Part 10: Plot learning curves with randomly selected examples =============
        #
        # lambda_val value for this step
        lambda_val = 0.01
        times = 50
        error_train_rand = np.zeros((self.m, times))
        error_val_rand = np.zeros((self.m, times))
        for i in range(self.m):
            for k in range(times):
                rand_sample_train = np.random.permutation(X_poly.shape[0])
                rand_sample_train = rand_sample_train[:i + 1]
                rand_sample_val = np.random.permutation(X_poly_val.shape[0])
                rand_sample_val = rand_sample_val[:i + 1]
                X_poly_train_rand = X_poly[rand_sample_train, :]
                y_train_rand = self.y[rand_sample_train]
                X_poly_val_rand = X_poly_val[rand_sample_val, :]
                y_val_rand = self.yval[rand_sample_val]
                _, theta = trainLinearReg(X_poly_train_rand, y_train_rand, lambda_val)
                cost, _ = linearRegCostFunction(X_poly_train_rand, y_train_rand, np.asarray(theta), 0)
                error_train_rand[i, k] = cost
                cost, _ = linearRegCostFunction(X_poly_val_rand, y_val_rand, theta, 0)
                error_val_rand[i, k] = cost
        error_train = np.mean(error_train_rand, axis=1)
        error_val = np.mean(error_val_rand, axis=1)
        p1, p2 = plt.plot(range(self.m), error_train, range(self.m), error_val)
        plt.title('Polynomial Regression Learning Curve (lambda = {:f})'.format(lambda_val))
        plt.legend((p1, p2), ('Train', 'Cross Validation'))
        plt.xlabel('Number of training examples')
        plt.ylabel('Error')
        plt.axis([0, 13, 0, 150])
        plt.show(block=False)


if __name__ == '__main__':
    unittest.main()
|
[
"matplotlib.pyplot.title",
"ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg.trainLinearReg",
"numpy.ones",
"ex5_regularized_linear_regressionand_bias_vs_variance.featureNormalize.featureNormalize",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.mean",
"unittest.main",
"matplotlib.pyplot.close",
"ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction.linearRegCostFunction",
"matplotlib.pyplot.show",
"ex5_regularized_linear_regressionand_bias_vs_variance.validationCurve.validationCurve",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.asarray",
"numpy.random.permutation",
"matplotlib.pyplot.ylabel",
"ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures.polyFeatures",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.array",
"ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve.learningCurve",
"matplotlib.pyplot.xlabel"
] |
[((13315, 13330), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13328, 13330), False, 'import unittest\n'), ((1535, 1548), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1545, 1548), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1596), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Change in water level (x)"""'], {}), "('Change in water level (x)')\n", (1567, 1596), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1651), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Water flowing out of the dam (y)"""'], {}), "('Water flowing out of the dam (y)')\n", (1615, 1651), True, 'import matplotlib.pyplot as plt\n'), ((1660, 1716), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.X', 'self.y'], {'marker': '"""o"""', 'color': '"""k"""', 's': '(10)'}), "(self.X, self.y, marker='o', color='k', s=10)\n", (1671, 1716), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1735), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1733, 1735), True, 'import matplotlib.pyplot as plt\n'), ((2103, 2123), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (2111, 2123), True, 'import numpy as np\n'), ((2327, 2376), 'ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction.linearRegCostFunction', 'linearRegCostFunction', (['X_padded', 'self.y', 'theta', '(1)'], {}), '(X_padded, self.y, theta, 1)\n', (2348, 2376), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction\n'), ((3683, 3727), 'ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg.trainLinearReg', 'trainLinearReg', (['x_with_bias', 'self.y', '_lambda'], {}), '(x_with_bias, self.y, _lambda)\n', (3697, 3727), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg\n'), ((3814, 3827), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3824, 3827), True, 'import matplotlib.pyplot as plt\n'), ((3836, 3875), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Change in water level (x)"""'], {}), "('Change in water level (x)')\n", (3846, 3875), True, 'import matplotlib.pyplot as plt\n'), ((3884, 3930), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Water flowing out of the dam (y)"""'], {}), "('Water flowing out of the dam (y)')\n", (3894, 3930), True, 'import matplotlib.pyplot as plt\n'), ((3939, 4004), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.X', 'self.y'], {'marker': '"""x"""', 'c': '"""r"""', 's': '(30)', 'linewidth': '(2)'}), "(self.X, self.y, marker='x', c='r', s=30, linewidth=2)\n", (3950, 4004), True, 'import matplotlib.pyplot as plt\n'), ((4013, 4047), 'matplotlib.pyplot.plot', 'plt.plot', (['self.X', 'ret'], {'linewidth': '(2)'}), '(self.X, ret, linewidth=2)\n', (4021, 4047), True, 'import matplotlib.pyplot as plt\n'), ((4056, 4066), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4064, 4066), True, 'import matplotlib.pyplot as plt\n'), ((4741, 4806), 'ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve.learningCurve', 'learningCurve', (['x_with_bias', 'self.y', 'x_val_with_bias', 'self.yval', '(0)'], {}), '(x_with_bias, self.y, x_val_with_bias, self.yval, 0)\n', (4754, 4806), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve import learningCurve\n'), ((5352, 5401), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning curve for linear regression"""'], {}), "('Learning curve for linear regression')\n", (5361, 5401), True, 'import matplotlib.pyplot as plt\n'), ((5410, 
5451), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of training examples"""'], {}), "('Number of training examples')\n", (5420, 5451), True, 'import matplotlib.pyplot as plt\n'), ((5460, 5479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (5470, 5479), True, 'import matplotlib.pyplot as plt\n'), ((5667, 5679), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5677, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5688, 5708), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (5696, 5708), True, 'import matplotlib.pyplot as plt\n'), ((6184, 6207), 'ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures.polyFeatures', 'polyFeatures', (['self.X', 'p'], {}), '(self.X, p)\n', (6196, 6207), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures import polyFeatures\n'), ((6237, 6253), 'numpy.shape', 'np.shape', (['X_poly'], {}), '(X_poly)\n', (6245, 6253), True, 'import numpy as np\n'), ((6472, 6496), 'ex5_regularized_linear_regressionand_bias_vs_variance.featureNormalize.featureNormalize', 'featureNormalize', (['X_poly'], {}), '(X_poly)\n', (6488, 6496), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.featureNormalize import featureNormalize\n'), ((6585, 6612), 'ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures.polyFeatures', 'polyFeatures', (['self.Xtest', 'p'], {}), '(self.Xtest, p)\n', (6597, 6612), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures import polyFeatures\n'), ((6652, 6673), 'numpy.shape', 'np.shape', (['X_poly_test'], {}), '(X_poly_test)\n', (6660, 6673), True, 'import numpy as np\n'), ((6974, 7000), 'ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures.polyFeatures', 'polyFeatures', (['self.Xval', 'p'], {}), '(self.Xval, p)\n', (6986, 7000), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures import polyFeatures\n'), ((7038, 7058), 'numpy.shape', 'np.shape', (['X_poly_val'], {}), '(X_poly_val)\n', (7046, 7058), True, 'import numpy as np\n'), ((7971, 8010), 'ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg.trainLinearReg', 'trainLinearReg', (['X_poly', 'self.y', '_lambda'], {}), '(X_poly, self.y, _lambda)\n', (7985, 8010), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg\n'), ((8131, 8144), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (8141, 8144), True, 'import matplotlib.pyplot as plt\n'), ((8153, 8218), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.X', 'self.y'], {'marker': '"""x"""', 'c': '"""r"""', 's': '(30)', 'linewidth': '(2)'}), "(self.X, self.y, marker='x', c='r', s=30, linewidth=2)\n", (8164, 8218), True, 'import matplotlib.pyplot as plt\n'), ((8227, 8246), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-80, 80]'], {}), '([-80, 80])\n', (8235, 8246), True, 'import matplotlib.pyplot as plt\n'), ((8255, 8274), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-20, 60]'], {}), '([-20, 60])\n', (8263, 8274), True, 'import matplotlib.pyplot as plt\n'), ((8283, 8321), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Change in water level(x)"""'], {}), "('Change in water level(x)')\n", (8293, 8321), True, 'import matplotlib.pyplot as plt\n'), ((8330, 8375), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Water flowing out of the dam(y)"""'], {}), "('Water flowing out of the dam(y)')\n", (8340, 8375), True, 'import matplotlib.pyplot as plt\n'), ((8688, 
8709), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (8696, 8709), True, 'import matplotlib.pyplot as plt\n'), ((8719, 8732), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (8729, 8732), True, 'import matplotlib.pyplot as plt\n'), ((8868, 8923), 'ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve.learningCurve', 'learningCurve', (['X_poly', 'self.y', 'X_poly_val', 'self.yval', '(0)'], {}), '(X_poly, self.y, X_poly_val, self.yval, 0)\n', (8881, 8923), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve import learningCurve\n'), ((9026, 9077), 'matplotlib.pyplot.legend', 'plt.legend', (['(p1, p2)', "('Train', 'Cross Validation')"], {}), "((p1, p2), ('Train', 'Cross Validation'))\n", (9036, 9077), True, 'import matplotlib.pyplot as plt\n'), ((9086, 9107), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (9094, 9107), True, 'import matplotlib.pyplot as plt\n'), ((9846, 9900), 'ex5_regularized_linear_regressionand_bias_vs_variance.validationCurve.validationCurve', 'validationCurve', (['X_poly', 'self.y', 'X_poly_val', 'self.yval'], {}), '(X_poly, self.y, X_poly_val, self.yval)\n', (9861, 9900), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.validationCurve import validationCurve\n'), ((10028, 10044), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10037, 10044), True, 'import matplotlib.pyplot as plt\n'), ((10063, 10119), 'matplotlib.pyplot.plot', 'plt.plot', (['lambda_vec', 'error_train', 'lambda_vec', 'error_val'], {}), '(lambda_vec, error_train, lambda_vec, error_val)\n', (10071, 10119), True, 'import matplotlib.pyplot as plt\n'), ((10128, 10179), 'matplotlib.pyplot.legend', 'plt.legend', (['(p1, p2)', "('Train', 'Cross Validation')"], {}), "((p1, p2), ('Train', 'Cross Validation'))\n", (10138, 10179), True, 'import matplotlib.pyplot as plt\n'), ((10188, 10208), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""lambda"""'], {}), "('lambda')\n", (10198, 10208), True, 'import matplotlib.pyplot as plt\n'), ((10217, 10236), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (10227, 10236), True, 'import matplotlib.pyplot as plt\n'), ((10245, 10266), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (10253, 10266), True, 'import matplotlib.pyplot as plt\n'), ((11015, 11057), 'ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg.trainLinearReg', 'trainLinearReg', (['X_poly', 'self.y', 'lambda_val'], {}), '(X_poly, self.y, lambda_val)\n', (11029, 11057), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg\n'), ((11296, 11352), 'ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction.linearRegCostFunction', 'linearRegCostFunction', (['X_poly_test', 'self.ytest', 'theta', '(0)'], {}), '(X_poly_test, self.ytest, theta, 0)\n', (11317, 11352), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction\n'), ((11767, 11792), 'numpy.zeros', 'np.zeros', (['(self.m, times)'], {}), '((self.m, times))\n', (11775, 11792), True, 'import numpy as np\n'), ((11818, 11843), 'numpy.zeros', 'np.zeros', (['(self.m, times)'], {}), '((self.m, times))\n', (11826, 11843), True, 'import numpy as np\n'), ((12820, 12853), 'numpy.mean', 'np.mean', (['error_train_rand'], {'axis': '(1)'}), '(error_train_rand, axis=1)\n', (12827, 12853), 
True, 'import numpy as np\n'), ((12874, 12905), 'numpy.mean', 'np.mean', (['error_val_rand'], {'axis': '(1)'}), '(error_val_rand, axis=1)\n', (12881, 12905), True, 'import numpy as np\n'), ((13088, 13139), 'matplotlib.pyplot.legend', 'plt.legend', (['(p1, p2)', "('Train', 'Cross Validation')"], {}), "((p1, p2), ('Train', 'Cross Validation'))\n", (13098, 13139), True, 'import matplotlib.pyplot as plt\n'), ((13148, 13189), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of training examples"""'], {}), "('Number of training examples')\n", (13158, 13189), True, 'import matplotlib.pyplot as plt\n'), ((13198, 13217), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (13208, 13217), True, 'import matplotlib.pyplot as plt\n'), ((13226, 13251), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 13, 0, 150]'], {}), '([0, 13, 0, 150])\n', (13234, 13251), True, 'import matplotlib.pyplot as plt\n'), ((13260, 13281), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (13268, 13281), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1126), 'numpy.shape', 'np.shape', (['cls.X'], {}), '(cls.X)\n', (1119, 1126), True, 'import numpy as np\n'), ((5503, 5524), 'numpy.array', 'np.array', (['error_train'], {}), '(error_train)\n', (5511, 5524), True, 'import numpy as np\n'), ((5588, 5607), 'numpy.array', 'np.array', (['error_val'], {}), '(error_val)\n', (5596, 5607), True, 'import numpy as np\n'), ((2160, 2180), 'numpy.ones', 'np.ones', (['(self.m, 1)'], {}), '((self.m, 1))\n', (2167, 2180), True, 'import numpy as np\n'), ((3635, 3650), 'numpy.ones', 'np.ones', (['self.m'], {}), '(self.m)\n', (3642, 3650), True, 'import numpy as np\n'), ((4594, 4609), 'numpy.ones', 'np.ones', (['self.m'], {}), '(self.m)\n', (4601, 4609), True, 'import numpy as np\n'), ((6531, 6551), 'numpy.ones', 'np.ones', (['(self.m, 1)'], {}), '((self.m, 1))\n', (6538, 6551), True, 'import numpy as np\n'), ((6714, 6734), 'numpy.shape', 'np.shape', (['self.Xtest'], {}), '(self.Xtest)\n', (6722, 6734), True, 'import numpy as np\n'), ((6902, 6936), 'numpy.ones', 'np.ones', (['(X_poly_test.shape[0], 1)'], {}), '((X_poly_test.shape[0], 1))\n', (6909, 6936), True, 'import numpy as np\n'), ((7098, 7117), 'numpy.shape', 'np.shape', (['self.Xval'], {}), '(self.Xval)\n', (7106, 7117), True, 'import numpy as np\n'), ((7279, 7312), 'numpy.ones', 'np.ones', (['(X_poly_val.shape[0], 1)'], {}), '((X_poly_val.shape[0], 1))\n', (7286, 7312), True, 'import numpy as np\n'), ((11948, 11986), 'numpy.random.permutation', 'np.random.permutation', (['X_poly.shape[0]'], {}), '(X_poly.shape[0])\n', (11969, 11986), True, 'import numpy as np\n'), ((12084, 12126), 'numpy.random.permutation', 'np.random.permutation', (['X_poly_val.shape[0]'], {}), '(X_poly_val.shape[0])\n', (12105, 12126), True, 'import numpy as np\n'), ((12457, 12516), 'ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg.trainLinearReg', 'trainLinearReg', (['X_poly_train_rand', 'y_train_rand', 'lambda_val'], {}), '(X_poly_train_rand, y_train_rand, lambda_val)\n', (12471, 12516), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg\n'), ((12692, 12752), 'ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction.linearRegCostFunction', 'linearRegCostFunction', (['X_poly_val_rand', 'y_val_rand', 'theta', '(0)'], {}), '(X_poly_val_rand, y_val_rand, theta, 0)\n', (12713, 12752), False, 'from 
ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction\n'), ((12598, 12615), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (12608, 12615), True, 'import numpy as np\n'), ((4671, 4690), 'numpy.shape', 'np.shape', (['self.Xval'], {}), '(self.Xval)\n', (4679, 4690), True, 'import numpy as np\n')]
|
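The tests above import `linearRegCostFunction`, `trainLinearReg`, and the other exercise functions from a package that is not shown in this row. For reference, the sketch below is one standard way to write the regularized cost and gradient that would reproduce the expected values quoted in the tests (J of about 303.993 and a gradient of about [-15.30, 598.25] at theta = [1, 1] with lambda = 1); the function name and argument shapes are assumptions, not the repository's actual implementation.

import numpy as np

def linear_reg_cost_function(X, y, theta, _lambda):
    """Regularized linear regression cost and gradient.
    Assumed shapes: X is (m, n+1) with a bias column, y is (m, 1), theta is (n+1, 1) or (n+1,)."""
    m = y.shape[0]
    theta = np.reshape(theta, (-1, 1))
    residual = X.dot(theta) - y
    cost = np.sum(residual ** 2) / (2.0 * m) \
        + (_lambda / (2.0 * m)) * np.sum(theta[1:] ** 2)
    grad = X.T.dot(residual) / m
    grad[1:] += (_lambda / m) * theta[1:]    # do not regularize the bias term
    return cost, grad.flatten()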
###################################################################
# This implementation is based on the following paper:
#
# <NAME> and <NAME>. Automatic data and computation
# decomposition on distributed-memory parallel computers.
# ACM Transactions on Programming Languages and Systems,
# 24(1):1–50, Jan. 2002.
#
# Algorithm of Figure 5
#
###################################################################
import heterocl as hcl
import numpy as np
def top_adi(Nx=20, Ny=20, NT=20, Dx=0.1, Dy=0.1, DT=0.1, B1=0.1, B2=0.1,
            mu1=0.1, mu2=0.1, a=0.1, b=0.1, c=0.1, d=0.1, e=0.1, f=0.1,
            dtype=hcl.Int(), target=None):
    hcl.init(dtype)
    u = hcl.placeholder((Nx, Ny), "u")
    v = hcl.placeholder((Nx, Ny), "v")
    p = hcl.placeholder((Nx, Ny), "p")
    q = hcl.placeholder((Nx, Ny), "q")

    def kernel_adi(u, v, p, q):
        def sweep(u, v, p, q):
            with hcl.for_(1, Ny - 1, name="L1") as i:
                v[0][i] = hcl.scalar(1.0)
                p[i][0] = hcl.scalar(0.0)
                q[i][0] = v[0][i]
                with hcl.for_(1, Nx - 1, name="L2") as j:
                    p[i][j] = -1.0 * c / (a * p[i][j - 1] + b)
                    q[i][j] = (-1.0 * d * u[j][i - 1] + (1.0 + 2.0 * d) * u[j][i] - f * u[j][i + 1] - a * q[i][j - 1])/(a * p[i][j - 1] + b)
                v[Nx - 1][i] = hcl.scalar(1.0)
                with hcl.for_(Nx - 2, 0, -1, name="L3") as j:
                    v[j][i] = p[i][j] * v[j + 1][i] + q[i][j]

            with hcl.for_(1, Nx - 1, name="L4") as i:
                u[i][0] = hcl.scalar(1.0)
                p[i][0] = hcl.scalar(0.0)
                q[i][0] = u[i][0]
                with hcl.for_(1, Ny - 1, name="L5") as j:
                    p[i][j] = -1.0 * f / (d * p[i][j - 1] + e)
                    q[i][j] = (-1.0 * a * v[i - 1][j] + (1.0 + 2 * a) * v[i][j] - c * v[i + 1][j] - d * q[i][j - 1])/(d * p[i][j - 1] + e)
                u[i][Ny - 1] = hcl.scalar(1.0)
                with hcl.for_(Ny - 2, 0, -1, name="L6") as j:
                    u[i][j] = p[i][j] * u[i][j + 1] + q[i][j]

        hcl.mutate((NT,), lambda m: sweep(u, v, p, q), "main_loop")

    s = hcl.create_schedule([u, v, p, q], kernel_adi)

    #### Apply customizations ####
    main_loop = kernel_adi.main_loop
    #s[main_loop].pipeline(main_loop.L1)
    #s[main_loop].pipeline(main_loop.L4)
    #### Apply customizations ####

    return hcl.build(s, target=target)


def adi_golden(N, TSTEPS, Dx, Dy, DT, B1, B2, mu1, mu2, a, b, c, d, e, f, u, v, p, q):
    for t in range(TSTEPS):
        ## Column sweep
        for i in range(1, N - 1):
            v[0][i] = 1.0
            p[i][0] = 0.0
            q[i][0] = v[0][i]
            for j in range(1, N - 1):
                p[i][j] = -1.0 * c / (a * p[i][j - 1] + b)
                q[i][j] = (-1.0 * d * u[j][i - 1] + (1.0 + 2.0 * d) * u[j][i] - f * u[j][i + 1] - a * q[i][j - 1])/(a * p[i][j - 1] + b)
            v[N - 1][i] = 1.0
            for j in range(N - 2, 0, -1):
                v[j][i] = p[i][j] * v[j + 1][i] + q[i][j]

        ## Row sweep
        for i in range(1, N - 1):
            u[i][0] = 1.0
            p[i][0] = 0.0
            q[i][0] = u[i][0]
            for j in range(1, N - 1):
                p[i][j] = -1.0 * f / (d * p[i][j - 1] + e)
                q[i][j] = (-1.0 * a * v[i - 1][j] + (1.0 + 2.0 * a) *
                           v[i][j] - c * v[i + 1][j] - d * q[i][j - 1])/(d * p[i][j - 1] + e)
            u[i][N - 1] = 1.0
            for j in range(N - 2, 0, -1):
                u[i][j] = p[i][j] * u[i][j + 1] + q[i][j]


def main(Nx=20, Ny=20, NT=20, Dx=0.1, Dy=0.1, DT=0.1, B1=0.1, B2=0.1,
         mu1=0.1, mu2=0.1, a=0.1, b=0.1, c=0.1, d=0.1, e=0.1, f=0.1,
         dtype=hcl.Float(32), target=None):
    u = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    v = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    p = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    q = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    f = top_adi(Nx, Ny, NT, Dx, Dy, DT, B1, B2, mu1, mu2, a, b, c, d, e, f, dtype, target)
    f(u, v, p, q)


if __name__ == "__main__":
    main()
|
[
"heterocl.for_",
"heterocl.placeholder",
"numpy.random.randint",
"heterocl.build",
"heterocl.create_schedule",
"heterocl.init",
"heterocl.scalar",
"heterocl.Int",
"heterocl.Float"
] |
[((621, 630), 'heterocl.Int', 'hcl.Int', ([], {}), '()\n', (628, 630), True, 'import heterocl as hcl\n'), ((651, 666), 'heterocl.init', 'hcl.init', (['dtype'], {}), '(dtype)\n', (659, 666), True, 'import heterocl as hcl\n'), ((675, 705), 'heterocl.placeholder', 'hcl.placeholder', (['(Nx, Ny)', '"""u"""'], {}), "((Nx, Ny), 'u')\n", (690, 705), True, 'import heterocl as hcl\n'), ((714, 744), 'heterocl.placeholder', 'hcl.placeholder', (['(Nx, Ny)', '"""v"""'], {}), "((Nx, Ny), 'v')\n", (729, 744), True, 'import heterocl as hcl\n'), ((753, 783), 'heterocl.placeholder', 'hcl.placeholder', (['(Nx, Ny)', '"""p"""'], {}), "((Nx, Ny), 'p')\n", (768, 783), True, 'import heterocl as hcl\n'), ((792, 822), 'heterocl.placeholder', 'hcl.placeholder', (['(Nx, Ny)', '"""q"""'], {}), "((Nx, Ny), 'q')\n", (807, 822), True, 'import heterocl as hcl\n'), ((2176, 2221), 'heterocl.create_schedule', 'hcl.create_schedule', (['[u, v, p, q]', 'kernel_adi'], {}), '([u, v, p, q], kernel_adi)\n', (2195, 2221), True, 'import heterocl as hcl\n'), ((2432, 2459), 'heterocl.build', 'hcl.build', (['s'], {'target': 'target'}), '(s, target=target)\n', (2441, 2459), True, 'import heterocl as hcl\n'), ((3761, 3774), 'heterocl.Float', 'hcl.Float', (['(32)'], {}), '(32)\n', (3770, 3774), True, 'import heterocl as hcl\n'), ((3799, 3835), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(Nx, Ny)'}), '(10, size=(Nx, Ny))\n', (3816, 3835), True, 'import numpy as np\n'), ((3863, 3899), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(Nx, Ny)'}), '(10, size=(Nx, Ny))\n', (3880, 3899), True, 'import numpy as np\n'), ((3927, 3963), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(Nx, Ny)'}), '(10, size=(Nx, Ny))\n', (3944, 3963), True, 'import numpy as np\n'), ((3991, 4027), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(Nx, Ny)'}), '(10, size=(Nx, Ny))\n', (4008, 4027), True, 'import numpy as np\n'), ((905, 935), 'heterocl.for_', 'hcl.for_', (['(1)', '(Ny - 1)'], {'name': '"""L1"""'}), "(1, Ny - 1, name='L1')\n", (913, 935), True, 'import heterocl as hcl\n'), ((968, 983), 'heterocl.scalar', 'hcl.scalar', (['(1.0)'], {}), '(1.0)\n', (978, 983), True, 'import heterocl as hcl\n'), ((1010, 1025), 'heterocl.scalar', 'hcl.scalar', (['(0.0)'], {}), '(0.0)\n', (1020, 1025), True, 'import heterocl as hcl\n'), ((1353, 1368), 'heterocl.scalar', 'hcl.scalar', (['(1.0)'], {}), '(1.0)\n', (1363, 1368), True, 'import heterocl as hcl\n'), ((1511, 1541), 'heterocl.for_', 'hcl.for_', (['(1)', '(Nx - 1)'], {'name': '"""L4"""'}), "(1, Nx - 1, name='L4')\n", (1519, 1541), True, 'import heterocl as hcl\n'), ((1574, 1589), 'heterocl.scalar', 'hcl.scalar', (['(1.0)'], {}), '(1.0)\n', (1584, 1589), True, 'import heterocl as hcl\n'), ((1616, 1631), 'heterocl.scalar', 'hcl.scalar', (['(0.0)'], {}), '(0.0)\n', (1626, 1631), True, 'import heterocl as hcl\n'), ((1957, 1972), 'heterocl.scalar', 'hcl.scalar', (['(1.0)'], {}), '(1.0)\n', (1967, 1972), True, 'import heterocl as hcl\n'), ((1081, 1111), 'heterocl.for_', 'hcl.for_', (['(1)', '(Nx - 1)'], {'name': '"""L2"""'}), "(1, Nx - 1, name='L2')\n", (1089, 1111), True, 'import heterocl as hcl\n'), ((1390, 1424), 'heterocl.for_', 'hcl.for_', (['(Nx - 2)', '(0)', '(-1)'], {'name': '"""L3"""'}), "(Nx - 2, 0, -1, name='L3')\n", (1398, 1424), True, 'import heterocl as hcl\n'), ((1687, 1717), 'heterocl.for_', 'hcl.for_', (['(1)', '(Ny - 1)'], {'name': '"""L5"""'}), "(1, Ny - 1, name='L5')\n", (1695, 1717), True, 'import heterocl as hcl\n'), ((1994, 
2028), 'heterocl.for_', 'hcl.for_', (['(Ny - 2)', '(0)', '(-1)'], {'name': '"""L6"""'}), "(Ny - 2, 0, -1, name='L6')\n", (2002, 2028), True, 'import heterocl as hcl\n')]
|
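Since the row bundles both the HeteroCL kernel and a pure-Python golden model, one natural check is to run both on identical inputs and compare the results. The sketch below does that with the default coefficients, mirroring how `main()` calls the built function directly on NumPy arrays; the tolerance and the helper name `check_adi` are illustrative assumptions.

import numpy as np

def check_adi(Nx=20, Ny=20, NT=20):
    rng = np.random.RandomState(0)
    u = rng.randint(10, size=(Nx, Ny)).astype(np.float32)
    v = rng.randint(10, size=(Nx, Ny)).astype(np.float32)
    p = rng.randint(10, size=(Nx, Ny)).astype(np.float32)
    q = rng.randint(10, size=(Nx, Ny)).astype(np.float32)
    u_ref, v_ref, p_ref, q_ref = u.copy(), v.copy(), p.copy(), q.copy()

    kernel = top_adi(Nx, Ny, NT, dtype=hcl.Float(32))
    kernel(u, v, p, q)                     # runs the generated kernel in place
    adi_golden(Nx, NT, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
               0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
               u_ref, v_ref, p_ref, q_ref)
    print(np.allclose(u, u_ref, atol=1e-3),
          np.allclose(v, v_ref, atol=1e-3))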
# -*- coding: utf-8 -*-
# Copyright (c) CKM Analytix Corp. All rights reserved.
# Authors: <NAME> (<EMAIL>), <NAME> (<EMAIL>)
"""
Metrics for determining quality of community structure
"""
import numpy as np
from scipy.sparse import identity
__all__ = ['modularity_r', 'modularity_density', 'mula_modularity_density']
def cluster_total_weight(adj_r, c, cluster_num, dict_bool):
"""Determines the 2*total weight of a community.
Parameters
----------
adj_r : SciPy sparse matrix (csr or csc)
The N x N rescaled Adjacency matrix constructed from N x N adjacency
matrix of the graph and scale 'r'.
c : Integer array
Array of community labels for the nodes in the graph as ordered by the
adjacency matrix.
cluster_num : Integer
Label of the community of interest.
dict_bool : dictionary
Tracks the nodes in each community, with cluster labels as dictionary
keys, and the corresponding boolean arrays (c == label) as values
Returns
-------
float
Twice the total weight of all nodes in the rescaled topology of
cluster 'cluster_num'.
"""
bool_r = dict_bool[cluster_num]
zero = np.zeros(adj_r.shape[0], dtype=int)
zero[bool_r] = 1
return (adj_r[bool_r].dot(zero)).sum()
def cluster_total_volume(adj, c, cluster_num, dict_bool):
"""Determines the volume of a community.
Parameters
----------
adj : SciPy sparse matrix (csr or csc)
The N x N rescaled Adjacency matrix constructed from
N x N adjacency matrix of the graph and scale r.
c : Integer array
Array of community labels for the nodes in the graph as ordered
by the adjacency matrix.
cluster_num : Integer
Label of the community of interest.
dict_bool : dictionary
Tracks the nodes in each community, with cluster labels as dictionary-
keys, and the corresponding boolean arrays (c == label) as values.
Returns
-------
float
Total volume of all nodes in the rescaled topology of
cluster 'cluster_num'.
"""
return adj[dict_bool[cluster_num]].sum()
def modularity_r(adj, c, cluster_labels, r=0, dict_bool=None):
r"""Determines the modularity (of rescaled topology) for a subset of
communities in the network.
Parameters
----------
adj : SciPy sparse matrix (csr or csc)
The N x N Adjacency matrix of the graph of interest.
c : Integer array
Array of community labels for the nodes in the graph as ordered by
the adjacency matrix.
cluster_labels : Integer array or list
Array/list of unique cluster labels for which modularity is calculated.
r : float
Resolution of the topology: smaller 'r' favors larger communities,
while larger 'r' favors smaller communities.
dict_bool : dictionary, optional
Tracks the nodes in each community, with cluster labels as dictionary-
keys, and the corresponding boolean arrays (c == label) as values.
Returns
-------
float
total modularity (of rescaled topology) for a set of communities given
by 'cluster_labels'.
Examples
--------
>>> G = nx.karate_club_graph()
>>> adj = nx.to_scipy_sparse_matrix(G)
>>> c = fine_tuned_clustering_q(G)
>>> c
array([2, 2, 2, 2, 4, 4, 4, 2, 3, 3, 4, 2, 2, 2, 3, 3, 4, 2, 3, 2, 3, 2,
3, 1, 1, 1, 3, 1, 1, 3, 3, 1, 3, 3])
>>> modularity_r(adj, c, np.unique(c), r=0)
0.4197896120973044
>>> modularity_r(adj, c, [1, 2], r=0)
0.21301775147928995
Notes
-----
Modularity in [1]_,[2]_ is given as
.. math::
Q = \sum_{c_i \in C}\left [ \frac{|E_{c_i}^{in}|}{|E|} -
\left (\frac{2|E_{c_i}^{in}| +
|E_{c_i}^{out}|}{2|E|} \right )^2 \right ],
where $C$ is the set of all communities. $c_i$ is a specific community in
$C$, $|E_{c_i}^{in}|$ is the total weight of edges between nodes within
community $c_i$, $|E_{c_i}^{out}|$ is the total weight of edges from
nodes in community $c_i$ to the nodes outside $c_i$, and $|E|$ is the
total weight of edges in the network.
Modularity for rescaled topology (see [1]_) at scale $r$ is given as
.. math::
Q_r = \sum_{c_i \in C}\left [ \frac{2|E_{c_i}^{in}| +r|c_i|}{2|E| +
r|V|} - \left (\frac{2|E_{c_i}^{in}| + |E_{c_i}^{out}| +
r|c_i|}{2|E| + r|V|} \right )^2 \right ],
where $|c_i|$ is the number of nodes in a specific community. $|V|$ is the
total number of nodes in the entire network structure.
References
----------
.. [1] <NAME>, <NAME>, <NAME>. Community detection via maximization
of modularity and its variants. IEEE Transactions on Computational
Social Systems. 1(1), 46–65, 2014
.. [2] <NAME>, <NAME>. Finding and evaluating community structure in
networks. Phys. Rev. E. 69, 026113, 2004
"""
Identity = identity(n=(adj).shape[0])
# Rescaled adjacency matrix
adj = adj + (Identity*r)
if (dict_bool is None):
# Track the nodes in each community
dict_bool = {}
for label in np.unique(cluster_labels):
dict_bool[label] = (c == label)
one = np.ones(adj.shape[0], dtype=int)
# Twice the total weight of all nodes in the rescaled topology
total_weight = (adj.dot(one)).sum()
# Function to determine modularity of each community in the network
modularize = np.vectorize(lambda cluster_num:
(cluster_total_weight(adj, c,
cluster_num, dict_bool)/total_weight) -
((cluster_total_volume(adj, c, cluster_num,
dict_bool)/total_weight)**2))
# Total modularity (of rescaled topology) for a set of communities
# given by 'cluster_labels'
return np.sum(modularize(cluster_labels))
def split_penalty(adj, c, ci, conn_clusters, total_weight, dict_bool):
"""Determines total Split Penalty density for splitting edges between a
community and a set of communities.
Parameters
----------
adj : SciPy sparse matrix (csr or csc)
The N x N Adjacency matrix of the graph of interest.
c : Integer array
Current array of community labels for the nodes in the graph as ordered
by the adjacency matrix.
ci : Integer
Label of the community of interest.
conn_clusters : Integer array
Array of unique labels of communities that may be connected
to the community 'ci'.
total_weight : float
Twice the total weight of all nodes in the adjacency matrix.
dict_bool : dictionary
Tracks the nodes in each community, with cluster labels as dictionary-
keys, and the corresponding boolean arrays (c == label) as values.
Returns
-------
float
Total Split Penalty density for splitting edges between 'ci' and
a set of other communities in 'conn_clusters'.
"""
bool_ci = dict_bool[ci]
adj_ci = adj[bool_ci]
# Make sure the array of unique labels do not contain 'ci'
search_bool = (conn_clusters != ci)
# Determine total split penalty density of splitting edges between
# 'ci' and 'conn_clusters'
if(np.sum(search_bool) > 0):
penalty = sum_penalty(adj_ci, c, conn_clusters[search_bool],
dict_bool)/(np.count_nonzero(bool_ci)
* total_weight)
else:
penalty = 0
# Total Split Penalty density for splitting edges between 'ci' and
# a set of other communities in 'conn_clusters'
return penalty
def individual_penalty(adj_ci, c, cj, dict_bool):
"""Determines partial component of split penalty density for splitting edges
between two communities.
Parameters
----------
adj_ci : SciPy sparse matrix (csr or csc)
The subset of N X N adjacency matrix: adj[c == ci].
c : Integer array
Array of community labels for the nodes in the graph as ordered
by the adjacency matrix.
cj : Integer
Label of a community connected to the community 'ci'.
dict_bool : dictionary
Tracks the nodes in each community, with cluster labels as dictionary-
keys, and the corresponding boolean arrays (c == label) as values.
Returns
-------
float
Partial component of split penalty density for splitting edges
between 'ci' and 'cj'.
"""
bool_cj = dict_bool[cj]
zero = np.zeros(len(c), dtype=int)
zero[bool_cj] = 1
# Determine partial component of split penalty density for splitting edges
# between 'ci' and 'cj'
return ((((adj_ci.dot(zero)).sum())**2)/np.count_nonzero(bool_cj))
def sum_penalty(adj_ci, c, conn_clusters, dict_bool):
"""Determines partial component of total Split Penalty density for splitting
edges between a community and a set of communities.
Parameters
----------
adj_ci : SciPy sparse matrix (csr or csc)
The subset of N X N adjacency matrix: adj[c == ci].
c : Integer array
Array of community labels for the nodes in the graph as ordered
by the adjacency matrix.
conn_clusters : Integer array
Array of unique labels of communities that may be connected
to community 'ci'.
dict_bool : dictionary
Tracks the nodes in each community, with cluster labels as dictionary-
keys, and the corresponding boolean arrays (c == label) as values.
Returns
-------
float
Partial component of total Split Penalty density for splitting edges
between 'ci' and a set of other communities in 'conn_clusters'.
"""
# Function to determine partial component of total Split Penalty density
# for splitting edges between 'ci' and 'cj'
penalize = np.vectorize(lambda cj: individual_penalty(adj_ci, c,
cj, dict_bool))
# Partial component of total Split Penalty density for splitting edges
# between 'ci'and a set of other communities in 'conn_clusters'
return np.sum(penalize(conn_clusters))
def density_based_modularity(adj, c, ci, total_weight, dict_bool):
"""Determines partial component of modularity density of a community.
Parameters
----------
adj : SciPy sparse matrix (csr or csc)
The subset of N X N adjacency matrix: adj[c == ci].
c : Integer array
Array of community labels for the nodes in the graph as ordered
by the adjacency matrix.
ci : Integer
Label of community of interest.
total_weight : float
Twice the total weight of all nodes in the adjacency matrix.
dict_bool : dictionary
Tracks the nodes in each community, with cluster labels as dictionary-
keys, and the corresponding boolean arrays (c == label) as values.
Returns
-------
float
Partial component of modularity density of a community 'ci'.
"""
# Determine Internal community density of 'ci'
comm_density = community_density(adj, c, ci, dict_bool)
first_term = (cluster_total_weight(adj, c,
ci, dict_bool) * comm_density)/total_weight
second_term = ((cluster_total_volume(adj, c,
ci, dict_bool) * comm_density)/total_weight)**2
# Partial component of modularity density of 'ci'
return (first_term - second_term)
def community_density(adj, c, ci, dict_bool):
"""Determines internal community density of a community.
Parameters
----------
adj : SciPy sparse matrix (csr or csc)
The subset of N X N adjacency matrix: adj[c == ci].
c : Integer array
Array of community labels for the nodes in the graph as ordered
by the adjacency matrix.
ci : Integer
Label of community of interest.
dict_bool : dictionary
Tracks the nodes in each community, with cluster labels as dictionary-
keys, and the corresponding boolean arrays (c == label) as values.
Returns
-------
float
Determines internal community density of community 'ci'.
"""
bool_ci = dict_bool[ci]
zero = np.zeros(adj.shape[0], dtype=int)
zero[bool_ci] = 1
# Twice the weight of all edges in the cluster 'ci'
community_sum = (adj[bool_ci].dot(zero)).sum()
# Number of nodes in community 'ci'
size = np.count_nonzero(bool_ci)
# Internal community density of 'ci'
if(size <= 1):
density = 0
else:
density = (community_sum)/(size*(size - 1))
# Internal community density of 'ci'
return density
def compute_modularity_density(adj, c, conn_clusters, cluster_labels,
total_weight, dict_bool):
"""Determines modularity density of a set of communities.
Parameters
----------
adj : SciPy sparse matrix (csr or csc)
The N x N Adjacency matrix of the graph of interest.
c : Integer array
Array of community labels for the nodes in the graph as ordered
by the adjacency matrix.
conn_clusters : Integer array
Array of unique labels of communities that may be connected to
communities in 'cluster_labels'.
cluster_labels : Integer array or list
Array/list of unique labels of communities of interest.
total_weight : float
Twice the total weight of all nodes in the adjacency matrix.
dict_bool : dictionary
Tracks the nodes in each community, with cluster labels as dictionary-
keys, and the corresponding boolean arrays (c == label) as values.
Returns
-------
float
Determines modularity density of a set of communities in
'cluster_labels' with a set of connected communities
in 'conn_clusters'.
"""
# Function to determine modularity density of 'ci' with connected
# communities in 'conn_clusters'
mod_density = np.vectorize(lambda ci: density_based_modularity(adj, c, ci,
total_weight, dict_bool) - split_penalty(adj, c,
ci, conn_clusters, total_weight, dict_bool))
# Modularity density of a set of communities in 'cluster_labels' with a
# set of connected communities in 'conn_clusters'
return np.sum(mod_density(cluster_labels))
def modularity_density(adj, c, cluster_labels,
dict_bool=None, conn_clusters=None):
r"""Determines modularity_density of a set of communities.
Parameters
----------
adj : SciPy sparse matrix (csr or csc)
The N x N Adjacency matrix of the graph of interest.
c : Integer array
Current array of community labels for the nodes in the graph as ordered
by the adjacency matrix.
cluster_labels : Integer array or list
Array/list of unique labels of communities of interest.
dict_bool : dictionary, optional
Tracks the nodes in each community, with cluster labels as dictionary-
keys, and the corresponding boolean arrays (c == label) as values.
conn_clusters : Integer array, optional
Array of unique labels of communities that may be connected to
communities in 'cluster_labels'. It is helpful to send this input when
computing modularity density for each community in order to reduce the
computational time
Returns
-------
float
Determines modularity_density of a set of communities
in 'cluster_labels'.
Examples
--------
>>> G = nx.karate_club_graph()
>>> adj = nx.to_scipy_sparse_matrix(G)
>>> c = fine_tuned_clustering_qds(G)
>>> c
array([4, 4, 4, 4, 2, 2, 2, 4, 3, 3, 2, 4, 4, 4, 3, 3, 2, 4, 3, 4, 3, 4,
3, 3, 1, 1, 3, 3, 3, 3, 3, 1, 3, 3])
>>> modularity_density(adj, c, np.unique(c))
0.23126500169457212
>>> modularity_density(adj, c, [1, 2])
0.06929093698324468
>>> modularity_density(adj, c, [1])
0.028788874942721095
>>> modularity_density(adj, c, [1], conn_clusters = np.array([3, 4]))
0.028788874942721095
Notes
-----
Modularity density in [1] is given as
.. math::
Q = \sum_{c_i \in C}\left [ \frac{|E_{c_i}^{in}|}{|E|}d_{c_i} -
\left (\frac{2|E_{c_i}^{in}| +
|E_{c_i}^{out}|}{2|E|}d_{c_i} \right )^2 -
\sum_{c_j \in C, c_j \neq c_i}
\frac{|E_{c_i, c_j}|}{2|E|}d_{c_i,c_j} \right ],
d_{c_i} = \frac{2|E_{c_i}^{in}|}{|c_i|\left ( |c_i| - 1 \right )},
d_{c_i,c_j} = \frac{|E_{c_i, c_j}|}{|c_i||c_j|}.
where $C$ is the set of all communities. $c_i$ is a specific community in
$C$, $|E_{c_i}^{in}|$ is the total weight of edges between nodes within
community $c_i$, $|E_{c_i}^{out}|$ is the total weight of edges from
nodes in community $c_i$ to the nodes outside $c_i$, and $|E|$ is the
total weight of edges in the network. $d_{c_i}$ is the internal community
density of community $c_i$, $d_{c_i, c_j}$ is the pair-wise density between
communities $c_i$ and $c_j$.
References
----------
.. [1] <NAME>, <NAME>, <NAME>. Community detection via maximization
of modularity and its variants. IEEE Transactions on Computational
Social Systems. 1(1), 46–65, 2014
"""
one = np.ones(adj.shape[0], dtype=int)
# Twice the total weight of all nodes in the adjacency matrix
total_weight = (adj.dot(one)).sum()
# Array of unique labels of communities in the network
unique_clusters = np.unique(c)
if (dict_bool is None):
# Track the nodes in each community
dict_bool = {}
for label in unique_clusters:
dict_bool[label] = (c == label)
if (conn_clusters is None):
# Array of labels of communities that may be connected to communities
# in 'cluster_labels'
conn_clusters = unique_clusters
# Compute modularity density of a set of communities in 'cluster_labels'
return compute_modularity_density(adj, c, conn_clusters, cluster_labels,
total_weight, dict_bool)
def dotdot(adj, vec1, vec2):
"""Computes the dot product of a matrix with two vectors
Parameters
----------
adj : Numpy Matrix or SciPy sparse matrix (csr or csc)
The N x N Adjacency matrix of the graph of interest.
vec1 : first Numpy array
vec2 : second Numpy array
Returns
-------
scalar (float, int, boolean, etc.)
Resulting scalar of dot product
"""
return ((((adj).dot(vec1)).dot(vec2)))
def norm_vector(vec):
"""Normalizes vector for modularity density calculation
Parameters
----------
vec : Numpy array to be normalized
Returns
-------
Numpy array
"""
mod = (np.count_nonzero(vec))**0.5
vec = vec/mod
return vec
def mula_modularity_density(adj, c, dict_vec=None):
r"""Determines modularity_density of a set of communities using a metric
that is free from bias and faster to compute.
Parameters
----------
adj : SciPy sparse matrix (csr or csc)
The N x N Adjacency matrix of the graph of interest.
c : Integer array
Current array of community labels for the nodes in the graph as ordered
by the adjacency matrix.
dict_vec : dictionary, optional
Tracks the nodes in each community, with cluster labels as dictionary-
keys, and the corresponding boolean arrays (c == label) as values.
Returns
-------
float
Modularity density of the full set of communities in 'c'.
Examples
--------
>>> G = nx.karate_club_graph()
>>> adj = nx.to_scipy_sparse_matrix(G)
>>> c = fine_tuned_clustering_qds(G)
>>> c
array([4, 4, 4, 4, 2, 2, 2, 4, 3, 3, 2, 4, 4, 4, 3, 3, 2, 4, 3, 4, 3, 4,
3, 3, 1, 1, 3, 3, 3, 3, 3, 1, 3, 3])
>>> Q = mula_modularity_density(adj, c)
Notes
-----
Modularity density in [1] is given as
.. math::
Q = \sum_{c \in C}\Bigg\{\frac{\sum_{i,j \in c}T_{ij}}{n_c} - \sum_{c^{\prime} \in C-c}\Bigg( \frac{\sum_{{i \in c,}{j \in c^{\prime}}}T_{ij}}{\sqrt{n_c n_{c^{\prime}}}}\Bigg)\Bigg\}
where:
- each cluster ${c \in C}$ is represented by an indicator vector ${\vec{v}_c = [v_{c_i}] \in \mathbb{R}^{|V|} : v_{c_i} = 1}$ if ${i \in c}$, else $0$
- \hat{n}_c = \frac{\vec{v}_c}{|\vec{v}_c|}
References
----------
.. [1] <NAME>, <NAME>. A new measure of modularity density for
community detection. arXiv:1908.08452 2019.
"""
cluster_labels = np.unique(c)
    Nsum = 0
    collect_dict_vec = dict_vec is None  # only build indicator vectors when none were supplied
    if collect_dict_vec:
        dict_vec = {}
    for label in cluster_labels:
        if collect_dict_vec:
            # build and cache the normalized indicator vector of cluster 'label'
            dict_vec[label] = norm_vector((c == label)*1)
        Nsum += dict_vec[label]
# penalty
penalty = dotdot(adj, Nsum, Nsum)
modularize = np.vectorize(lambda label: dotdot(adj, dict_vec[label],
dict_vec[label]))
# Compute reduced modularity density of a set of communities
# in 'cluster_labels'
metric = 2*np.sum(modularize(cluster_labels)) - penalty
return(metric)
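# --- illustrative usage sketch (not part of the original module) ---
# Mirrors the docstring examples above. Assumes networkx < 3.0 is installed (where
# nx.to_scipy_sparse_matrix is still available) and uses hard-coded community labels for
# Zachary's karate club instead of calling a clustering routine.
if __name__ == "__main__":
    import networkx as nx
    G = nx.karate_club_graph()
    adj = nx.to_scipy_sparse_matrix(G)
    c = np.array([4, 4, 4, 4, 2, 2, 2, 4, 3, 3, 2, 4, 4, 4, 3, 3, 2, 4, 3, 4, 3, 4,
                  3, 3, 1, 1, 3, 3, 3, 3, 3, 1, 3, 3])
    print(modularity_density(adj, c, np.unique(c)))  # full partition
    print(modularity_density(adj, c, [1, 2]))         # only communities 1 and 2
    print(mula_modularity_density(adj, c))            # bias-free variant of the metric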
|
[
"numpy.count_nonzero",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"scipy.sparse.identity",
"numpy.unique"
] |
[((1202, 1237), 'numpy.zeros', 'np.zeros', (['adj_r.shape[0]'], {'dtype': 'int'}), '(adj_r.shape[0], dtype=int)\n', (1210, 1237), True, 'import numpy as np\n'), ((5019, 5043), 'scipy.sparse.identity', 'identity', ([], {'n': 'adj.shape[0]'}), '(n=adj.shape[0])\n', (5027, 5043), False, 'from scipy.sparse import identity\n'), ((5308, 5340), 'numpy.ones', 'np.ones', (['adj.shape[0]'], {'dtype': 'int'}), '(adj.shape[0], dtype=int)\n', (5315, 5340), True, 'import numpy as np\n'), ((12277, 12310), 'numpy.zeros', 'np.zeros', (['adj.shape[0]'], {'dtype': 'int'}), '(adj.shape[0], dtype=int)\n', (12285, 12310), True, 'import numpy as np\n'), ((12494, 12519), 'numpy.count_nonzero', 'np.count_nonzero', (['bool_ci'], {}), '(bool_ci)\n', (12510, 12519), True, 'import numpy as np\n'), ((17395, 17427), 'numpy.ones', 'np.ones', (['adj.shape[0]'], {'dtype': 'int'}), '(adj.shape[0], dtype=int)\n', (17402, 17427), True, 'import numpy as np\n'), ((17617, 17629), 'numpy.unique', 'np.unique', (['c'], {}), '(c)\n', (17626, 17629), True, 'import numpy as np\n'), ((20949, 20961), 'numpy.unique', 'np.unique', (['c'], {}), '(c)\n', (20958, 20961), True, 'import numpy as np\n'), ((5226, 5251), 'numpy.unique', 'np.unique', (['cluster_labels'], {}), '(cluster_labels)\n', (5235, 5251), True, 'import numpy as np\n'), ((7358, 7377), 'numpy.sum', 'np.sum', (['search_bool'], {}), '(search_bool)\n', (7364, 7377), True, 'import numpy as np\n'), ((8826, 8851), 'numpy.count_nonzero', 'np.count_nonzero', (['bool_cj'], {}), '(bool_cj)\n', (8842, 8851), True, 'import numpy as np\n'), ((18878, 18899), 'numpy.count_nonzero', 'np.count_nonzero', (['vec'], {}), '(vec)\n', (18894, 18899), True, 'import numpy as np\n'), ((7495, 7520), 'numpy.count_nonzero', 'np.count_nonzero', (['bool_ci'], {}), '(bool_ci)\n', (7511, 7520), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.stats import gamma as RVgamma
# the gamma distribution considers a varying shape parameter and a scale parameter equal to 1
class HMM_approxSEIR_expanded:
def __init__( self, N, beta, rho, gamma, q, eta_zero, q_r, t_star ):
self.N = N
self.beta = beta
self.rho = rho
self.gamma = gamma
self.q = q
self.eta_zero = eta_zero
self.q_r = q_r
self.t_star = t_star
def eta_computation(self, T):
eta = np.zeros((4, T))
eta[:, 0] = self.eta_zero
pC = 1 - np.exp(-self.rho)
pR = 1 - np.exp(-self.gamma)
for t in range(1, T):
Kappa_eta_prev = np.array([[ np.exp(-self.beta*eta[2,t-1]), 1 - np.exp(-self.beta*eta[2,t-1]), 0, 0 ], [ 0, 1 - pC, pC, 0 ], [ 0, 0, 1 - pR, pR ], [ 0, 0, 0, 1 ]])
eta[:, t] = eta[:, t-1] @ Kappa_eta_prev
return eta
def filtering(self, y):
T = np.size(y[0, 0, :])
pC = 1 - np.exp(-self.rho)
pR = 1 - np.exp(-self.gamma)
pitt = np.zeros([4,T])
pitt[:,0]= self.eta_zero
pitt_expanded = np.zeros((4, 4, T))
pitt_expanded[0, :, 0] = pitt[:,0]
pitt_prev_expanded = np.zeros((4, 4, T))
pitt_prev_expanded[0, :, 0] = pitt[:,0]
Kappa = np.zeros([4,4,T-1])
pitt_expanded_q = np.zeros([4,4,T])
for t in range(1, T):
beta_restr = self.beta*(t< self.t_star) + self.beta*(np.exp(-self.q_r*(t-self.t_star)))*(t>= self.t_star)
Kappa_eta_prev = np.array([[ np.exp(-beta_restr*pitt[2,t-1]), 1 - np.exp(-beta_restr*pitt[2,t-1]), 0, 0 ], [ 0, 1 - pC, pC, 0 ], [ 0, 0, 1 - pR, pR ], [ 0, 0, 0, 1 ]])
Kappa[:,:,t-1] = Kappa_eta_prev
pitt_prev_expanded[:,:, t] = Kappa_eta_prev*( np.sum(pitt_expanded[:, :, t-1], 0) ).reshape(4,1)
#rho_vec = pitt_prev_expanded[:,:, t]*(1-self.q)
#rho_vec = rho_vec/np.sum(rho_vec)
pitt_expanded_q[:,:,t] = pitt_prev_expanded[:,:, t]*(1-self.q)
pitt_expanded_q[:,:,t] = pitt_expanded_q[:,:,t]/np.sum(pitt_expanded_q[:,:,t])
pitt_expanded[:,:, t] = y[:,:, t]/self.N + ( 1 - (np.sum( y[:,:, t] ))/(self.N) )*pitt_expanded_q[:,:,t]
pitt[:,t] = np.sum( pitt_expanded[:,:, t], 0 )
return pitt, Kappa, pitt_expanded, pitt_prev_expanded
def smoothing(self,pitt_expanded, pitt):
T = np.size(pitt_expanded[1,1,:])
pist = np.zeros((4, T))
pist[:,T-1] = np.sum(pitt_expanded[:,:,T-1],0)
L = np.zeros((4,4))
pist_expanded = np.zeros((4, 4, T))
pist_expanded[:,:,T-1] = pitt_expanded[:,:,T-1]
for t in range(T-1,1,-1):
pist[:,t-1] = np.sum(pist_expanded[:,:,t],1)
L[np.outer(pitt[:,t-1],np.ones(4))!=0] = np.transpose(pitt_expanded[:,:,t-1])[np.outer(pitt[:,t-1],np.ones(4))!=0] / np.outer(pitt[:,t-1],np.ones(4))[np.outer(pitt[:,t-1],np.ones(4))!=0]
pist_expanded[:,:,t-1] = np.outer(np.ones(4),pist[:,t-1]) * np.transpose(L)
pist[:,0] = np.sum(pist_expanded[:,:,1],1)
pist_expanded[0, :, 0] = pist[:,0]
return pist, pist_expanded
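# --- illustrative usage sketch (not part of the original module) ---
# The parameter values and the all-zero observation array below are placeholders chosen only
# to show the calling sequence (eta_computation -> filtering -> smoothing); the four states
# presumably correspond to the S/E/I/R compartments.
if __name__ == "__main__":
    T = 50
    eta_zero = np.array([0.99, 0.005, 0.005, 0.0])
    model = HMM_approxSEIR_expanded(N=1000, beta=0.5, rho=0.2, gamma=0.1,
                                    q=0.3, eta_zero=eta_zero, q_r=0.05, t_star=20)
    eta = model.eta_computation(T)            # (4, T) deterministic mean trajectory
    y = np.zeros((4, 4, T))                   # placeholder expanded observation counts
    pitt, Kappa, pitt_exp, pitt_prev = model.filtering(y)
    pist, pist_exp = model.smoothing(pitt_exp, pitt)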
|
[
"numpy.size",
"numpy.sum",
"numpy.zeros",
"numpy.transpose",
"numpy.ones",
"numpy.exp"
] |
[((547, 563), 'numpy.zeros', 'np.zeros', (['(4, T)'], {}), '((4, T))\n', (555, 563), True, 'import numpy as np\n'), ((999, 1018), 'numpy.size', 'np.size', (['y[0, 0, :]'], {}), '(y[0, 0, :])\n', (1006, 1018), True, 'import numpy as np\n'), ((1108, 1124), 'numpy.zeros', 'np.zeros', (['[4, T]'], {}), '([4, T])\n', (1116, 1124), True, 'import numpy as np\n'), ((1183, 1202), 'numpy.zeros', 'np.zeros', (['(4, 4, T)'], {}), '((4, 4, T))\n', (1191, 1202), True, 'import numpy as np\n'), ((1278, 1297), 'numpy.zeros', 'np.zeros', (['(4, 4, T)'], {}), '((4, 4, T))\n', (1286, 1297), True, 'import numpy as np\n'), ((1371, 1394), 'numpy.zeros', 'np.zeros', (['[4, 4, T - 1]'], {}), '([4, 4, T - 1])\n', (1379, 1394), True, 'import numpy as np\n'), ((1426, 1445), 'numpy.zeros', 'np.zeros', (['[4, 4, T]'], {}), '([4, 4, T])\n', (1434, 1445), True, 'import numpy as np\n'), ((2575, 2606), 'numpy.size', 'np.size', (['pitt_expanded[1, 1, :]'], {}), '(pitt_expanded[1, 1, :])\n', (2582, 2606), True, 'import numpy as np\n'), ((2629, 2645), 'numpy.zeros', 'np.zeros', (['(4, T)'], {}), '((4, T))\n', (2637, 2645), True, 'import numpy as np\n'), ((2668, 2705), 'numpy.sum', 'np.sum', (['pitt_expanded[:, :, T - 1]', '(0)'], {}), '(pitt_expanded[:, :, T - 1], 0)\n', (2674, 2705), True, 'import numpy as np\n'), ((2713, 2729), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (2721, 2729), True, 'import numpy as np\n'), ((2754, 2773), 'numpy.zeros', 'np.zeros', (['(4, 4, T)'], {}), '((4, 4, T))\n', (2762, 2773), True, 'import numpy as np\n'), ((3243, 3276), 'numpy.sum', 'np.sum', (['pist_expanded[:, :, 1]', '(1)'], {}), '(pist_expanded[:, :, 1], 1)\n', (3249, 3276), True, 'import numpy as np\n'), ((621, 638), 'numpy.exp', 'np.exp', (['(-self.rho)'], {}), '(-self.rho)\n', (627, 638), True, 'import numpy as np\n'), ((656, 675), 'numpy.exp', 'np.exp', (['(-self.gamma)'], {}), '(-self.gamma)\n', (662, 675), True, 'import numpy as np\n'), ((1037, 1054), 'numpy.exp', 'np.exp', (['(-self.rho)'], {}), '(-self.rho)\n', (1043, 1054), True, 'import numpy as np\n'), ((1072, 1091), 'numpy.exp', 'np.exp', (['(-self.gamma)'], {}), '(-self.gamma)\n', (1078, 1091), True, 'import numpy as np\n'), ((2405, 2438), 'numpy.sum', 'np.sum', (['pitt_expanded[:, :, t]', '(0)'], {}), '(pitt_expanded[:, :, t], 0)\n', (2411, 2438), True, 'import numpy as np\n'), ((2892, 2925), 'numpy.sum', 'np.sum', (['pist_expanded[:, :, t]', '(1)'], {}), '(pist_expanded[:, :, t], 1)\n', (2898, 2925), True, 'import numpy as np\n'), ((2226, 2258), 'numpy.sum', 'np.sum', (['pitt_expanded_q[:, :, t]'], {}), '(pitt_expanded_q[:, :, t])\n', (2232, 2258), True, 'import numpy as np\n'), ((3196, 3211), 'numpy.transpose', 'np.transpose', (['L'], {}), '(L)\n', (3208, 3211), True, 'import numpy as np\n'), ((2976, 3016), 'numpy.transpose', 'np.transpose', (['pitt_expanded[:, :, t - 1]'], {}), '(pitt_expanded[:, :, t - 1])\n', (2988, 3016), True, 'import numpy as np\n'), ((3170, 3180), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (3177, 3180), True, 'import numpy as np\n'), ((748, 782), 'numpy.exp', 'np.exp', (['(-self.beta * eta[2, t - 1])'], {}), '(-self.beta * eta[2, t - 1])\n', (754, 782), True, 'import numpy as np\n'), ((1541, 1578), 'numpy.exp', 'np.exp', (['(-self.q_r * (t - self.t_star))'], {}), '(-self.q_r * (t - self.t_star))\n', (1547, 1578), True, 'import numpy as np\n'), ((1636, 1672), 'numpy.exp', 'np.exp', (['(-beta_restr * pitt[2, t - 1])'], {}), '(-beta_restr * pitt[2, t - 1])\n', (1642, 1672), True, 'import numpy as np\n'), ((1893, 1930), 
'numpy.sum', 'np.sum', (['pitt_expanded[:, :, t - 1]', '(0)'], {}), '(pitt_expanded[:, :, t - 1], 0)\n', (1899, 1930), True, 'import numpy as np\n'), ((2958, 2968), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2965, 2968), True, 'import numpy as np\n'), ((3073, 3083), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (3080, 3083), True, 'import numpy as np\n'), ((783, 817), 'numpy.exp', 'np.exp', (['(-self.beta * eta[2, t - 1])'], {}), '(-self.beta * eta[2, t - 1])\n', (789, 817), True, 'import numpy as np\n'), ((1673, 1709), 'numpy.exp', 'np.exp', (['(-beta_restr * pitt[2, t - 1])'], {}), '(-beta_restr * pitt[2, t - 1])\n', (1679, 1709), True, 'import numpy as np\n'), ((2326, 2344), 'numpy.sum', 'np.sum', (['y[:, :, t]'], {}), '(y[:, :, t])\n', (2332, 2344), True, 'import numpy as np\n'), ((3034, 3044), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (3041, 3044), True, 'import numpy as np\n'), ((3106, 3116), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (3113, 3116), True, 'import numpy as np\n')]
|
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
from astropy.io import ascii
from pkg_resources import resource_filename
'''
Function to read in atomic line information for a given rest frame wavelength.
Or
For the line matching the closest wavelength.
Input :
lambda_rest :- Rest Frame wavelength (in \AA) of the line to match
method :- 'closest' -> If set will match the closest line.
'Exact' -> If set will match the exact wavelength.
Output: dic :- Dictionary with fval,lambda and species name.
Example: str=rb_setline(2796.3,'closest')
Written By: <NAME> Jan 2018, Python 2.7
Edit: <NAME> Sep 2018, Deprecated kwargs to be compatible with python 3
'''
def rb_setline(lambda_rest,method,linelist='atom'):
#if kwargs.has_key('linelist'):
# linelist=kwargs['linelist']
#else:
# linelist='LLS'
line_str=read_line_list(linelist)
wavelist=np.zeros((len(line_str),))
name = np.empty(len(line_str), dtype='object')
fval=np.zeros((len(line_str),))
if linelist=='atom':
gamma=np.zeros((len(line_str),))
for i in range(0,len(wavelist)):
wavelist[i]=np.double(line_str[i]['wrest'])
fval[i]=np.float(line_str[i]['fval'])
name[i]=np.str(line_str[i]['ion'])
if linelist=='atom':
gamma[i]=np.str(line_str[i]['gamma'])
if method=='Exact':
q= np.where( (np.abs(lambda_rest-wavelist) < 1e-3))
if linelist=='atom':
outstr={'wave':wavelist[q],'fval':fval[q],'name':name[q],'gamma':gamma[q]}
else:
outstr={'wave':wavelist[q],'fval':fval[q],'name':name[q]}
elif method=='closest':
idx=(np.abs(lambda_rest-wavelist)).argmin()
if linelist=='atom':
outstr={'wave':wavelist[idx],'fval':fval[idx],'name':name[idx],'gamma':gamma[idx]}
else:
outstr={'wave':wavelist[idx],'fval':fval[idx],'name':name[idx]}
else:
raise NameError('Specify the matching method, closest or Exact')
return outstr
def read_line_list(label):
if label=='atom':
filename=resource_filename('rbvfit','lines/atom_full.dat')
elif label == 'LLS':
filename=resource_filename('rbvfit','lines/lls.lst')
elif label == 'LLS Small':
filename=resource_filename('rbvfit','lines/lls_sub.lst')
elif label == 'DLA':
filename=resource_filename('rbvfit','lines/dla.lst')
else:
print('Give Correct LineList')
data = []
if label=='atom':
s=ascii.read(filename)
for line in range(0,len(s['col1'])):
source = {}
source['wrest'] = float(s['col2'][line])
source['ion'] = s['col1'][line]+' '+np.str(np.int(s['col2'][line]))
source['fval']=float(s['col3'][line])
source['gamma']=float(s['col4'][line])
data.append(source)
else:
f=open(filename,'r')
header1 = f.readline()
for line in f:
line = line.strip()
columns = line.split()
source = {}
source['wrest'] = float(columns[0])
source['ion'] = columns[1]+' '+columns[2]
source['fval']=float(columns[3])
data.append(source)
return data
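# --- illustrative usage sketch (not part of the original module) ---
# Mirrors the example in the module docstring; it requires the rbvfit package data
# (lines/atom_full.dat) to be installed so that resource_filename can locate the line list.
if __name__ == "__main__":
    line = rb_setline(2796.3, 'closest')   # nearest transition to 2796.3 Angstrom
    print(line['name'], line['wave'], line['fval'])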
|
[
"astropy.io.ascii.read",
"numpy.abs",
"numpy.double",
"numpy.float",
"pkg_resources.resource_filename",
"numpy.int",
"numpy.str"
] |
[((1244, 1275), 'numpy.double', 'np.double', (["line_str[i]['wrest']"], {}), "(line_str[i]['wrest'])\n", (1253, 1275), True, 'import numpy as np\n'), ((1286, 1315), 'numpy.float', 'np.float', (["line_str[i]['fval']"], {}), "(line_str[i]['fval'])\n", (1294, 1315), True, 'import numpy as np\n'), ((1326, 1352), 'numpy.str', 'np.str', (["line_str[i]['ion']"], {}), "(line_str[i]['ion'])\n", (1332, 1352), True, 'import numpy as np\n'), ((2077, 2127), 'pkg_resources.resource_filename', 'resource_filename', (['"""rbvfit"""', '"""lines/atom_full.dat"""'], {}), "('rbvfit', 'lines/atom_full.dat')\n", (2094, 2127), False, 'from pkg_resources import resource_filename\n'), ((2445, 2465), 'astropy.io.ascii.read', 'ascii.read', (['filename'], {}), '(filename)\n', (2455, 2465), False, 'from astropy.io import ascii\n'), ((1388, 1416), 'numpy.str', 'np.str', (["line_str[i]['gamma']"], {}), "(line_str[i]['gamma'])\n", (1394, 1416), True, 'import numpy as np\n'), ((2160, 2204), 'pkg_resources.resource_filename', 'resource_filename', (['"""rbvfit"""', '"""lines/lls.lst"""'], {}), "('rbvfit', 'lines/lls.lst')\n", (2177, 2204), False, 'from pkg_resources import resource_filename\n'), ((1455, 1485), 'numpy.abs', 'np.abs', (['(lambda_rest - wavelist)'], {}), '(lambda_rest - wavelist)\n', (1461, 1485), True, 'import numpy as np\n'), ((2243, 2291), 'pkg_resources.resource_filename', 'resource_filename', (['"""rbvfit"""', '"""lines/lls_sub.lst"""'], {}), "('rbvfit', 'lines/lls_sub.lst')\n", (2260, 2291), False, 'from pkg_resources import resource_filename\n'), ((1697, 1727), 'numpy.abs', 'np.abs', (['(lambda_rest - wavelist)'], {}), '(lambda_rest - wavelist)\n', (1703, 1727), True, 'import numpy as np\n'), ((2324, 2368), 'pkg_resources.resource_filename', 'resource_filename', (['"""rbvfit"""', '"""lines/dla.lst"""'], {}), "('rbvfit', 'lines/dla.lst')\n", (2341, 2368), False, 'from pkg_resources import resource_filename\n'), ((2611, 2634), 'numpy.int', 'np.int', (["s['col2'][line]"], {}), "(s['col2'][line])\n", (2617, 2634), True, 'import numpy as np\n')]
|
import datetime
import warnings
from copy import copy
from types import MappingProxyType
from typing import Sequence, Callable, Mapping, Union, TypeVar, TYPE_CHECKING
import numpy as np
import pandas as pd
import sidekick as sk
from .clinical_acessor import Clinical
from .metaclass import ModelMeta
from .. import fitting as fit
from .. import formulas
from ..diseases import Disease, DiseaseParams, disease as get_disease
from ..logging import log
from ..mixins import (
Meta,
WithParamsMixin,
WithDataModelMixin,
WithInfoMixin,
WithResultsMixin,
WithRegionDemography,
)
from ..packages import plt
from ..utils import today, not_implemented, extract_keys, param_property
T = TypeVar("T")
NOW = datetime.datetime.now()
TODAY = datetime.date(NOW.year, NOW.month, NOW.day)
DAY = datetime.timedelta(days=1)
pplt = sk.import_later("..plot", package=__package__)
if TYPE_CHECKING:
from ..model_group import ModelGroup
from pydemic_ui.model import UIProperty
class Model(
WithDataModelMixin,
WithInfoMixin,
WithResultsMixin,
WithParamsMixin,
WithRegionDemography,
metaclass=ModelMeta,
):
"""
Base class for all models.
"""
meta: Meta
class Meta:
model_name = "Model"
data_aliases = {}
# Initial values
state: np.ndarray = None
initial_cases: float = sk.lazy(lambda self: self._initial_cases())
initial_infected: float = sk.lazy(lambda self: self._initial_infected())
# Initial time
date: datetime.date = None
time: float = 0.0
iter: int = sk.property(lambda m: len(m.data))
dates: pd.DatetimeIndex = sk.property(lambda m: m.to_dates(m.times))
times: pd.Index = sk.property(lambda m: m.data.index)
# Common epidemiological parameters
R0: float = param_property("R0", default=2.0)
K = sk.property(not_implemented)
duplication_time = property(lambda self: np.log(2) / self.K)
# Special accessors
clinical: Clinical = property(lambda self: Clinical(self))
clinical_model: type = None
clinical_params: Mapping = MappingProxyType({})
disease: Disease = None
disease_params: DiseaseParams = None
@property
def ui(self) -> "UIProperty":
try:
from pydemic_ui.model import UIProperty
except ImportError as ex:
log.warn(f"Could not import pydemic_ui.model: {ex}")
msg = (
"must have pydemic-ui installed to access the model.ui attribute.\n"
"Please 'pip install pydemic-ui' before proceeding'"
)
raise RuntimeError(msg)
return UIProperty(self)
def __init__(
self, params=None, *, run=None, name=None, date=None, clinical=None, disease=None, **kwargs
):
self.name = name or f"{type(self).__name__} model"
self.date = pd.to_datetime(date or today())
self.disease = get_disease(disease)
self._initialized = False
# Fix demography
demography_opts = WithRegionDemography._init_from_dict(self, kwargs)
self.disease_params = self.disease.params(**demography_opts)
# Init other mixins
WithParamsMixin.__init__(self, params, keywords=kwargs)
WithInfoMixin.__init__(self)
WithResultsMixin.__init__(self)
WithDataModelMixin.__init__(self)
if clinical:
clinical = dict(clinical)
self.clinical_model = clinical.pop("model", None)
self.clinical_params = clinical
for k, v in kwargs.items():
if hasattr(self, k):
try:
setattr(self, k, v)
except AttributeError:
name = type(self).__name__
msg = f"cannot set '{k}' attribute in '{name}' model"
raise AttributeError(msg)
else:
raise TypeError(f"invalid arguments: {k}")
if run is not None:
self.run(run)
def __str__(self):
return self.name
def _initial_cases(self):
raise NotImplementedError("must be implemented in subclass")
def _initial_infected(self):
raise NotImplementedError("must be implemented in subclass")
def epidemic_model_name(self):
"""
Return the epidemic model name.
"""
return self.meta.model_name
#
# Pickling and copying
#
# noinspection PyUnresolvedReferences
def copy(self, **kwargs):
"""
Copy instance possibly setting new values for attributes.
Keyword Args:
All keyword arguments are used to reset attributes in the copy.
Examples:
>>> m.copy(R0=1.0, name="Stable")
<SIR(name="Stable")>
"""
cls = type(self)
data = self.__dict__.copy()
params = data.pop("_params")
data.pop("_results_cache")
new = object.__new__(cls)
for k in list(kwargs):
if k in data:
data[k] = kwargs.pop(k)
new._params = copy(params)
new._results_cache = {}
new.__dict__.update(copy(data))
for k, v in kwargs.items():
setattr(new, k, v)
return new
def split(self, n=None, **kwargs) -> "ModelGroup":
"""
Create n copies of the model, each of which may override a different set of
parameters, and return them as a ModelGroup.
Args:
n:
Number of copies in the resulting list. It can also be a sequence
of dictionaries with arguments to pass to the .copy() constructor.
Keyword Args:
Keyword arguments are passed to the `.copy()` method of the model. If
the keyword is a sequence, it applies the n-th component of the sequence
to the corresponding n-th model.
"""
from ..model_group import ModelGroup
if n is None:
for k, v in kwargs.items():
if not isinstance(v, str) and isinstance(v, Sequence):
n = len(v)
break
else:
raise TypeError("cannot determine the group size from arguments")
if isinstance(n, int):
options = [{} for _ in range(n)]
else:
options = [dict(d) for d in n]
n: int = len(options)
# Merge option dicts
for k, v in kwargs.items():
if not isinstance(v, str) and isinstance(v, Sequence):
xs = v
m = len(xs)
if m != n:
raise ValueError(
f"sizes do not match: "
f"{k} should be a sequence of {n} "
f"items, got {m}"
)
for opt, x in zip(options, xs):
opt.setdefault(k, x)
else:
for opt in options:
opt.setdefault(k, v)
# Fix name
for opt in options:
try:
name = opt["name"]
except KeyError:
pass
else:
opt["name"] = name.format(n=n, **opt)
return ModelGroup(self.copy(**opt) for opt in options)
def split_children(self, options=MappingProxyType({}), **kwargs) -> "ModelGroup":
"""
Similar to split, but split into the children of the given class.
Args:
options:
A mapping between each child region (or region id) and the options used for the corresponding child model.
"""
from ..model_group import ModelGroup
if self.region is None:
raise ValueError("model is not bound to a region")
for k in self._params:
if k not in kwargs:
kwargs[k] = self.get_param(k)
for attr in ("disease",):
kwargs.setdefault(attr, getattr(self, attr))
return ModelGroup.from_children(self.region, type(self), options, **kwargs)
def reset(self, date: Union[datetime.date, float] = None, **kwargs):
"""
Return a copy of the model setting the state to the final state. If a
positional "date" argument is given, reset to the state to the one in the
specified date.
Args:
date (float or date):
An optional float or datetime selecting the desired date.
Keyword Args:
Additional keyword arguments are handled the same way as the
:method:`copy` method.
"""
if date is None:
date = self.date
time = self.time
elif isinstance(date, (float, int)):
time = float(date)
date = self.to_date(date)
else:
time: float = self.to_time(date)
kwargs["data"] = self.data.loc[[time]]
kwargs["date"] = date
kwargs["state"] = kwargs["data"].iloc[0].values
kwargs["time"] = 1
return self.copy(**kwargs)
def trim_dates(self, start=0, end=None):
"""
Trim data in model to the given interval specified by start and end
dates or times.
Args:
start (int or date):
Starting date. If not given, start at zero.
end (int or date):
End date. If not given, select up to the final date.
"""
start = int(start or 0)
end = int(end or self.time)
new = self.copy(
date=self.to_date(start),
data=self.data.iloc[start:end].reset_index(drop=True),
time=end - start,
state=self.data.iloc[end].values,
)
return new
#
# Initial conditions
#
def set_ic(self, state=None, **kwargs):
"""
Set initial conditions.
"""
if self.state is None:
if state is None:
state = self.initial_state(**kwargs)
self.state = np.array(state, dtype=float)
alias = self.meta.data_aliases
for k, v in list(kwargs.items()):
if k in alias:
del kwargs[k]
kwargs[alias[k]] = v
components = extract_keys(self.meta.variables, kwargs)
for k, v in components.items():
idx = self.meta.get_variable_index(k)
self.state[idx] = v
return self
def set_data(self, data):
"""
Force a dataframe into simulation state.
"""
data = data.copy()
data.columns = [self.meta.data_aliases.get(c, c) for c in data.columns]
self.set_ic(state=data.iloc[0])
self.data = data.reset_index(drop=True)
self.time = len(data) - 1
self.date = data.index[-1]
self.state[:] = data.iloc[-1]
self.info["observed.dates"] = data.index[[0, -1]]
self._initialized = True
return self
def set_cases_from_region(self: T) -> T:
"""
Set the number of cases from region.
"""
self.set_cases()
return self
def set_cases(self: T, curves=None, adjust_R0=False, save_observed=False) -> T:
"""
Initialize model from a dataframe with the deaths and cases curve.
This curve is usually the output of disease.epidemic_curve(region), and is
automatically retrieved if not passed explicitly and the region of the model
is set.
Args:
curves:
Dataframe with cumulative ["cases", "deaths"] columns. If not given,
or None, fetches from disease.epidemic_curves(info)
adjust_R0:
If true, adjust R0 from the observed cases.
save_observed:
If true, save the cases curves into the model.info["observed.cases"] key.
"""
if curves is None:
warnings.warn("omitting curves from set_cases will be deprecated.")
if self.region is None or self.disease is None:
msg = 'must provide both "region" and "disease" or an explicit cases ' "curve."
raise ValueError(msg)
curves = self.region.pydemic.epidemic_curve(self.disease)
if adjust_R0:
warnings.warn("adjust_R0 argument is deprecated")
method = "RollingOLS" if adjust_R0 is True else adjust_R0
Re, _ = value = fit.estimate_R0(self, curves, Re=True, method=method)
assert np.isfinite(Re), f"invalid value for R0: {value}"
self.R0 = Re
# Save notification it in the info dictionary for reference
if "cases_observed" in curves:
tf = curves.index[-1]
rate = curves.loc[tf, "cases_observed"] / curves.loc[tf, "cases"]
else:
rate = 1.0
self.info["observed.notification_rate"] = rate
# Save simulation state from data
model = self.epidemic_model_name()
curve = fit.cases(curves)
data = fit.epidemic_curve(model, curve, self)
self.set_data(data)
self.initial_cases = curve.iloc[0]
if adjust_R0:
self.R0 /= self["susceptible:final"] / self.population
self.info["observed.R0"] = self.R0
# Optionally save cases curves into the info dictionary
if save_observed:
key = "observed.curves" if save_observed is True else save_observed
df = curves.rename(columns={"cases": "cases_raw"})
df["cases"] = curve
self.info[key] = df
return self
def adjust_R0(self, method="RollingOLS"):
curves = self["cases"]
self.R0, _ = fit.estimate_R0(self, curves, method=method)
self.info["observed.R0"] = self.R0
def initial_state(self, cases=None, **kwargs):
"""
Create the default initial vector for model.
"""
if cases is not None:
kwargs.setdefault("population", self.population)
return formulas.initial_state(self.epidemic_model_name(), cases, self, **kwargs)
return self._initial_state()
def infect(self, n=1, column="infectious"):
"""
Convert 'n' susceptible individuals to infectious.
"""
last = self.data.index[-1]
n = min(n, self.data.loc[last, "susceptible"])
self.data.loc[last, column] += n
self.data.loc[last, "susceptible"] -= n
return self
def _initial_state(self):
raise NotImplementedError
def initialize(self):
"""
Force initialization.
"""
if not self._initialized:
self.set_ic()
self.data = make_dataframe(self)
self._initialized = True
#
# Running simulation
#
def run(self: T, time) -> T:
"""
Runs the model for the given duration.
"""
steps = int(time)
self.initialize()
if time == 0:
return
_, *shape = self.data.shape
ts = self.time + 1.0 + np.arange(steps)
data = np.zeros((steps, *shape))
date = self.date
if self.info.get("event.simulation_start") is None:
self.info.save_event("simulation_start")
self.run_to_fill(data, ts)
extra = pd.DataFrame(data, columns=self.data.columns, index=ts)
self.data = pd.concat([self.data, extra])
self.date = date + time * DAY
self.time = ts[-1]
self.state = data[-1]
return self
def run_to_fill(self: T, data, times) -> T:
"""
Run simulation to fill pre-allocated array of data.
"""
raise NotImplementedError
def run_until(self, condition: Callable[["Model"], bool]):
"""
Run until stop condition is satisfied.
Args:
condition:
A function that receives a model and return True if stop
criteria is satisfied.
"""
raise NotImplementedError
#
# Utility methods
#
def to_dates(self, times: Sequence[int], start_date=None) -> pd.DatetimeIndex:
"""
Convert an array of numerical times to dates.
Args:
times:
Sequence of times.
start_date:
Starting date. If not given, uses the starting date for
simulation.
"""
dates: pd.DatetimeIndex
if isinstance(times, pd.DatetimeIndex):
return times
if start_date is None:
start_date = self.date - self.time * DAY
# noinspection PyTypeChecker
return pd.to_datetime(times, unit="D", origin=start_date)
def to_date(self, time: Union[float, int]) -> datetime.date:
"""
Convert a single instant to the corresponding datetime
"""
return pd.to_datetime(time - self.time, unit="D", origin=self.date)
def to_times(self, dates: Sequence, start_date=None) -> np.ndarray:
"""
Convert an array of dates to numerical times.
Args:
dates:
Sequence of dates.
start_date:
Starting date. If not given, uses the starting date for
simulation.
"""
if start_date is None:
start_date = self.date - self.time * DAY
data = [(date - start_date).days for date in dates]
return np.array(data) if data else np.array([], dtype=int)
def to_time(self, date, start_date=None) -> float:
"""
Convert date to time.
"""
if start_date is None:
return self.to_time(date, self.date) - self.time
return float((date - start_date).days)
def get_times(self, idx=None):
"""
Get times possibly sliced by an index.
"""
if idx is None:
return self.times
else:
return self.times[idx]
def get_data_time(self, idx):
times = self.get_times(idx)
return pd.Series(times, index=times)
def get_data_date(self, idx):
times = self.get_times(idx)
dates = self.to_dates(times)
return pd.Series(dates, index=times)
def get_data_cases(self, idx):
raise NotImplementedError
#
# Plotting and showing information
#
def plot(
self,
components=None,
*,
ax=None,
logy=False,
show=False,
dates=False,
legend=True,
grid=True,
):
"""
Plot the result of simulation.
"""
ax = ax or plt.gca()
kwargs = {"logy": logy, "ax": ax, "grid": grid, "legend": legend}
def get_column(col):
if dates:
col += ":dates"
data = self[col]
return data
components = self.meta.variables if components is None else components
for col in components:
data = get_column(col)
data.plot(**kwargs)
if show:
plt.show()
def make_dataframe(model: Model):
"""
Create the initial dataframe for the given model.
"""
data = [model.state]
cols = model.meta.variables
index = [model.time]
return pd.DataFrame(data, columns=cols, index=index)
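# --- illustrative lifecycle sketch (comments only; the concrete names are assumptions) ---
# Model itself is abstract (run_to_fill, _initial_state, ... raise NotImplementedError), so it
# is used through a concrete epidemic subclass such as the SIR model referenced in the copy()
# docstring. A typical session might look like:
#
#   model = SIR(region="BR", R0=2.0, disease="covid-19")     # bind demography and disease params
#   model.set_cases_from_region()                             # initialise state from observed curves
#   model.run(60)                                             # simulate 60 days past the data
#   stable = model.copy(R0=1.0, name="Stable")                # tweaked copy, as in the docstring example
#   group = model.split(R0=[1.5, 2.0, 2.5], name="R0={R0}")   # a ModelGroup of scenario variants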
|
[
"numpy.arange",
"pandas.DataFrame",
"pydemic_ui.model.UIProperty",
"sidekick.property",
"numpy.isfinite",
"datetime.timedelta",
"typing.TypeVar",
"sidekick.import_later",
"datetime.datetime.now",
"pandas.concat",
"types.MappingProxyType",
"datetime.date",
"pandas.to_datetime",
"pandas.Series",
"numpy.log",
"copy.copy",
"numpy.zeros",
"numpy.array",
"warnings.warn"
] |
[((704, 716), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (711, 716), False, 'from typing import Sequence, Callable, Mapping, Union, TypeVar, TYPE_CHECKING\n'), ((723, 746), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (744, 746), False, 'import datetime\n'), ((755, 798), 'datetime.date', 'datetime.date', (['NOW.year', 'NOW.month', 'NOW.day'], {}), '(NOW.year, NOW.month, NOW.day)\n', (768, 798), False, 'import datetime\n'), ((805, 831), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (823, 831), False, 'import datetime\n'), ((839, 885), 'sidekick.import_later', 'sk.import_later', (['"""..plot"""'], {'package': '__package__'}), "('..plot', package=__package__)\n", (854, 885), True, 'import sidekick as sk\n'), ((1698, 1733), 'sidekick.property', 'sk.property', (['(lambda m: m.data.index)'], {}), '(lambda m: m.data.index)\n', (1709, 1733), True, 'import sidekick as sk\n'), ((1833, 1861), 'sidekick.property', 'sk.property', (['not_implemented'], {}), '(not_implemented)\n', (1844, 1861), True, 'import sidekick as sk\n'), ((2078, 2098), 'types.MappingProxyType', 'MappingProxyType', (['{}'], {}), '({})\n', (2094, 2098), False, 'from types import MappingProxyType\n'), ((19097, 19142), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols', 'index': 'index'}), '(data, columns=cols, index=index)\n', (19109, 19142), True, 'import pandas as pd\n'), ((2620, 2636), 'pydemic_ui.model.UIProperty', 'UIProperty', (['self'], {}), '(self)\n', (2630, 2636), False, 'from pydemic_ui.model import UIProperty\n'), ((5047, 5059), 'copy.copy', 'copy', (['params'], {}), '(params)\n', (5051, 5059), False, 'from copy import copy\n'), ((7279, 7299), 'types.MappingProxyType', 'MappingProxyType', (['{}'], {}), '({})\n', (7295, 7299), False, 'from types import MappingProxyType\n'), ((14946, 14971), 'numpy.zeros', 'np.zeros', (['(steps, *shape)'], {}), '((steps, *shape))\n', (14954, 14971), True, 'import numpy as np\n'), ((15163, 15218), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'self.data.columns', 'index': 'ts'}), '(data, columns=self.data.columns, index=ts)\n', (15175, 15218), True, 'import pandas as pd\n'), ((15240, 15269), 'pandas.concat', 'pd.concat', (['[self.data, extra]'], {}), '([self.data, extra])\n', (15249, 15269), True, 'import pandas as pd\n'), ((16503, 16553), 'pandas.to_datetime', 'pd.to_datetime', (['times'], {'unit': '"""D"""', 'origin': 'start_date'}), "(times, unit='D', origin=start_date)\n", (16517, 16553), True, 'import pandas as pd\n'), ((16722, 16782), 'pandas.to_datetime', 'pd.to_datetime', (['(time - self.time)'], {'unit': '"""D"""', 'origin': 'self.date'}), "(time - self.time, unit='D', origin=self.date)\n", (16736, 16782), True, 'import pandas as pd\n'), ((17883, 17912), 'pandas.Series', 'pd.Series', (['times'], {'index': 'times'}), '(times, index=times)\n', (17892, 17912), True, 'import pandas as pd\n'), ((18036, 18065), 'pandas.Series', 'pd.Series', (['dates'], {'index': 'times'}), '(dates, index=times)\n', (18045, 18065), True, 'import pandas as pd\n'), ((5120, 5130), 'copy.copy', 'copy', (['data'], {}), '(data)\n', (5124, 5130), False, 'from copy import copy\n'), ((9891, 9919), 'numpy.array', 'np.array', (['state'], {'dtype': 'float'}), '(state, dtype=float)\n', (9899, 9919), True, 'import numpy as np\n'), ((11780, 11847), 'warnings.warn', 'warnings.warn', (['"""omitting curves from set_cases will be deprecated."""'], {}), "('omitting curves from set_cases will be deprecated.')\n", (11793, 
11847), False, 'import warnings\n'), ((12147, 12196), 'warnings.warn', 'warnings.warn', (['"""adjust_R0 argument is deprecated"""'], {}), "('adjust_R0 argument is deprecated')\n", (12160, 12196), False, 'import warnings\n'), ((12368, 12383), 'numpy.isfinite', 'np.isfinite', (['Re'], {}), '(Re)\n', (12379, 12383), True, 'import numpy as np\n'), ((14913, 14929), 'numpy.arange', 'np.arange', (['steps'], {}), '(steps)\n', (14922, 14929), True, 'import numpy as np\n'), ((17286, 17300), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (17294, 17300), True, 'import numpy as np\n'), ((17314, 17337), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (17322, 17337), True, 'import numpy as np\n'), ((1907, 1916), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1913, 1916), True, 'import numpy as np\n')]
|
###############################################################################
# plot_afefeh: the basic [a/Fe] vs. [Fe/H] plot for the data section
###############################################################################
import sys
import matplotlib
import numpy
from scipy import special
matplotlib.use('Agg')
from galpy.util import bovy_plot
from matplotlib import pyplot
import define_rcsample
def plot_afefeh(plotfilename):
# Load the data
data= define_rcsample.get_rcsample()
# Plot the data
bovy_plot.bovy_print()
bovy_plot.scatterplot(data[define_rcsample._FEHTAG],
data[define_rcsample._AFETAG],
'k.',ms=.8,
levels=special.erf(numpy.arange(1,2)/numpy.sqrt(2.)),
xrange=[-1.,0.4],
yrange=[-0.15,0.35],
xlabel=r'$[\mathrm{Fe/H}]$',
ylabel=define_rcsample._AFELABEL)
# Overplot sub-samples
# low alpha, low feh
lowfeh= define_rcsample._lowlow_lowfeh(0.)
highfeh= define_rcsample._lowlow_highfeh(0.)
pyplot.plot([lowfeh,lowfeh],[define_rcsample._lowlow_lowafe(lowfeh),
define_rcsample._lowlow_highafe(lowfeh)],
'k--',lw=2.)
pyplot.plot([highfeh,highfeh],[define_rcsample._lowlow_lowafe(highfeh),
define_rcsample._lowlow_highafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._lowlow_lowafe(lowfeh),
define_rcsample._lowlow_lowafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._lowlow_highafe(lowfeh),
define_rcsample._lowlow_highafe(highfeh)],
'k--',lw=2.)
# high alpha
lowfeh= define_rcsample._highalpha_lowfeh(0.)
highfeh= define_rcsample._highalpha_highfeh(0.)
pyplot.plot([lowfeh,lowfeh],[define_rcsample._highalpha_lowafe(lowfeh),
define_rcsample._highalpha_highafe(lowfeh)],
'k--',lw=2.)
pyplot.plot([highfeh,highfeh],[define_rcsample._highalpha_lowafe(highfeh),
define_rcsample._highalpha_highafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._highalpha_lowafe(lowfeh),
define_rcsample._highalpha_lowafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._highalpha_highafe(lowfeh),
define_rcsample._highalpha_highafe(highfeh)],
'k--',lw=2.)
# solar
lowfeh= define_rcsample._solar_lowfeh(0.)
highfeh= define_rcsample._solar_highfeh(0.)
pyplot.plot([lowfeh,lowfeh],[define_rcsample._solar_lowafe(lowfeh),
define_rcsample._solar_highafe(lowfeh)],
'k--',lw=2.)
pyplot.plot([highfeh,highfeh],[define_rcsample._solar_lowafe(highfeh),
define_rcsample._solar_highafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._solar_lowafe(lowfeh),
define_rcsample._solar_lowafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._solar_highafe(lowfeh),
define_rcsample._solar_highafe(highfeh)],
'k--',lw=2.)
# high [Fe/H]
lowfeh= define_rcsample._highfeh_lowfeh(0.)
highfeh= define_rcsample._highfeh_highfeh(0.)
pyplot.plot([lowfeh,lowfeh],[define_rcsample._highfeh_lowafe(lowfeh),
define_rcsample._highfeh_highafe(lowfeh)],
'k--',lw=2.)
pyplot.plot([highfeh,highfeh],[define_rcsample._highfeh_lowafe(highfeh),
define_rcsample._highfeh_highafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._highfeh_lowafe(lowfeh),
define_rcsample._highfeh_lowafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._highfeh_highafe(lowfeh),
define_rcsample._highfeh_highafe(highfeh)],
'k--',lw=2.)
# Label them
bovy_plot.bovy_text(-0.4,0.265,r'$\mathrm{high}\ [\alpha/\mathrm{Fe}]$',
size=15.,backgroundcolor='w')
bovy_plot.bovy_text(-0.975,0.05,r'$\mathrm{low\ [Fe/H]}$',
size=15.,backgroundcolor='w')
bovy_plot.bovy_text(0.,-0.125,r'$\mathrm{high\ [Fe/H]}$',
size=15.,backgroundcolor='w')
bovy_plot.bovy_text(-0.225,-0.125,r'$\mathrm{solar}$',
size=15.,backgroundcolor='w')
# Loci
if False:
haloc= define_rcsample.highalphalocus()
bovy_plot.bovy_plot(haloc[:,0],haloc[:,1],'k-',lw=2.,overplot=True)
haloc= define_rcsample.lowalphalocus()
bovy_plot.bovy_plot(haloc[:,0],haloc[:,1],'k-',lw=2.,overplot=True)
bovy_plot.bovy_end_print(plotfilename)
return None
if __name__ == '__main__':
plot_afefeh(sys.argv[1])
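# --- usage note (not part of the original script) ---
# Run from the command line with the output figure filename as the only argument, e.g.
#   python plot_afefeh.py afefeh.png
# The red-clump sample is loaded through define_rcsample.get_rcsample(), which must be
# importable and able to find its data files.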
|
[
"galpy.util.bovy_plot.bovy_print",
"define_rcsample._highalpha_lowafe",
"numpy.arange",
"define_rcsample._solar_lowfeh",
"define_rcsample._highfeh_highafe",
"define_rcsample._solar_highfeh",
"define_rcsample._highalpha_lowfeh",
"define_rcsample.get_rcsample",
"define_rcsample._highfeh_lowafe",
"define_rcsample._solar_highafe",
"define_rcsample._solar_lowafe",
"define_rcsample._lowlow_highafe",
"define_rcsample._highfeh_lowfeh",
"galpy.util.bovy_plot.bovy_text",
"galpy.util.bovy_plot.bovy_end_print",
"matplotlib.use",
"define_rcsample._highalpha_highfeh",
"define_rcsample.highalphalocus",
"define_rcsample._lowlow_highfeh",
"define_rcsample._lowlow_lowafe",
"define_rcsample.lowalphalocus",
"galpy.util.bovy_plot.bovy_plot",
"define_rcsample._highalpha_highafe",
"define_rcsample._lowlow_lowfeh",
"define_rcsample._highfeh_highfeh",
"numpy.sqrt"
] |
[((297, 318), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (311, 318), False, 'import matplotlib\n'), ((466, 496), 'define_rcsample.get_rcsample', 'define_rcsample.get_rcsample', ([], {}), '()\n', (494, 496), False, 'import define_rcsample\n'), ((521, 543), 'galpy.util.bovy_plot.bovy_print', 'bovy_plot.bovy_print', ([], {}), '()\n', (541, 543), False, 'from galpy.util import bovy_plot\n'), ((1046, 1081), 'define_rcsample._lowlow_lowfeh', 'define_rcsample._lowlow_lowfeh', (['(0.0)'], {}), '(0.0)\n', (1076, 1081), False, 'import define_rcsample\n'), ((1094, 1130), 'define_rcsample._lowlow_highfeh', 'define_rcsample._lowlow_highfeh', (['(0.0)'], {}), '(0.0)\n', (1125, 1130), False, 'import define_rcsample\n'), ((1879, 1917), 'define_rcsample._highalpha_lowfeh', 'define_rcsample._highalpha_lowfeh', (['(0.0)'], {}), '(0.0)\n', (1912, 1917), False, 'import define_rcsample\n'), ((1930, 1969), 'define_rcsample._highalpha_highfeh', 'define_rcsample._highalpha_highfeh', (['(0.0)'], {}), '(0.0)\n', (1964, 1969), False, 'import define_rcsample\n'), ((2737, 2771), 'define_rcsample._solar_lowfeh', 'define_rcsample._solar_lowfeh', (['(0.0)'], {}), '(0.0)\n', (2766, 2771), False, 'import define_rcsample\n'), ((2784, 2819), 'define_rcsample._solar_highfeh', 'define_rcsample._solar_highfeh', (['(0.0)'], {}), '(0.0)\n', (2814, 2819), False, 'import define_rcsample\n'), ((3561, 3597), 'define_rcsample._highfeh_lowfeh', 'define_rcsample._highfeh_lowfeh', (['(0.0)'], {}), '(0.0)\n', (3592, 3597), False, 'import define_rcsample\n'), ((3610, 3647), 'define_rcsample._highfeh_highfeh', 'define_rcsample._highfeh_highfeh', (['(0.0)'], {}), '(0.0)\n', (3642, 3647), False, 'import define_rcsample\n'), ((4396, 4514), 'galpy.util.bovy_plot.bovy_text', 'bovy_plot.bovy_text', (['(-0.4)', '(0.265)', '"""$\\\\mathrm{high}\\\\ [\\\\alpha/\\\\mathrm{Fe}]$"""'], {'size': '(15.0)', 'backgroundcolor': '"""w"""'}), "(-0.4, 0.265,\n '$\\\\mathrm{high}\\\\ [\\\\alpha/\\\\mathrm{Fe}]$', size=15.0, backgroundcolor='w'\n )\n", (4415, 4514), False, 'from galpy.util import bovy_plot\n'), ((4528, 4625), 'galpy.util.bovy_plot.bovy_text', 'bovy_plot.bovy_text', (['(-0.975)', '(0.05)', '"""$\\\\mathrm{low\\\\ [Fe/H]}$"""'], {'size': '(15.0)', 'backgroundcolor': '"""w"""'}), "(-0.975, 0.05, '$\\\\mathrm{low\\\\ [Fe/H]}$', size=15.0,\n backgroundcolor='w')\n", (4547, 4625), False, 'from galpy.util import bovy_plot\n'), ((4646, 4743), 'galpy.util.bovy_plot.bovy_text', 'bovy_plot.bovy_text', (['(0.0)', '(-0.125)', '"""$\\\\mathrm{high\\\\ [Fe/H]}$"""'], {'size': '(15.0)', 'backgroundcolor': '"""w"""'}), "(0.0, -0.125, '$\\\\mathrm{high\\\\ [Fe/H]}$', size=15.0,\n backgroundcolor='w')\n", (4665, 4743), False, 'from galpy.util import bovy_plot\n'), ((4763, 4855), 'galpy.util.bovy_plot.bovy_text', 'bovy_plot.bovy_text', (['(-0.225)', '(-0.125)', '"""$\\\\mathrm{solar}$"""'], {'size': '(15.0)', 'backgroundcolor': '"""w"""'}), "(-0.225, -0.125, '$\\\\mathrm{solar}$', size=15.0,\n backgroundcolor='w')\n", (4782, 4855), False, 'from galpy.util import bovy_plot\n'), ((5149, 5187), 'galpy.util.bovy_plot.bovy_end_print', 'bovy_plot.bovy_end_print', (['plotfilename'], {}), '(plotfilename)\n', (5173, 5187), False, 'from galpy.util import bovy_plot\n'), ((4913, 4945), 'define_rcsample.highalphalocus', 'define_rcsample.highalphalocus', ([], {}), '()\n', (4943, 4945), False, 'import define_rcsample\n'), ((4954, 5028), 'galpy.util.bovy_plot.bovy_plot', 'bovy_plot.bovy_plot', (['haloc[:, 0]', 'haloc[:, 1]', '"""k-"""'], {'lw': '(2.0)', 
'overplot': '(True)'}), "(haloc[:, 0], haloc[:, 1], 'k-', lw=2.0, overplot=True)\n", (4973, 5028), False, 'from galpy.util import bovy_plot\n'), ((5037, 5068), 'define_rcsample.lowalphalocus', 'define_rcsample.lowalphalocus', ([], {}), '()\n', (5066, 5068), False, 'import define_rcsample\n'), ((5077, 5151), 'galpy.util.bovy_plot.bovy_plot', 'bovy_plot.bovy_plot', (['haloc[:, 0]', 'haloc[:, 1]', '"""k-"""'], {'lw': '(2.0)', 'overplot': '(True)'}), "(haloc[:, 0], haloc[:, 1], 'k-', lw=2.0, overplot=True)\n", (5096, 5151), False, 'from galpy.util import bovy_plot\n'), ((1163, 1201), 'define_rcsample._lowlow_lowafe', 'define_rcsample._lowlow_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (1193, 1201), False, 'import define_rcsample\n'), ((1236, 1275), 'define_rcsample._lowlow_highafe', 'define_rcsample._lowlow_highafe', (['lowfeh'], {}), '(lowfeh)\n', (1267, 1275), False, 'import define_rcsample\n'), ((1342, 1381), 'define_rcsample._lowlow_lowafe', 'define_rcsample._lowlow_lowafe', (['highfeh'], {}), '(highfeh)\n', (1372, 1381), False, 'import define_rcsample\n'), ((1418, 1458), 'define_rcsample._lowlow_highafe', 'define_rcsample._lowlow_highafe', (['highfeh'], {}), '(highfeh)\n', (1449, 1458), False, 'import define_rcsample\n'), ((1524, 1562), 'define_rcsample._lowlow_lowafe', 'define_rcsample._lowlow_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (1554, 1562), False, 'import define_rcsample\n'), ((1598, 1637), 'define_rcsample._lowlow_lowafe', 'define_rcsample._lowlow_lowafe', (['highfeh'], {}), '(highfeh)\n', (1628, 1637), False, 'import define_rcsample\n'), ((1703, 1742), 'define_rcsample._lowlow_highafe', 'define_rcsample._lowlow_highafe', (['lowfeh'], {}), '(lowfeh)\n', (1734, 1742), False, 'import define_rcsample\n'), ((1778, 1818), 'define_rcsample._lowlow_highafe', 'define_rcsample._lowlow_highafe', (['highfeh'], {}), '(highfeh)\n', (1809, 1818), False, 'import define_rcsample\n'), ((2002, 2043), 'define_rcsample._highalpha_lowafe', 'define_rcsample._highalpha_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (2035, 2043), False, 'import define_rcsample\n'), ((2078, 2120), 'define_rcsample._highalpha_highafe', 'define_rcsample._highalpha_highafe', (['lowfeh'], {}), '(lowfeh)\n', (2112, 2120), False, 'import define_rcsample\n'), ((2187, 2229), 'define_rcsample._highalpha_lowafe', 'define_rcsample._highalpha_lowafe', (['highfeh'], {}), '(highfeh)\n', (2220, 2229), False, 'import define_rcsample\n'), ((2266, 2309), 'define_rcsample._highalpha_highafe', 'define_rcsample._highalpha_highafe', (['highfeh'], {}), '(highfeh)\n', (2300, 2309), False, 'import define_rcsample\n'), ((2375, 2416), 'define_rcsample._highalpha_lowafe', 'define_rcsample._highalpha_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (2408, 2416), False, 'import define_rcsample\n'), ((2452, 2494), 'define_rcsample._highalpha_lowafe', 'define_rcsample._highalpha_lowafe', (['highfeh'], {}), '(highfeh)\n', (2485, 2494), False, 'import define_rcsample\n'), ((2560, 2602), 'define_rcsample._highalpha_highafe', 'define_rcsample._highalpha_highafe', (['lowfeh'], {}), '(lowfeh)\n', (2594, 2602), False, 'import define_rcsample\n'), ((2638, 2681), 'define_rcsample._highalpha_highafe', 'define_rcsample._highalpha_highafe', (['highfeh'], {}), '(highfeh)\n', (2672, 2681), False, 'import define_rcsample\n'), ((2852, 2889), 'define_rcsample._solar_lowafe', 'define_rcsample._solar_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (2881, 2889), False, 'import define_rcsample\n'), ((2924, 2962), 'define_rcsample._solar_highafe', 'define_rcsample._solar_highafe', (['lowfeh'], 
{}), '(lowfeh)\n', (2954, 2962), False, 'import define_rcsample\n'), ((3029, 3067), 'define_rcsample._solar_lowafe', 'define_rcsample._solar_lowafe', (['highfeh'], {}), '(highfeh)\n', (3058, 3067), False, 'import define_rcsample\n'), ((3104, 3143), 'define_rcsample._solar_highafe', 'define_rcsample._solar_highafe', (['highfeh'], {}), '(highfeh)\n', (3134, 3143), False, 'import define_rcsample\n'), ((3209, 3246), 'define_rcsample._solar_lowafe', 'define_rcsample._solar_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (3238, 3246), False, 'import define_rcsample\n'), ((3282, 3320), 'define_rcsample._solar_lowafe', 'define_rcsample._solar_lowafe', (['highfeh'], {}), '(highfeh)\n', (3311, 3320), False, 'import define_rcsample\n'), ((3386, 3424), 'define_rcsample._solar_highafe', 'define_rcsample._solar_highafe', (['lowfeh'], {}), '(lowfeh)\n', (3416, 3424), False, 'import define_rcsample\n'), ((3460, 3499), 'define_rcsample._solar_highafe', 'define_rcsample._solar_highafe', (['highfeh'], {}), '(highfeh)\n', (3490, 3499), False, 'import define_rcsample\n'), ((3680, 3719), 'define_rcsample._highfeh_lowafe', 'define_rcsample._highfeh_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (3711, 3719), False, 'import define_rcsample\n'), ((3754, 3794), 'define_rcsample._highfeh_highafe', 'define_rcsample._highfeh_highafe', (['lowfeh'], {}), '(lowfeh)\n', (3786, 3794), False, 'import define_rcsample\n'), ((3861, 3901), 'define_rcsample._highfeh_lowafe', 'define_rcsample._highfeh_lowafe', (['highfeh'], {}), '(highfeh)\n', (3892, 3901), False, 'import define_rcsample\n'), ((3938, 3979), 'define_rcsample._highfeh_highafe', 'define_rcsample._highfeh_highafe', (['highfeh'], {}), '(highfeh)\n', (3970, 3979), False, 'import define_rcsample\n'), ((4045, 4084), 'define_rcsample._highfeh_lowafe', 'define_rcsample._highfeh_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (4076, 4084), False, 'import define_rcsample\n'), ((4120, 4160), 'define_rcsample._highfeh_lowafe', 'define_rcsample._highfeh_lowafe', (['highfeh'], {}), '(highfeh)\n', (4151, 4160), False, 'import define_rcsample\n'), ((4226, 4266), 'define_rcsample._highfeh_highafe', 'define_rcsample._highfeh_highafe', (['lowfeh'], {}), '(lowfeh)\n', (4258, 4266), False, 'import define_rcsample\n'), ((4302, 4343), 'define_rcsample._highfeh_highafe', 'define_rcsample._highfeh_highafe', (['highfeh'], {}), '(highfeh)\n', (4334, 4343), False, 'import define_rcsample\n'), ((741, 759), 'numpy.arange', 'numpy.arange', (['(1)', '(2)'], {}), '(1, 2)\n', (753, 759), False, 'import numpy\n'), ((759, 774), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (769, 774), False, 'import numpy\n')]
|
from .base_lot import *
import numpy as np
import os
from .units import *
#TODO get rid of get_energy
class QChem(Lot):
def run(self,geom,multiplicity):
tempfilename = 'tempQCinp'
tempfile = open(tempfilename,'w')
if self.lot_inp_file == False:
tempfile.write(' $rem\n')
tempfile.write(' JOBTYPE FORCE\n')
tempfile.write(' EXCHANGE {}\n'.format(self.functional))
tempfile.write(' SCF_ALGORITHM rca_diis\n')
tempfile.write(' SCF_MAX_CYCLES 300\n')
tempfile.write(' BASIS {}\n'.format(self.basis))
#tempfile.write(' ECP LANL2DZ \n')
tempfile.write(' WAVEFUNCTION_ANALYSIS FALSE\n')
tempfile.write(' GEOM_OPT_MAX_CYCLES 300\n')
tempfile.write('scf_convergence 6\n')
tempfile.write(' SYM_IGNORE TRUE\n')
tempfile.write(' SYMMETRY FALSE\n')
tempfile.write('molden_format true\n')
tempfile.write(' $end\n')
tempfile.write('\n')
tempfile.write('$molecule\n')
else:
with open(self.lot_inp_file) as lot_inp:
lot_inp_lines = lot_inp.readlines()
for line in lot_inp_lines:
tempfile.write(line)
tempfile.write('{} {}\n'.format(self.charge,multiplicity))
if os.path.isfile("link.txt"):
with open("link.txt") as link:
link_lines = link.readlines()
tmp_geom = [list(i) for i in geom]
for i,coord in enumerate(tmp_geom):
coord.append(link_lines[i].rstrip('\n'))
for i in coord:
tempfile.write(str(i)+' ')
tempfile.write('\n')
else:
for coord in geom:
for i in coord:
tempfile.write(str(i)+' ')
tempfile.write('\n')
tempfile.write('$end')
tempfile.close()
cmd = "qchem -nt {} -save {} {}.qchem.out {}.{}".format(self.nproc,tempfilename,tempfilename,self.node_id,multiplicity)
os.system(cmd)
efilepath = os.environ['QCSCRATCH']
efilepath += '/{}.{}/GRAD'.format(self.node_id,multiplicity)
with open(efilepath) as efile:
elines = efile.readlines()
temp = 0
for lines in elines:
if temp == 1:
self.E.append((multiplicity,float(lines.split()[0])))
break
if "$" in lines:
temp += 1
gradfilepath = os.environ['QCSCRATCH']
gradfilepath += '/{}.{}/GRAD'.format(self.node_id,multiplicity)
with open(gradfilepath) as gradfile:
gradlines = gradfile.readlines()
temp = 0
tmp=[]
for lines in gradlines:
if '$' in lines:
temp+=1
elif temp == 2:
tmpline = lines.split()
tmp.append([float(i) for i in tmpline])
elif temp == 3:
break
self.grada.append((multiplicity,tmp))
return
def get_energy(self,coords,multiplicity,state):
#if self.has_nelectrons==False:
# for i in self.states:
# self.get_nelec(geom,i[0])
# self.has_nelectrons==True
if self.hasRanForCurrentCoords==False or (coords != self.currentCoords).any():
self.currentCoords = coords.copy()
geom = manage_xyz.np_to_xyz(self.geom,self.currentCoords)
self.runall(geom)
self.hasRanForCurrentCoords=True
tmp = self.search_tuple(self.E,multiplicity)
return np.asarray(tmp[state][1])*KCAL_MOL_PER_AU
def get_gradient(self,coords,multiplicity,state):
#if self.has_nelectrons==False:
# for i in self.states:
# self.get_nelec(geom,i[0])
# self.has_nelectrons==True
if self.hasRanForCurrentCoords==False or (coords != self.currentCoords).any():
self.currentCoords = coords.copy()
geom = manage_xyz.np_to_xyz(self.geom,self.currentCoords)
self.runall(geom)
tmp = self.search_tuple(self.grada,multiplicity)
return np.asarray(tmp[state][1])*ANGSTROM_TO_AU
    @classmethod
    def copy(cls,lot,**kwargs):
        # copy the Q-Chem scratch directories from the source lot's node to the new node
        base = os.environ['QCSCRATCH']
        node_id = kwargs.get('node_id',lot.node_id)
        for state in lot.states:
            multiplicity = state[0]
            efilepath_old=base+ '/{}.{}'.format(lot.node_id,multiplicity)
            efilepath_new =base+ '/{}.{}'.format(node_id,multiplicity)
            os.system('cp -r ' + efilepath_old +' ' + efilepath_new)
        return cls(lot.options.copy().set_values(kwargs))
|
[
"numpy.asarray",
"os.path.isfile",
"os.system"
] |
[((1355, 1381), 'os.path.isfile', 'os.path.isfile', (['"""link.txt"""'], {}), "('link.txt')\n", (1369, 1381), False, 'import os\n'), ((2103, 2117), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2112, 2117), False, 'import os\n'), ((3668, 3693), 'numpy.asarray', 'np.asarray', (['tmp[state][1]'], {}), '(tmp[state][1])\n', (3678, 3693), True, 'import numpy as np\n'), ((4228, 4253), 'numpy.asarray', 'np.asarray', (['tmp[state][1]'], {}), '(tmp[state][1])\n', (4238, 4253), True, 'import numpy as np\n'), ((4586, 4643), 'os.system', 'os.system', (["('cp -r ' + efilepath_old + ' ' + efilepath_new)"], {}), "('cp -r ' + efilepath_old + ' ' + efilepath_new)\n", (4595, 4643), False, 'import os\n')]
|
from enum import Enum
import numpy as np
import tensorflow as tf
from edward1_utils import get_ancestors, get_descendants
class GenerativeMode(Enum):
UNCONDITIONED = 1 # i.e. sampling the learnt prior
CONDITIONED = 2 # i.e. sampling the posterior, with variational samples substituted
RECONSTRUCTION = 3 # i.e. mode of the posterior
def noncopying_integrated_reparam_klqp(generative_builder, variational_builder, name_to_data_map, discrete_name_to_states_map, sample_count=1, beta=1., alpha=5.e6, grad_clip_magnitude=None):
# Every variable in the generative and variational should have a leading dimension that is 'IID', corresponding to
# an index-into-batch or otherwise sampled independently -- when we make substitutions, this dimension may be
# expanded to incorporate more samples. Thus, all RVs are indexed by iid-index, *
# Generative RVs are created by lambdas, taking zero or one parameters. There should be zero parameters when
# dim0 (i.e. the iid-dimension) has size fixed by ancestral variables; there should be one parameter when it's a 'root'
# variable (i.e. doesn't have any ancestor-RVs) and its base dim0 should be multiplied by that parameter
# Variational RVs are created similarly; the name given to the lambda should match that of the corresponding
    # generative variable. Sample/discrete-expanded observations are retrieved with a second lambda
# generative_builder is free to return any type or None; for example, it may choose to return an object containing
# some of its random variables; the unconditioned and mode-reconstruction versions of this result are returned to
# the caller
    # ** note that we do not allow (but do not check for!) non-leaf non-RV tensors that are iid-indexed and have an RV as a sibling to
# ** be included as parents of RVs in the graph, as these cannot easily be expanded to the correct dimensionality -- their iid-index
# ** will always be of 'base' size, and will not broadcast correctly against 'upsampled' iid-indices of the sibling RV
# ** this could be fixed by handling such things essentially the same as expansion-like-discrete
# ** note that having RVs created with a non-default (i.e. not scalar) sample_shape will not work in general, as we call rv.sample()
# ** directly without passing this in -- so the shape will not be what the caller expects
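    #
    # Illustrative builder pair following the conventions above (a sketch only -- the names,
    # distributions and helpers here are hypothetical, not part of the original code):
    #   def generative(rv, mode):
    #       z = rv('z', lambda n: Normal(loc=tf.zeros([n * batch_size, latent_dim]), scale=1.))  # root RV: takes the iid multiplier
    #       x = rv('x', lambda: Normal(loc=decoder(z), scale=0.1))                               # non-root RV: no parameter
    #       return x
    #   def variational(rv, observed):
    #       h = encoder(observed('x'))   # observations arrive pre-expanded over samples/discrete states
    #       z = rv('z', lambda n: Normal(loc=tf.tile(h, [n, 1]), scale=sigma))
    #       return z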
assert len(discrete_name_to_states_map) < 2
if len(discrete_name_to_states_map) == 1:
assert False # ...as this is broken for now -- discrete states get 'blurred together'
        discrete_name, discrete_states = list(discrete_name_to_states_map.items())[0] # discrete_states is a numpy array indexed by discrete-index, *
assert discrete_name not in name_to_data_map
else:
discrete_name = None
discrete_states = np.zeros([1])
# Build the 'prior', i.e. the generative without variational substitutions, so we can evaluate the prior probability of the variational samples later
name_to_unconditioned_generative_variable = {}
generative_root_variable_names = set()
def make_unconditioned_rv(name, builder):
with tf.name_scope(name):
assert name not in name_to_unconditioned_generative_variable
is_root_variable = builder.__code__.co_argcount == 1 # ideally the below assert would *define* root-ness (indeed, it does, conceptually), but can't evaluate it before the variable is created!
variable = builder(1) if is_root_variable else builder()
assert is_root_variable == (len(get_ancestors(variable, name_to_unconditioned_generative_variable.values())) == 0) # ** could be made more efficient by caching, so quickly know chunks of the graph do/don't have ancestor-RVs
if is_root_variable:
generative_root_variable_names.add(name)
name_to_unconditioned_generative_variable[name] = variable
return variable.value
with tf.variable_scope('generative'), tf.name_scope('unconditioned'):
unconditioned_generative = generative_builder(make_unconditioned_rv, GenerativeMode.UNCONDITIONED)
def expand_like_discrete(substituted_value):
# This will be applied to all variables that aren't indexed by discrete-state
substituted_value = tf.reshape(substituted_value, [sample_count, -1] + list(map(int, substituted_value.get_shape()[1:]))) # indexed by sample-index, iid-index, *
substituted_value = tf.tile(substituted_value, [1, discrete_states.shape[0]] + [1] * (substituted_value.get_shape().ndims - 2)) # indexed by sample-index, discrete-index * iid-index, *
return tf.reshape(substituted_value, [-1] + list(map(int, substituted_value.get_shape()[2:]))) # indexed by sample-index * discrete-index * iid-index, *
name_to_substituted_value = {} # each value is indexed by sample-index * discrete-index * iid-index, *
# Construct expanded copies of the observations (tiled over sample and discrete indices); these are made available
# to the variational so it can reason over them, and are used as substitutions in the generative
for name in name_to_data_map:
assert name != discrete_name # ** need to think about this case!
# ** should also probably assert that the observed variable is not a variational-descendant of the discrete (or any other variable!)
substituted_value = tf.tile(
name_to_data_map[name],
[sample_count] + [1] * (name_to_data_map[name].get_shape().ndims - 1)
) # indexed by sample-index * iid-index, *
# ** is calling expand_like_discrete not strictly less efficient that just adding the discrete-state-count into the above tile?
name_to_substituted_value[name] = expand_like_discrete(substituted_value) # always expand, as an observed variable cannot be variational-descendant of the discrete
def is_variable_discrete_indexed(variable):
# Substituted values are always discrete-indexed, hence having one of them as an ancestor is a sufficient
# condition for being discrete-indexed. In practice we check the reverse, as the substitution is not an RV
# hence won't be returned as an ancestor. It is also a necessary condition, as there is no other route through
# which discrete-indexing can be added
return any(
len(get_descendants(substituted_value, [variable])) > 0
for substituted_value in name_to_substituted_value.values()
)
# Build the variational, substituting samples and expanding all variables to be indexed by sample and discrete indices
name_to_conditioned_variational_variable = {}
def make_variational_rv(name, builder):
with tf.name_scope('q_' + name):
assert name in name_to_unconditioned_generative_variable
assert name not in name_to_data_map
assert name not in name_to_conditioned_variational_variable
is_root_variable = builder.__code__.co_argcount == 1 # ideally the below assert would *define* root-ness (indeed, it does, conceptually), but can't evaluate it before the variable is created!
variable = builder(sample_count) if is_root_variable else builder()
assert is_root_variable == (
len(get_ancestors(variable, name_to_conditioned_variational_variable.values())) == 0 # it's a root variable if it doesn't have any variational RV as an ancestor...
and
all(
len(get_descendants(name_to_substituted_value[observation_name], [variable])) == 0 # ...and no observation has it as a descendant -- i.e. it doesn't have any observation as an ancestor either
for observation_name in name_to_data_map
)
) # ** could be made more efficient by caching, so quickly know chunks of the graph do/don't have ancestor-RVs
substituted_value = variable.value # indexed by sample-index * [discrete-index *] iid-index, *
if discrete_name is not None: # if there's a discrete to be integrated, then *all* substituted values must be discrete-indexed
if name == discrete_name:
                    assert list(map(int, substituted_value.get_shape()[1:])) == list(discrete_states.shape[1:]) # check the discrete values have the same shape as samples from the distribution
substituted_value = tf.tile(
discrete_states[np.newaxis, :, np.newaxis, ...],
[sample_count, 1, int(substituted_value.get_shape()[0]) / sample_count / (discrete_states.shape[0] if is_variable_discrete_indexed(variable) else 1)] + [1] * (len(discrete_states.shape) - 1)
) # indexed by sample-index, discrete-index, iid-index, *
substituted_value = tf.reshape(substituted_value, [-1] + list(discrete_states.shape[1:])) # indexed by sample-index * discrete-index * iid-index, *
else:
if not is_variable_discrete_indexed(variable):
substituted_value = expand_like_discrete(substituted_value)
name_to_conditioned_variational_variable[name] = variable # this is used to evaluate the variational density of the variational sample; for both this and next, uses ancestral substitutions in case of non-MF variational
name_to_substituted_value[name] = substituted_value # this is substituted into the generative
return substituted_value
with tf.variable_scope('variational'), tf.name_scope('conditioned'):
variational_builder(make_variational_rv, lambda observation_name: name_to_substituted_value[observation_name])
if discrete_name is not None:
assert discrete_name in name_to_conditioned_variational_variable
assert discrete_name in name_to_substituted_value
# Build the 'conditioned generative', with values substituted from the variational and observations
name_to_conditioned_generative_variable = {}
def make_conditioned_rv(name, builder):
with tf.name_scope(name):
is_root_variable = name in generative_root_variable_names # i.e. whether this is an RV with no ancestor-RVs, meaning that it should be replicated according to sample_count (otherwise, replication of some ancestor should 'bubble down' to us)
variable = builder(sample_count) if is_root_variable else builder()
name_to_conditioned_generative_variable[name] = variable # used to evaluate the generative density of the variational sample (and the observed data), with ancestral substitutions
if name not in name_to_substituted_value:
# Marginalise by sampling from the generative (with ancestral conditioning), as there's no corresponding variational or observation
# ** could condition the warning on whether it actually has descendants!
print('warning: {} has neither variational distribution nor observed value, hence will be marginalised by sampling'.format(name))
substituted_value = variable.value
if discrete_name is not None:
if not is_variable_discrete_indexed(variable):
substituted_value = expand_like_discrete(substituted_value)
name_to_substituted_value[name] = substituted_value
return name_to_substituted_value[name]
with tf.variable_scope('generative', reuse=True), tf.name_scope('conditioned'):
conditioned_generative = generative_builder(make_conditioned_rv, GenerativeMode.CONDITIONED)
if discrete_name is not None:
assert discrete_name in name_to_conditioned_generative_variable
def get_mode_or_mean(variable):
try:
return variable.distribution.mode()
except NotImplementedError:
print('warning: using mean instead of mode for {} in reconstruction'.format(variable.distribution.name))
return variable.distribution.mean() # fall back to mean, e.g. for uniform random variables
# Build a second copy of the variational, with the (variational) mode of each variable substituted, in order to do
# a full 'ancestrally modal' reconstruction in the non-MF case
name_to_variational_mode = {}
def make_variational_reconstruction_rv(name, builder):
with tf.name_scope('q_' + name):
assert name in name_to_unconditioned_generative_variable
is_root_variable = builder.__code__.co_argcount == 1 # ** cache from first variational model creation above?
variable = builder(1) if is_root_variable else builder()
name_to_variational_mode[name] = get_mode_or_mean(variable)
return name_to_variational_mode[name]
with tf.variable_scope('variational', reuse=True), tf.name_scope('modes'):
variational_builder(make_variational_reconstruction_rv, lambda observation_name: name_to_data_map[observation_name])
# This third copy of the generative is not used by inference, but is returned to the caller to use for reconstructions
# It does not perform any sample/discrete expansion, but substitutes variational modes for ancestral latents
def make_reconstruction_rv(name, builder):
with tf.name_scope(name):
if name in name_to_variational_mode:
return name_to_variational_mode[name]
else:
# ** non-use of name_to_data_map here may not be desirable if the variable is not a leaf
variable = builder(1) if name in generative_root_variable_names else builder()
return get_mode_or_mean(variable)
with tf.variable_scope('generative', reuse=True), tf.name_scope('reconstruction'):
reconstruction_modes = generative_builder(make_reconstruction_rv, GenerativeMode.RECONSTRUCTION)
with tf.name_scope('integrated_klqp'):
def lifted_log_prob(variable, value, name): # ** would be nice if we could rely on variable.name == name!
# variable is a random variable, indexed by sample-index * [discrete-index *] iid-index, *
# value is a tensor, indexed by sample-index * discrete-index * iid-index, *
# This function evaluates variable.log_prob on slices of value taken over discrete-index, summing away non-iid dimensions
discrete_state_count = discrete_states.shape[0]
if discrete_name is None:
log_prob = variable.distribution.log_prob(value)
return tf.reduce_sum(log_prob, axis=list(range(1, log_prob.get_shape().ndims)))[np.newaxis, ...]
elif is_variable_discrete_indexed(variable):
log_prob = variable.distribution.log_prob(value) # indexed by sample-index * discrete-index * iid-index, *
log_prob = tf.reduce_sum(log_prob, axis=list(range(1, log_prob.get_shape().ndims))) # indexed by sample-index * discrete-index * iid-index
log_prob = tf.reshape(log_prob, [sample_count, discrete_state_count, -1]) # indexed by sample-index, discrete-index, iid-index
return tf.reshape(tf.transpose(log_prob, [1, 0, 2]), [discrete_state_count, -1]) # indexed by discrete-index, sample-index * iid-index
else:
value = tf.reshape(value, [sample_count, discrete_state_count, -1] + list(map(int, value.get_shape()[1:]))) # indexed by sample-index, discrete-index, iid-index, *
value = tf.transpose(value, [1, 0, 2] + list(range(3, value.get_shape().ndims))) # indexed by discrete-index, sample-index, iid-index, *
value = tf.reshape(value, [discrete_state_count, -1] + list(map(int, value.get_shape()[3:]))) # indexed by discrete-index, sample-index * iid-index, *
log_prob = tf.stack([
variable.distribution.log_prob(value[state_index])
for state_index in range(discrete_state_count)
]) # indexed by discrete-index, sample-index * iid-index, *
return tf.reduce_sum(log_prob, axis=range(2, log_prob.get_shape().ndims)) # indexed by discrete-index, sample-index * iid-index
if discrete_name is not None:
discrete_qz_probs = tf.exp(lifted_log_prob(
name_to_conditioned_variational_variable[discrete_name],
name_to_substituted_value[discrete_name], # this is the discrete states tiled over sample-index and iid-index
discrete_name
)) # indexed by discrete-index, sample-index * iid-index; this is the probability under the variational, of each discrete state
def E_log_prob_wrt_discrete(variable, value, name): # ** again, would be nice if could rely on variable.name == name!
# log_prob is indexed by sample-index * [discrete-index *] iid-index, *
# result is scalar, being a mean over samples, and minibatch-elements, an expectation over discrete-states, and a sum over remaining dimensions
maybe_weighted_log_prob = lifted_log_prob(variable, value, name) # indexed by discrete-index, sample-index * iid-index
if discrete_name is not None:
maybe_weighted_log_prob *= discrete_qz_probs
return tf.reduce_mean(maybe_weighted_log_prob) # that we do a mean over iid-index means we treat the minibatch-indexing as independent sampling, not a joint rv
log_Px = sum(
E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name)
for name in name_to_data_map
)
log_Pz = sum(
E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name)
# for name in name_to_conditioned_generative_variable
for name in name_to_conditioned_variational_variable # variational not generative so we only include things with variational (not prior) substitutions
if name != discrete_name # ...as we use L1 divergence for this instead
if name not in name_to_data_map # ...as it's in P(x) instead
)
log_Qz = sum(
E_log_prob_wrt_discrete(name_to_conditioned_variational_variable[name], name_to_substituted_value[name], name)
for name in name_to_conditioned_variational_variable
if name != discrete_name # ...as we use L1 divergence for this instead
)
for name in name_to_conditioned_variational_variable:
if name != discrete_name:
if name not in name_to_data_map:
tf.summary.scalar('P(z_' + name + ')', E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name))
for name in name_to_data_map:
tf.summary.scalar('P(x_' + name + ')', E_log_prob_wrt_discrete(name_to_conditioned_generative_variable[name], name_to_substituted_value[name], name))
for name in name_to_conditioned_variational_variable:
if name != discrete_name:
value = E_log_prob_wrt_discrete(name_to_conditioned_variational_variable[name], name_to_substituted_value[name], name)
tf.summary.scalar('Q(z_' + name + ')', value)
if discrete_name is not None:
discrete_z_probs = tf.exp(lifted_log_prob(
name_to_unconditioned_generative_variable[discrete_name],
name_to_substituted_value[discrete_name], # this is the discrete states tiled over sample-index and iid-index
discrete_name
)) # indexed by discrete-index, sample-index * iid-index; this is the prior (unconditioned gen.) probability of the discrete states; it will be constant over sample-index and iid-index iff the discrete has no gen. ancestors
discrete_z_probs = tf.reduce_mean(discrete_z_probs, axis=1) # indexed by discrete-index
discrete_qz_probs = tf.reduce_mean(discrete_qz_probs, axis=1) # ditto; the mean here is calculating the aggregated posterior over the batch and samples
discrete_divergence_loss = tf.reduce_mean(tf.abs(discrete_z_probs - discrete_qz_probs)) * alpha # L1 loss
else:
discrete_divergence_loss = 0.
tf.losses.add_loss(0.) # this is needed because get_total_loss throws instead of returning zero if no losses have been registered
additional_losses = tf.losses.get_total_loss()
loss = -(log_Px + (log_Pz - log_Qz) * beta) + discrete_divergence_loss + additional_losses
tf.summary.scalar('inference/loss', loss)
tf.summary.scalar('inference/log_Px', log_Px)
tf.summary.scalar('inference/log_Pz', log_Pz)
tf.summary.scalar('inference/log_Qz', log_Qz)
tf.summary.scalar('inference/Ldd', discrete_divergence_loss)
tf.summary.scalar('inference/L*', additional_losses)
var_list = tf.trainable_variables()
grads = tf.gradients(loss, [v._ref() for v in var_list])
abs_grads = tf.abs(tf.concat([tf.reshape(grad, [-1]) for grad in grads if grad is not None], axis=0))
loss = tf.Print(loss, [log_Px, log_Pz * beta, log_Qz * beta, discrete_divergence_loss, additional_losses, tf.reduce_mean(abs_grads), tf.reduce_max(abs_grads)], 'p(x), p(z), q(z), Ldd, L*, <|g|>, max |g| = ')
if grad_clip_magnitude is not None:
grads, _ = tf.clip_by_global_norm(grads, grad_clip_magnitude)
return loss, list(zip(grads, var_list)), unconditioned_generative, reconstruction_modes, conditioned_generative
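# Hypothetical call site (a sketch only -- generative, variational and x_batch are illustrative names):
#   loss, grads_and_vars, prior, recon, posterior = noncopying_integrated_reparam_klqp(
#       generative, variational, {'x': x_batch}, {}, sample_count=4, beta=1.)
#   train_op = tf.train.AdamOptimizer(1e-3).apply_gradients(grads_and_vars)  # TF1-style, matching the code above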
|
[
"tensorflow.abs",
"tensorflow.losses.add_loss",
"tensorflow.summary.scalar",
"tensorflow.trainable_variables",
"tensorflow.reshape",
"numpy.zeros",
"tensorflow.variable_scope",
"tensorflow.reduce_mean",
"edward1_utils.get_descendants",
"tensorflow.transpose",
"tensorflow.reduce_max",
"tensorflow.name_scope",
"tensorflow.clip_by_global_norm",
"tensorflow.losses.get_total_loss"
] |
[((20630, 20671), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/loss"""', 'loss'], {}), "('inference/loss', loss)\n", (20647, 20671), True, 'import tensorflow as tf\n'), ((20676, 20721), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/log_Px"""', 'log_Px'], {}), "('inference/log_Px', log_Px)\n", (20693, 20721), True, 'import tensorflow as tf\n'), ((20726, 20771), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/log_Pz"""', 'log_Pz'], {}), "('inference/log_Pz', log_Pz)\n", (20743, 20771), True, 'import tensorflow as tf\n'), ((20776, 20821), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/log_Qz"""', 'log_Qz'], {}), "('inference/log_Qz', log_Qz)\n", (20793, 20821), True, 'import tensorflow as tf\n'), ((20826, 20886), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/Ldd"""', 'discrete_divergence_loss'], {}), "('inference/Ldd', discrete_divergence_loss)\n", (20843, 20886), True, 'import tensorflow as tf\n'), ((20891, 20943), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""inference/L*"""', 'additional_losses'], {}), "('inference/L*', additional_losses)\n", (20908, 20943), True, 'import tensorflow as tf\n'), ((20960, 20984), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (20982, 20984), True, 'import tensorflow as tf\n'), ((2862, 2875), 'numpy.zeros', 'np.zeros', (['[1]'], {}), '([1])\n', (2870, 2875), True, 'import numpy as np\n'), ((3993, 4024), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generative"""'], {}), "('generative')\n", (4010, 4024), True, 'import tensorflow as tf\n'), ((4026, 4056), 'tensorflow.name_scope', 'tf.name_scope', (['"""unconditioned"""'], {}), "('unconditioned')\n", (4039, 4056), True, 'import tensorflow as tf\n'), ((9570, 9602), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""variational"""'], {}), "('variational')\n", (9587, 9602), True, 'import tensorflow as tf\n'), ((9604, 9632), 'tensorflow.name_scope', 'tf.name_scope', (['"""conditioned"""'], {}), "('conditioned')\n", (9617, 9632), True, 'import tensorflow as tf\n'), ((11489, 11532), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generative"""'], {'reuse': '(True)'}), "('generative', reuse=True)\n", (11506, 11532), True, 'import tensorflow as tf\n'), ((11534, 11562), 'tensorflow.name_scope', 'tf.name_scope', (['"""conditioned"""'], {}), "('conditioned')\n", (11547, 11562), True, 'import tensorflow as tf\n'), ((12838, 12882), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""variational"""'], {'reuse': '(True)'}), "('variational', reuse=True)\n", (12855, 12882), True, 'import tensorflow as tf\n'), ((12884, 12906), 'tensorflow.name_scope', 'tf.name_scope', (['"""modes"""'], {}), "('modes')\n", (12897, 12906), True, 'import tensorflow as tf\n'), ((13731, 13774), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generative"""'], {'reuse': '(True)'}), "('generative', reuse=True)\n", (13748, 13774), True, 'import tensorflow as tf\n'), ((13776, 13807), 'tensorflow.name_scope', 'tf.name_scope', (['"""reconstruction"""'], {}), "('reconstruction')\n", (13789, 13807), True, 'import tensorflow as tf\n'), ((13924, 13956), 'tensorflow.name_scope', 'tf.name_scope', (['"""integrated_klqp"""'], {}), "('integrated_klqp')\n", (13937, 13956), True, 'import tensorflow as tf\n'), ((20340, 20363), 'tensorflow.losses.add_loss', 'tf.losses.add_loss', (['(0.0)'], {}), '(0.0)\n', (20358, 20363), True, 'import tensorflow as tf\n'), ((20499, 20525), 
'tensorflow.losses.get_total_loss', 'tf.losses.get_total_loss', ([], {}), '()\n', (20523, 20525), True, 'import tensorflow as tf\n'), ((21425, 21475), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', 'grad_clip_magnitude'], {}), '(grads, grad_clip_magnitude)\n', (21447, 21475), True, 'import tensorflow as tf\n'), ((3184, 3203), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (3197, 3203), True, 'import tensorflow as tf\n'), ((6768, 6794), 'tensorflow.name_scope', 'tf.name_scope', (["('q_' + name)"], {}), "('q_' + name)\n", (6781, 6794), True, 'import tensorflow as tf\n'), ((10129, 10148), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (10142, 10148), True, 'import tensorflow as tf\n'), ((12419, 12445), 'tensorflow.name_scope', 'tf.name_scope', (["('q_' + name)"], {}), "('q_' + name)\n", (12432, 12445), True, 'import tensorflow as tf\n'), ((13330, 13349), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (13343, 13349), True, 'import tensorflow as tf\n'), ((17315, 17354), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['maybe_weighted_log_prob'], {}), '(maybe_weighted_log_prob)\n', (17329, 17354), True, 'import tensorflow as tf\n'), ((19921, 19961), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['discrete_z_probs'], {'axis': '(1)'}), '(discrete_z_probs, axis=1)\n', (19935, 19961), True, 'import tensorflow as tf\n'), ((20023, 20064), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['discrete_qz_probs'], {'axis': '(1)'}), '(discrete_qz_probs, axis=1)\n', (20037, 20064), True, 'import tensorflow as tf\n'), ((21263, 21288), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['abs_grads'], {}), '(abs_grads)\n', (21277, 21288), True, 'import tensorflow as tf\n'), ((21290, 21314), 'tensorflow.reduce_max', 'tf.reduce_max', (['abs_grads'], {}), '(abs_grads)\n', (21303, 21314), True, 'import tensorflow as tf\n'), ((19282, 19327), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('Q(z_' + name + ')')", 'value'], {}), "('Q(z_' + name + ')', value)\n", (19299, 19327), True, 'import tensorflow as tf\n'), ((21081, 21103), 'tensorflow.reshape', 'tf.reshape', (['grad', '[-1]'], {}), '(grad, [-1])\n', (21091, 21103), True, 'import tensorflow as tf\n'), ((15040, 15102), 'tensorflow.reshape', 'tf.reshape', (['log_prob', '[sample_count, discrete_state_count, -1]'], {}), '(log_prob, [sample_count, discrete_state_count, -1])\n', (15050, 15102), True, 'import tensorflow as tf\n'), ((20210, 20254), 'tensorflow.abs', 'tf.abs', (['(discrete_z_probs - discrete_qz_probs)'], {}), '(discrete_z_probs - discrete_qz_probs)\n', (20216, 20254), True, 'import tensorflow as tf\n'), ((6403, 6449), 'edward1_utils.get_descendants', 'get_descendants', (['substituted_value', '[variable]'], {}), '(substituted_value, [variable])\n', (6418, 6449), False, 'from edward1_utils import get_ancestors, get_descendants\n'), ((15191, 15224), 'tensorflow.transpose', 'tf.transpose', (['log_prob', '[1, 0, 2]'], {}), '(log_prob, [1, 0, 2])\n', (15203, 15224), True, 'import tensorflow as tf\n'), ((7557, 7629), 'edward1_utils.get_descendants', 'get_descendants', (['name_to_substituted_value[observation_name]', '[variable]'], {}), '(name_to_substituted_value[observation_name], [variable])\n', (7572, 7629), False, 'from edward1_utils import get_ancestors, get_descendants\n')]
|
# This code calculates compressibility factor (z-factor) for natural hydrocarbon gases
# with 3 different methods. It implements the outcomes of the following paper:
# <br>
# <NAME>.; <NAME>., <NAME>.; <NAME>. & <NAME>, <NAME>.
# Using artificial neural networks to estimate the Z-Factor for natural hydrocarbon gases
# Journal of Petroleum Science and Engineering, 2010, 73, 248-257
# <br>
# The original paper can be found at:
# <a href="http://www.sciencedirect.com/science/article/pii/S0920410510001427">here</a>.
# <p>
# Artificial Neural Network (ANN)has been applied and two accurate non-iterative methods are presented.
# The Dranchuk and Abou-Kassem equation of state model, which is an iterative method, is
# also presented here for comparison. All the methods are:
# <ul>
# <li> ANN10: this method is the most accurate ANN method that presented in the paper.
# <li> ANN5: this method is the next accurate ANN method that presented in the paper.
# <li> DAK: this is the Dranchuk and Abou-Kassem equation of state.
# </ul>
#
# @author <a href="mailto:<EMAIL>"><NAME></a>
# @author <a href="mailto:<EMAIL>"><NAME>.</a>
import numpy as np
class CalculateZFactor:
# Minimum and Maximum values used in the neural network to normalize the input and output values.
def __init__(self):
pass
Ppr_min = 0
Ppr_max = 30
Tpr_min = 1
Tpr_max = 3
Z_min = 0.25194
Z_max = 2.66
# -------------START OF NETWORK 2-5-5-1 STRUCTURE-------------
# Weights and Biases for the 1st layer of neurons
wb1_5 = [
[-1.5949, 7.9284, 7.2925],
[-1.7917, 1.2117, 2.221],
[5.3547, -4.5424, -0.9846],
[4.6209, 2.2228, 8.9966],
[-2.3577, -0.1499, -1.5063]
]
# Weights and Biases for the 2nd layer of neurons
wb2_5 = [
[2.3617, -4.0858, 1.2062, -1.1518, -1.2915, 2.0626],
[10.0141, 9.8649, -11.4445, -123.0698, 7.5898, 95.1393],
[10.4103, 14.1358, -10.9061, -125.5468, 6.3448, 93.8916],
[-1.7794, 14.0742, -1.4195, 12.0894, -15.4537, -9.9439],
[-0.5988, -0.4354, -0.336, 9.9429, -0.4029, -8.3371]
]
# Weights and Biases for the 3rd layer of neurons
wb3_5 = [1.4979, -37.466, 37.7958, -7.7463, 6.9079, 2.8462]
# -------------END OF NETWORK 2-5-5-1 STRUCTURE-------------
# -------------START OF NETWORK 2-10-10-1 STRUCTURE-------------
# Weights and Biases for the 1st layer of neurons
wb1_10 = [
[2.2458, -2.2493, -3.7801],
[3.4663, 8.1167, -14.9512],
[5.0509, -1.8244, 3.5017],
[6.1185, -0.2045, 0.3179],
[1.3366, 4.9303, 2.2153],
[-2.8652, 1.1679, 1.0218],
[-6.5716, -0.8414, -8.1646],
[-6.1061, 12.7945, 7.2201],
[13.0884, 7.5387, 19.2231],
[70.7187, 7.6138, 74.6949]
]
# Weights and Biases for the 2nd layer of neurons
wb2_10 = [
[4.674, 1.4481, -1.5131, 0.0461, -0.1427, 2.5454, -6.7991, -0.5948, -1.6361, 0.5801, -3.0336],
[-6.7171, -0.7737, -5.6596, 2.975, 14.6248, 2.7266, 5.5043, -13.2659, -0.7158, 3.076, 15.9058],
[7.0753, -3.0128, -1.1779, -6.445, -1.1517, 7.3248, 24.7022, -0.373, 4.2665, -7.8302, -3.1938],
[2.5847, -12.1313, 21.3347, 1.2881, -0.2724, -1.0393, -19.1914, -0.263, -3.2677, -12.4085, -10.2058],
[-19.8404, 4.8606, 0.3891, -4.5608, -0.9258, -7.3852, 18.6507, 0.0403, -6.3956, -0.9853, 13.5862],
[16.7482, -3.8389, -1.2688, 1.9843, -0.1401, -8.9383, -30.8856, -1.5505, -4.7172, 10.5566, 8.2966],
[2.4256, 2.1989, 18.8572, -14.5366, 11.64, -19.3502, 26.6786, -8.9867, -13.9055, 5.195, 9.7723],
[-16.388, 12.1992, -2.2401, -4.0366, -0.368, -6.9203, -17.8283, -0.0244, 9.3962, -1.7107, -1.0572],
[14.6257, 7.5518, 12.6715, -12.7354, 10.6586, -43.1601, 1.3387, -16.3876, 8.5277, 45.9331, -6.6981],
[-6.9243, 0.6229, 1.6542, -0.6833, 1.3122, -5.588, -23.4508, 0.5679, 1.7561, -3.1352, 5.8675]
]
# Weights and Biases for the 3rd layer of neurons
wb3_10 = [-30.1311, 2.0902, -3.5296, 18.1108, -2.528, -0.7228, 0.0186, 5.3507, -0.1476, -5.0827, 3.9767]
# -------------END OF NETWORK 2-10-10-1 STRUCTURE-------------
# input and output of the 1st layer in 2-5-5-1 network. [,0] ==> inputs, [,1] ==> outputs
n1_5 = np.zeros((5, 2))
# input and output of the 2nd layer in 2-5-5-1 network. [,0] ==> inputs, [,1] ==> outputs
n2_5 = np.zeros((5, 2))
# input and output of the 1st layer in 2-10-10-1 network. [,0] ==> inputs, [,1] ==> outputs
n1_10 = np.zeros((10, 2))
# input and output of the 2nd layer in 2-10-10-1 network. [,0] ==> inputs, [,1] ==> outputs
n2_10 = np.zeros((10, 2))
TOLERANCE = 0.0001 # tolerance of DAK
MAX_NO_Iterations = 20 # Max number of iterations for DAK
def ANN10(self, Ppr: float, Tpr: float) -> float:
"""
        This method calculates the z-factor using a 2x10x10x1 Artificial Neural Network
based on training data obtained from Standing-Katz and Katz charts.
        It always produces a result, but accuracy is only assured for 0<Ppr<30 and 1<Tpr<3
:param Ppr: pseudo-reduced pressure
:param Tpr: pseudo-reduced temperature
:return: z factor
"""
Ppr_n = 2.0 / (self.Ppr_max - self.Ppr_min) * (Ppr - self.Ppr_min) - 1.0
Tpr_n = 2.0 / (self.Tpr_max - self.Tpr_min) * (Tpr - self.Tpr_min) - 1.0
for i in range(10):
self.n1_10[i][0] = Ppr_n * self.wb1_10[i][0] + Tpr_n * self.wb1_10[i][1] + self.wb1_10[i][2]
self.n1_10[i][1] = log_sig(self.n1_10[i][0])
for i in range(10):
self.n2_10[i][0] = 0
for j in range(len(self.n2_10)):
self.n2_10[i][0] += self.n1_10[j][1] * self.wb2_10[i][j]
self.n2_10[i][0] += self.wb2_10[i][10] # adding the bias value
self.n2_10[i][1] = log_sig(self.n2_10[i][0])
z_n = 0
for j in range(len(self.n2_10)):
z_n += self.n2_10[j][1] * self.wb3_10[j]
z_n += self.wb3_10[10] # adding the bias value
zAnn10 = (z_n + 1) * (self.Z_max - self.Z_min) / 2 + self.Z_min # reverse normalization of normalized z factor.
return zAnn10
def ANN5(self, Ppr: float, Tpr: float) -> float:
"""
This method calculates the z-factor using a 2x5x5x1 Artificial Neural Network
based on training data obtained from Standing-Katz and Katz charts.
        It always produces a result, but accuracy is only assured for 0<Ppr<30 and 1<Tpr<3
:param Ppr: pseudo-reduced pressure
:param Tpr: pseudo-reduced temperature
:return: z factor
"""
Ppr_n = 2.0 / (self.Ppr_max - self.Ppr_min) * (Ppr - self.Ppr_min) - 1.0
Tpr_n = 2.0 / (self.Tpr_max - self.Tpr_min) * (Tpr - self.Tpr_min) - 1.0
for i in range(5):
self.n1_5[i][0] = Ppr_n * self.wb1_5[i][0] + Tpr_n * self.wb1_5[i][1] + self.wb1_5[i][2]
self.n1_5[i][1] = log_sig(self.n1_5[i][0])
for i in range(5):
self.n2_5[i][0] = 0
for j in range(len(self.n2_5)):
self.n2_5[i][0] += self.n1_5[j][1] * self.wb2_5[i][j]
self.n2_5[i][0] += self.wb2_5[i][5] # adding the bias value
self.n2_5[i][1] = log_sig(self.n2_5[i][0])
z_n = 0
for j in range(len(self.n2_5)):
z_n += self.n2_5[j][1] * self.wb3_5[j]
z_n += self.wb3_5[5] # adding the bias value
        zAnn5 = (z_n + 1) * (self.Z_max - self.Z_min) / 2 + self.Z_min  # reverse normalization of normalized z factor.
return zAnn5
def DAK(self, Ppr: float, Tpr: float) -> float:
"""
This method calculates the z-factor using Dranchuk and Abou-Kassem (DAK) method.
:param Ppr: pseudo-reduced pressure
:param Tpr: pseudo-reduced temperature
:return: z factor
"""
A1 = 0.3265
A2 = -1.07
A3 = -0.5339
A4 = 0.01569
A5 = -0.05165
A6 = 0.5475
A7 = -0.7361
A8 = 0.1844
A9 = 0.1056
A10 = 0.6134
A11 = 0.721
z_new = 1.0
z_old = 1.0
den = calculate_density(Ppr, Tpr, z_old)
for i in range(1, self.MAX_NO_Iterations + 1):
z_old = z_new
z_new = 1 + \
(A1 + A2 / Tpr + A3 / Tpr ** 3 + A4 / Tpr ** 4 + A5 / Tpr ** 5) * den + \
(A6 + A7 / Tpr + A8 / Tpr ** 2) * den ** 2 - \
A9 * (A7 / Tpr + A8 / Tpr ** 2) * den ** 5 + \
A10 * (1 + A11 * den ** 2) * den ** 2 / Tpr ** 3 * np.exp(-1 * A11 * den ** 2)
den = calculate_density(Ppr, Tpr, z_new)
if np.abs(z_new - z_old) < self.TOLERANCE:
break
zDAK = z_new
return zDAK
def log_sig(x):
return 1 / (1 + np.exp(-1 * x))
def calculate_density(pr: float, tr: float, z: float):
return 0.27 * pr / tr / z
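# Usage sketch (illustrative inputs, not values from the paper): compare the three estimates.
if __name__ == '__main__':
    calc = CalculateZFactor()
    print(calc.ANN10(Ppr=2.0, Tpr=1.5), calc.ANN5(Ppr=2.0, Tpr=1.5), calc.DAK(Ppr=2.0, Tpr=1.5))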
|
[
"numpy.abs",
"numpy.zeros",
"numpy.exp"
] |
[((4319, 4335), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (4327, 4335), True, 'import numpy as np\n'), ((4441, 4457), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {}), '((5, 2))\n', (4449, 4457), True, 'import numpy as np\n'), ((4567, 4584), 'numpy.zeros', 'np.zeros', (['(10, 2)'], {}), '((10, 2))\n', (4575, 4584), True, 'import numpy as np\n'), ((4693, 4710), 'numpy.zeros', 'np.zeros', (['(10, 2)'], {}), '((10, 2))\n', (4701, 4710), True, 'import numpy as np\n'), ((8915, 8929), 'numpy.exp', 'np.exp', (['(-1 * x)'], {}), '(-1 * x)\n', (8921, 8929), True, 'import numpy as np\n'), ((8772, 8793), 'numpy.abs', 'np.abs', (['(z_new - z_old)'], {}), '(z_new - z_old)\n', (8778, 8793), True, 'import numpy as np\n'), ((8674, 8701), 'numpy.exp', 'np.exp', (['(-1 * A11 * den ** 2)'], {}), '(-1 * A11 * den ** 2)\n', (8680, 8701), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 14:52:32 2021
@author: Patrice
Simple utility script to read tiles from drive and compile a large tensor saved as an npy file.
Use only if you have enough ram to contain all your samples at once
"""
import numpy as np
import glob
import skimage.io as io
def tic():
#Homemade version of matlab tic and toc functions
import time
global startTime_for_tictoc
startTime_for_tictoc = time.time()
def toc():
import time
if 'startTime_for_tictoc' in globals():
print ("Elapsed time is " + str(time.time() - startTime_for_tictoc) + " seconds.")
else:
print ("Toc: start time not set")
tic()
folder='/media/patrice/DataDrive/SEE_ICE/JointTrain/'
OutputName='JointTensor5k'
tilesize=50
bands=4
classes=7
subsample=1#percentage subsample in each class
NormFactor=8192 #will save a normalised tensor ready for the CNN, better for memory to normalise now
UINT8=False #if true this will overide NormFactor and reduce the radiometry to 8-bit via normalisation by 16384
FP16=True #cast final tensor in float 16 for mixed precision training
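# Expected folder layout (inferred from the loops below, shown here as an illustration):
#   <folder>/C1/*.tif, <folder>/C2/*.tif, ..., <folder>/C<classes>/*.tif
# i.e. one sub-folder of tile images per class, named C1..C7.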
Itot=0
for c in range(1,classes+1):
class_folder=folder+'C'+str(c)+'/'
clist=glob.glob(class_folder+'*.tif')
Itot=Itot+len(clist)
print ('found '+str(Itot)+' tile samples')
MasterTensor=np.zeros((int(subsample*Itot),tilesize,tilesize,bands), dtype='float16')
MasterLabel=np.zeros((int(subsample*Itot)), dtype='float16')
tile=0
for c in range(1,classes+1):
class_folder=folder+'C'+str(c)+'/'
clist=glob.glob(class_folder+'*.tif')
idx = np.random.choice(np.arange(len(clist)), int(len(clist)*subsample), replace=False)
for i in range(len(idx)):
I=io.imread(clist[idx[i]]).reshape((1,tilesize,tilesize,bands))
Label=c
MasterLabel[tile] = Label
if UINT8 and not(FP16):
MasterTensor=np.uint8(MasterTensor)
MasterTensor[tile,:,:,:] = np.uint8(255*I/16384)
elif FP16 and UINT8:
MasterTensor=np.float16(MasterTensor)
I= np.uint8(255*I/16384)
MasterTensor[tile,:,:,:]=np.float16(I/255)
elif not(UINT8) and FP16:
MasterTensor=np.float16(MasterTensor)
MasterTensor[tile,:,:,:]=np.float16(I/NormFactor)
else:
MasterTensor=np.int16(MasterTensor)
MasterTensor[tile,:,:,:]=np.int16(I)
tile+=1
print('Class '+str(c)+' compiled')
if UINT8 and not(FP16):#downsample radiometry and save as uint8
np.save(folder+OutputName+'_T_uint8',MasterTensor)
np.save(folder+OutputName+'_L_uint8',MasterLabel)
elif FP16 and UINT8:#data will be float 16, but first they have been downsampled to 8bit before normalisation
np.save(folder+OutputName+'_T_uint8float16',MasterTensor)
np.save(folder+OutputName+'_L_uint8float16',MasterLabel)
elif not(UINT8) and FP16:
np.save(folder+OutputName+'_T_float16',MasterTensor)
np.save(folder+OutputName+'_L_float16',MasterLabel)
else:
np.save(folder+OutputName+'_T_int16',MasterTensor)
np.save(folder+OutputName+'_L_int16',MasterLabel)
#Output as npy arrays for both the tensor and the label
toc()
|
[
"numpy.float16",
"numpy.uint8",
"numpy.save",
"time.time",
"glob.glob",
"numpy.int16",
"skimage.io.imread"
] |
[((448, 459), 'time.time', 'time.time', ([], {}), '()\n', (457, 459), False, 'import time\n'), ((1210, 1243), 'glob.glob', 'glob.glob', (["(class_folder + '*.tif')"], {}), "(class_folder + '*.tif')\n", (1219, 1243), False, 'import glob\n'), ((1549, 1582), 'glob.glob', 'glob.glob', (["(class_folder + '*.tif')"], {}), "(class_folder + '*.tif')\n", (1558, 1582), False, 'import glob\n'), ((2535, 2590), 'numpy.save', 'np.save', (["(folder + OutputName + '_T_uint8')", 'MasterTensor'], {}), "(folder + OutputName + '_T_uint8', MasterTensor)\n", (2542, 2590), True, 'import numpy as np\n'), ((2590, 2644), 'numpy.save', 'np.save', (["(folder + OutputName + '_L_uint8')", 'MasterLabel'], {}), "(folder + OutputName + '_L_uint8', MasterLabel)\n", (2597, 2644), True, 'import numpy as np\n'), ((2760, 2822), 'numpy.save', 'np.save', (["(folder + OutputName + '_T_uint8float16')", 'MasterTensor'], {}), "(folder + OutputName + '_T_uint8float16', MasterTensor)\n", (2767, 2822), True, 'import numpy as np\n'), ((2822, 2883), 'numpy.save', 'np.save', (["(folder + OutputName + '_L_uint8float16')", 'MasterLabel'], {}), "(folder + OutputName + '_L_uint8float16', MasterLabel)\n", (2829, 2883), True, 'import numpy as np\n'), ((1890, 1912), 'numpy.uint8', 'np.uint8', (['MasterTensor'], {}), '(MasterTensor)\n', (1898, 1912), True, 'import numpy as np\n'), ((1952, 1977), 'numpy.uint8', 'np.uint8', (['(255 * I / 16384)'], {}), '(255 * I / 16384)\n', (1960, 1977), True, 'import numpy as np\n'), ((2919, 2976), 'numpy.save', 'np.save', (["(folder + OutputName + '_T_float16')", 'MasterTensor'], {}), "(folder + OutputName + '_T_float16', MasterTensor)\n", (2926, 2976), True, 'import numpy as np\n'), ((2976, 3032), 'numpy.save', 'np.save', (["(folder + OutputName + '_L_float16')", 'MasterLabel'], {}), "(folder + OutputName + '_L_float16', MasterLabel)\n", (2983, 3032), True, 'import numpy as np\n'), ((3045, 3100), 'numpy.save', 'np.save', (["(folder + OutputName + '_T_int16')", 'MasterTensor'], {}), "(folder + OutputName + '_T_int16', MasterTensor)\n", (3052, 3100), True, 'import numpy as np\n'), ((3100, 3154), 'numpy.save', 'np.save', (["(folder + OutputName + '_L_int16')", 'MasterLabel'], {}), "(folder + OutputName + '_L_int16', MasterLabel)\n", (3107, 3154), True, 'import numpy as np\n'), ((1721, 1745), 'skimage.io.imread', 'io.imread', (['clist[idx[i]]'], {}), '(clist[idx[i]])\n', (1730, 1745), True, 'import skimage.io as io\n'), ((2028, 2052), 'numpy.float16', 'np.float16', (['MasterTensor'], {}), '(MasterTensor)\n', (2038, 2052), True, 'import numpy as np\n'), ((2068, 2093), 'numpy.uint8', 'np.uint8', (['(255 * I / 16384)'], {}), '(255 * I / 16384)\n', (2076, 2093), True, 'import numpy as np\n'), ((2127, 2146), 'numpy.float16', 'np.float16', (['(I / 255)'], {}), '(I / 255)\n', (2137, 2146), True, 'import numpy as np\n'), ((2204, 2228), 'numpy.float16', 'np.float16', (['MasterTensor'], {}), '(MasterTensor)\n', (2214, 2228), True, 'import numpy as np\n'), ((2266, 2292), 'numpy.float16', 'np.float16', (['(I / NormFactor)'], {}), '(I / NormFactor)\n', (2276, 2292), True, 'import numpy as np\n'), ((2330, 2352), 'numpy.int16', 'np.int16', (['MasterTensor'], {}), '(MasterTensor)\n', (2338, 2352), True, 'import numpy as np\n'), ((2390, 2401), 'numpy.int16', 'np.int16', (['I'], {}), '(I)\n', (2398, 2401), True, 'import numpy as np\n'), ((572, 583), 'time.time', 'time.time', ([], {}), '()\n', (581, 583), False, 'import time\n')]
|
"""
Modular arithmetic
"""
from collections import defaultdict
import numpy as np
class ModInt:
"""
Integers of Z/pZ
"""
def __init__(self, a, n):
self.v = a % n
self.n = n
def __eq__(a, b):
if isinstance(b, ModInt):
return not bool(a - b)
else:
return NotImplemented
def __hash__(self):
return hash((self.v, self.n))
def __bool__(self):
return bool(self.v)
def __add__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return ModInt(a.v + b.v, a.n)
def __radd__(a, b):
assert isinstance(b, int)
return ModInt(a.v + b, a.n)
def __neg__(a): return ModInt(-a.v, a.n)
def __sub__(a, b): return ModInt(a.v - b.v, a.n)
def __mul__(a, b):
if isinstance(b, int):
return ModInt(b * a.v, a.n)
elif isinstance(b, ModInt):
assert a.n == b.n
return ModInt(a.v * b.v, a.n)
return NotImplemented
def __rmul__(a, b):
return a * b
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def inv(self):
if self.v == 0:
raise ZeroDivisionError
return ModInt(ModInt._inv(self.v, self.n), self.n)
@staticmethod
def _inv(k, n):
k %= n
if k == 1:
return k
return (n - n // k) * ModInt._inv(n % k, n) % n
def __truediv__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return a * b.inv()
def __rtruediv__(a, k):
assert isinstance(k, int)
return ModInt(k, a.n) / a
@staticmethod
def extended_euclid(a, b):
"""Extended Euclid algorithm
Return
------
x : int
y : int
a * x + b * y = gcd(a, b)
"""
A, B = a, b
sa, sb = (1 if a >= 0 else -1), (1 if b >= 0 else -1)
xp, yp = 1, 0
x, y = 0, 1
while b:
assert A * xp + B * yp == a
assert A * x + B * y == b
r = a // b
a, b = b, a % b
x, xp = xp - r * x, x
y, yp = yp - r * y, y
return sa * xp, sb * yp
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.v, self.n)
def __str__(self):
return '%s' % self.v
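# Example (illustrative): arithmetic in Z/7Z with ModInt.
#   a = ModInt(3, 7); b = ModInt(5, 7)
#   a * b   -> ModInt(1, 7)     since 3*5 = 15 = 1 (mod 7)
#   a / b   -> ModInt(2, 7)     since 2*5 = 10 = 3 (mod 7)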
class Polynomial:
"""
Generic class for polynomials
Works with int, float and ModInt
"""
def __len__(self):
return len(self.C)
def trim(C):
i = len(C) - 1
while i >= 0 and not C[i]:
i -= 1
return C[:i + 1]
def __init__(self, C=None):
if C is None:
C = []
self.C = Polynomial.trim(C)
@property
def deg(self):
return len(self.C) - 1
def prime(self): return Polynomial([i * self[i]
for i in range(1, len(self))])
def eval(self, x):
if not self:
return 0
v = self[-1]
for c in self[-2::-1]:
v = v * x + c
return v
def shift(self, d): return Polynomial(
[0 * self[0]] * d + self.C if self else [])
def __eq__(P, Q):
return P.deg == Q.deg and all(cP == cQ for cP, cQ in zip(P, Q))
def __hash__(self):
return hash(tuple(self.C))
def __call__(self, x): return Polynomial.eval(self, x)
def __getitem__(self, x): return self.C[x]
def __neg__(P): return Polynomial([-c for c in P.C])
def __add__(P, Q):
if len(P.C) < len(Q.C):
P, Q = Q, P
return Polynomial([P[d] + Q[d] for d in range(len(Q))] + P[len(Q):])
def __sub__(P, Q): return P + (-Q)
def _mulpoly(P, Q):
assert isinstance(Q, Polynomial)
return Polynomial([sum(P[k] * Q[d - k]
for k in range(max(0, d + 1 - len(Q)),
min(d + 1, len(P)))
) for d in range(len(P) + len(Q) - 1)])
def _mulscal(P, k):
return Polynomial([k * c for c in P])
def __mul__(P, Q):
if isinstance(Q, Polynomial):
return P._mulpoly(Q)
return P._mulscal(Q)
def __rmul__(P, Q):
return P * Q
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def __iter__(self):
yield from self.C
def euclidean_division(A, B):
Q = [0 * B[0]] * max(0, len(A) - len(B) + 1)
while len(A.C) >= len(B.C):
Q[len(A.C) - len(B.C)] = A[-1] / B[-1]
A -= B.shift(len(A) - len(B)) * (A[-1] / B[-1])
return Polynomial(Q), A
def __floordiv__(A, B):
assert isinstance(B, Polynomial)
return A.euclidean_division(B)[0]
def __mod__(A, B):
"""
Polynomial euclidian division
or modular reduction
"""
if isinstance(B, Polynomial):
return A.euclidean_division(B)[1]
else:
assert isinstance(B, int)
assert all(isinstance(c, int) for c in A)
return A.reduceP(B)
def __lt__(A, B): return A.deg < B.deg
def __bool__(self): return bool(self.C)
def gcd(A, B):
while B:
A, B = B, A % B
return A * (1 / A[-1])
@staticmethod
def gaussianElimKer(M, zero, one):
"""
Outputs an element of the kernel of M
zero and one are elements of the same field
"""
# V satisfies the invariant
# M = V M_0
V = [Polynomial([zero] * i + [one]) for i in range(len(M))]
pivots = [None] * (len(M) + 1)
for l in range(len(M)):
while M[l].deg >= 0:
idp = M[l].deg
if pivots[idp] is None:
pivots[idp] = l
break
else:
c = M[l][idp] / M[pivots[idp]][idp]
M[l] -= c * M[pivots[idp]]
V[l] -= c * V[pivots[idp]]
else:
# If a line is null, we found an element of the kernel
return V[l]
return None
def computeQ(P):
        # only for Z/pZ[X] square-free polynomials, for p prime
p = P[0].n
# We ignore the image of 1 because (F-Id)(1) = 0
M = [Polynomial(([ModInt(0, p)] * (i * p)) + [ModInt(1, p)]) % P
for i in range(1, P.deg)]
# M -= Id
for i in range(1, P.deg):
M[i - 1] -= Polynomial([ModInt(0, p)] * i + [ModInt(1, p)])
# We find an element of the kernel by Gaussian elimination
pQ = Polynomial.gaussianElimKer(M, ModInt(0, p), ModInt(1, p))
        # We put back the 1 that was removed
return pQ.shift(1) if pQ is not None else None
def factor_unit(P):
"""
Berlekamp's algorithm
only in Z/pZ
"""
assert all(isinstance(c, ModInt) for c in P)
assert len(set(c.n for c in P)) == 1
if P.deg == 1:
return defaultdict(int, {P: 1})
p = P[0].n
S = Polynomial.gcd(P, P.prime())
if S.deg == P.deg:
# P' = 0 so P = R^p
R = Polynomial(P.C[::p])
return defaultdict(int,
{D: p * v
for D, v in Polynomial.factor_unit(R).items()})
else:
factors = defaultdict(int)
if S.deg:
for D, v in S.factor_unit().items():
factors[D] += v
P //= S
# P is now square-free
# We look for Q in Ker(F-Id) \ {1}
Q = Polynomial.computeQ(P)
if Q is None:
# P is irreducible
factors[P] += 1
else:
# P is the product of the gcd(P, Q-i)
# that are factored recursively
for i in range(p):
D = Polynomial.gcd(P, Q - Polynomial([ModInt(i, p)]))
if D.deg:
for DD, v in D.factor_unit().items():
factors[DD] += v
return factors
def factor(P):
"""
Factorization of P
only in Z/pZ
"""
cd = P[-1]
if P.deg == 0:
return (cd, defaultdict(int))
P = P * (1 / cd)
return (cd, P.factor_unit())
@staticmethod
def ppfactors(fz):
c, Ds = fz
a = str(c) if not Ds or c * c != c else ''
l = [a] + [(str(D) if D.deg == 1 and not D[0] else ('(%s)' % D))
+ (v > 1) * ('^%s' % v)
for D, v in sorted(Ds.items(),
key=lambda e: (e[0].deg, e[1]))]
return '⋅'.join(i for i in l if i)
def reduceP(P, p):
return Polynomial([ModInt(c, p) for c in P])
@staticmethod
def sign_changes(l):
return sum(a * b < 0 for a, b in zip(l, l[1:]))
def isreal(P):
return not any(isinstance(c, ModInt) for c in P)
def isinteger(P):
return all(isinstance(c, int) for c in P)
def sturm(P):
"""
Number of distinct real roots
by Sturm's theorem.
Only works on int or float coefficients
"""
inf = float('inf')
assert P.isreal()
A = P
B = A.prime()
l1 = [A(-inf)]
l2 = [A(inf)]
while B:
l1.append(B(-inf))
l2.append(B(inf))
B, A = -A % B, B
return Polynomial.sign_changes(l1) - Polynomial.sign_changes(l2)
@property
def r1(P):
"""
Number of real roots with multiplicity
"""
assert P.isreal()
ans = 0
s = P.sturm()
while s:
ans += s
P = P.gcd(P.prime())
s = P.sturm()
return ans
@property
def r2(P):
ans = P.deg - P.r1
assert ans % 2 == 0
return ans // 2
def sylvester(P, Q):
"""
Sylvester's matrix
"""
assert P.isreal()
assert Q.isreal()
p = P.deg
q = Q.deg
P = np.array(P)
Q = np.array(Q)
m = np.zeros((p + q, p + q))
for i in range(q):
m[i][i:i + p + 1] = P
for i in range(p):
m[q + i][i:i + q + 1] = Q
return m
def resultant(P, Q):
"""
Resultant of two real polynomials
"""
return np.linalg.det(P.sylvester(Q))
@property
def disc(P):
"""
Discriminant of a real polynomial
"""
ans = P.resultant(P.prime()) / P[-1]
if P.isinteger():
ans = int(ans.round())
if P.deg % 4 in [0, 1]:
return ans
else:
return -ans
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.C)
@staticmethod
def _formatmonomial(c, d):
assert c
a = b = ''
if c * c != c or not d:
a = str(c) + (d != 0) * '⋅'
if d > 1:
b = 'X^' + str(d)
elif d == 1:
b = 'X'
return a + b
def __str__(self):
if not self.C:
return "0"
ans = '+'.join(self._formatmonomial(c, d)
for (d, c) in reversed(list(enumerate(self))) if c)
return ans.replace("+-", "-").replace('-1⋅', '-')
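# Usage sketch (assumed example, not part of the original module): Berlekamp factorization over Z/5Z.
if __name__ == '__main__':
    p = 5
    P = Polynomial([ModInt(c, p) for c in [1, 0, 1]])   # coefficients of 1 + X^2 over Z/5Z
    print(Polynomial.ppfactors(P.factor()))              # X^2 + 1 = (X+2)(X+3) mod 5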
|
[
"numpy.zeros",
"collections.defaultdict",
"numpy.array"
] |
[((10508, 10519), 'numpy.array', 'np.array', (['P'], {}), '(P)\n', (10516, 10519), True, 'import numpy as np\n'), ((10532, 10543), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (10540, 10543), True, 'import numpy as np\n'), ((10556, 10580), 'numpy.zeros', 'np.zeros', (['(p + q, p + q)'], {}), '((p + q, p + q))\n', (10564, 10580), True, 'import numpy as np\n'), ((7378, 7402), 'collections.defaultdict', 'defaultdict', (['int', '{P: 1}'], {}), '(int, {P: 1})\n', (7389, 7402), False, 'from collections import defaultdict\n'), ((7754, 7770), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (7765, 7770), False, 'from collections import defaultdict\n'), ((8671, 8687), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (8682, 8687), False, 'from collections import defaultdict\n')]
|
import numpy as np
class Average:
@staticmethod
def aggregate(gradients):
assert len(gradients) > 0, "Empty list of gradient to aggregate"
if len(gradients) > 1:
return np.mean(gradients, axis=0)
else:
return gradients[0]
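# Example (illustrative): element-wise mean of two gradient arrays.
#   Average.aggregate([np.array([1., 2.]), np.array([3., 4.])])  ->  array([2., 3.])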
|
[
"numpy.mean"
] |
[((209, 235), 'numpy.mean', 'np.mean', (['gradients'], {'axis': '(0)'}), '(gradients, axis=0)\n', (216, 235), True, 'import numpy as np\n')]
|
# *******************************************************************************
#
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# *******************************************************************************
import math, numpy
from coppertop.pipe import *
from coppertop.std.linalg import tvarray
@coppertop
def cov(A:tvarray) -> tvarray:
return numpy.cov(A).view(tvarray)
@coppertop
def mean(ndOrPy):
# should do full numpy?
return numpy.mean(ndOrPy)
@coppertop
def std(ndOrPy, dof=0):
# should do full numpy? std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=<no value>)
    return numpy.std(ndOrPy, ddof=dof)
@coppertop
def logisticCDF(x, mu, s):
return 1 / (1 + math.exp(-1 * (x - mu) / s))
@coppertop
def logisticCDFInv(p, mu, s):
return mu + -s * math.log(1 / p - 1)
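# Quick sanity check (illustrative values): logisticCDFInv inverts logisticCDF.
#   p = logisticCDF(1.3, 0.0, 2.0)        # ~0.657
#   logisticCDFInv(p, 0.0, 2.0)           # ~1.3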
|
[
"math.exp",
"numpy.std",
"numpy.mean",
"math.log",
"numpy.cov"
] |
[((464, 482), 'numpy.mean', 'numpy.mean', (['ndOrPy'], {}), '(ndOrPy)\n', (474, 482), False, 'import math, numpy\n'), ((627, 649), 'numpy.std', 'numpy.std', (['ndOrPy', 'dof'], {}), '(ndOrPy, dof)\n', (636, 649), False, 'import math, numpy\n'), ((368, 380), 'numpy.cov', 'numpy.cov', (['A'], {}), '(A)\n', (377, 380), False, 'import math, numpy\n'), ((709, 736), 'math.exp', 'math.exp', (['(-1 * (x - mu) / s)'], {}), '(-1 * (x - mu) / s)\n', (717, 736), False, 'import math, numpy\n'), ((801, 820), 'math.log', 'math.log', (['(1 / p - 1)'], {}), '(1 / p - 1)\n', (809, 820), False, 'import math, numpy\n')]
|
import numpy as np
from int_tabulated import *
def GetNDVItoDate(NDVI, Time, Start_End, bpy, DaysPerBand, CurrentBand):
#;
    #;jzhu,8/9/2011,This program calculates the total NDVI integration (NDVI*day) from the start of season to CurrentBand, where CurrentBand is the day index of the day of interest.
#
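    # Assumed structure of Start_End (inferred from the lookups below, shown for clarity):
    #   Start_End = {'SOST': np.array([sos_band_index]), 'SOSN': np.array([sos_ndvi_value])}
    # i.e. 1-element arrays holding the start-of-season time index and NDVI value.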
FILL=-1.0
ny=1
#;DaysPerBand=365./bpy
NowT=CurrentBand #CurrentBand is the index of NDVI, the index start from 0
NowN=NDVI[NowT]
SeasonLength=NowT-Start_End['SOST'][0]
NDVItoDate=np.zeros(ny)+FILL
if SeasonLength < 0:
SeasonLength = FILL
if SeasonLength > 0 and SeasonLength < bpy: #<2>
#index range
segl=int(np.ceil(Start_End['SOST'][0]))
segh=int(np.floor(NowT )) + 1
XSeg= Time[ segl: segh ] #Xseg[Start_End['SOST'][0]:NowT]
NDVILine= NDVI[ segl : segh ]
#if XSeg[0] != Start_End['SOST'][0]: #<3>
# XSeg = np.concatenate([ np.array( [Start_End['SOST'][0] ] ), XSeg])
# NDVILine = np.concatenate([ np.array([ Start_End['SOSN'][0] ] ), NDVILine])
#<3>
#if XSeg[len(XSeg)-1] != NowT : #<4>
# XSeg = np.concatenate( [XSeg, np.array([NowT]) ] )
# NDVILine= np.concatenate( [NDVILine, np.array([NowN]) ] )
#<4>
BaseLine=XSeg*0+Start_End['SOSN'][0]
# get rid of duplicated point and sort the XSeg
XSeg, index=np.unique(XSeg,return_index=True)
NDVILine=NDVILine[index]
BaseLine=BaseLine[index]
IntNDVI=Int_Tabulated(XSeg, NDVILine)
IntBase=Int_Tabulated(XSeg, BaseLine)
NDVItoDate[0]=(IntNDVI-IntBase)*DaysPerBand
else: #<2>
NDVItoDate[0]=FILL
NDVItoDate={'NDVItoDate':NDVItoDate[0],'NowT':NowT,'NowN':NowN}
return NDVItoDate
|
[
"numpy.floor",
"numpy.zeros",
"numpy.ceil",
"numpy.unique"
] |
[((529, 541), 'numpy.zeros', 'np.zeros', (['ny'], {}), '(ny)\n', (537, 541), True, 'import numpy as np\n'), ((1508, 1542), 'numpy.unique', 'np.unique', (['XSeg'], {'return_index': '(True)'}), '(XSeg, return_index=True)\n', (1517, 1542), True, 'import numpy as np\n'), ((703, 732), 'numpy.ceil', 'np.ceil', (["Start_End['SOST'][0]"], {}), "(Start_End['SOST'][0])\n", (710, 732), True, 'import numpy as np\n'), ((754, 768), 'numpy.floor', 'np.floor', (['NowT'], {}), '(NowT)\n', (762, 768), True, 'import numpy as np\n')]
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, plt2MD, \
pandasDF2MD, keyValues2MD
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import from_under
import pandas as pd
import numpy as np
from scipy.stats import mannwhitneyu
import itertools
def mann_whitney_test(table, group_by=None, **params):
check_required_parameters(_mann_whitney_test, params, ['table'])
params = get_default_from_parameters_if_required(params, _mann_whitney_test)
if group_by is not None:
return _function_by_group(_mann_whitney_test, table, group_by=group_by, **params)
else:
return _mann_whitney_test(table, **params)
def _mann_whitney_test(table, response_col, factor_col, use_continuity=True):
result = dict()
rb = BrtcReprBuilder()
rb.addMD("""## Mann Whitney test Result""")
groups = dict()
uniq_factor = table[factor_col].unique()
for name in uniq_factor:
groups[name] = np.array(table[response_col])[np.where(table[factor_col] == name)]
for name1, name2 in itertools.combinations(uniq_factor, 2):
stats, pval = mannwhitneyu(groups[name1], groups[name2], use_continuity=use_continuity)
rb.addMD(strip_margin("""
| ## {name1} vs {name2}
|
| ### Statistics U value: {stats}
|
| ### P value: {pval}
""".format(name1=name1, name2=name2, stats=stats, pval=pval)))
name = str(name1) + '_' + str(name2)
result[name] = dict()
result[name]['Statistics'] = stats
result[name]['P value'] = pval
result['_repr_brtc_'] = rb.get()
return {'result': result}
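# --- Usage sketch (added for illustration, not part of the original file) ---
# Builds a tiny two-group table and runs the un-grouped test directly; the column
# names 'value' and 'group' are made up, and the brightics package must be importable.
if __name__ == '__main__':
    example_table = pd.DataFrame({
        'value': [1.2, 3.4, 2.2, 5.1, 4.8, 6.0],
        'group': ['a', 'a', 'a', 'b', 'b', 'b']})
    res = _mann_whitney_test(example_table, response_col='value', factor_col='group')
    print(res['result']['a_b'])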
|
[
"brightics.common.utils.get_default_from_parameters_if_required",
"scipy.stats.mannwhitneyu",
"brightics.common.repr.BrtcReprBuilder",
"itertools.combinations",
"numpy.where",
"numpy.array",
"brightics.common.utils.check_required_parameters",
"brightics.common.groupby._function_by_group"
] |
[((1165, 1229), 'brightics.common.utils.check_required_parameters', 'check_required_parameters', (['_mann_whitney_test', 'params', "['table']"], {}), "(_mann_whitney_test, params, ['table'])\n", (1190, 1229), False, 'from brightics.common.utils import check_required_parameters\n'), ((1248, 1315), 'brightics.common.utils.get_default_from_parameters_if_required', 'get_default_from_parameters_if_required', (['params', '_mann_whitney_test'], {}), '(params, _mann_whitney_test)\n', (1287, 1315), False, 'from brightics.common.utils import get_default_from_parameters_if_required\n'), ((1610, 1627), 'brightics.common.repr.BrtcReprBuilder', 'BrtcReprBuilder', ([], {}), '()\n', (1625, 1627), False, 'from brightics.common.repr import BrtcReprBuilder, strip_margin, plt2MD, pandasDF2MD, keyValues2MD\n'), ((1889, 1927), 'itertools.combinations', 'itertools.combinations', (['uniq_factor', '(2)'], {}), '(uniq_factor, 2)\n', (1911, 1927), False, 'import itertools\n'), ((1365, 1439), 'brightics.common.groupby._function_by_group', '_function_by_group', (['_mann_whitney_test', 'table'], {'group_by': 'group_by'}), '(_mann_whitney_test, table, group_by=group_by, **params)\n', (1383, 1439), False, 'from brightics.common.groupby import _function_by_group\n'), ((1951, 2024), 'scipy.stats.mannwhitneyu', 'mannwhitneyu', (['groups[name1]', 'groups[name2]'], {'use_continuity': 'use_continuity'}), '(groups[name1], groups[name2], use_continuity=use_continuity)\n', (1963, 2024), False, 'from scipy.stats import mannwhitneyu\n'), ((1798, 1827), 'numpy.array', 'np.array', (['table[response_col]'], {}), '(table[response_col])\n', (1806, 1827), True, 'import numpy as np\n'), ((1828, 1863), 'numpy.where', 'np.where', (['(table[factor_col] == name)'], {}), '(table[factor_col] == name)\n', (1836, 1863), True, 'import numpy as np\n')]
|
import tensorflow as tf
from capsule.utils import squash
import numpy as np
layers = tf.keras.layers
models = tf.keras.models
class GammaCapsule(tf.keras.Model):
def __init__(self, in_capsules, in_dim, out_capsules, out_dim, stdev=0.2, routing_iterations=2, use_bias=True, name=''):
super(GammaCapsule, self).__init__(name=name)
self.in_capsules = in_capsules
self.in_dim = in_dim
self.out_capsules = out_capsules
self.out_dim = out_dim
self.routing_iterations = routing_iterations
self.use_bias = use_bias
with tf.name_scope(self.name):
w_init = tf.random_normal_initializer(stddev=stdev)
self.W = tf.Variable(name="W", initial_value=w_init(shape=(1, out_capsules, in_capsules, out_dim, in_dim),
dtype='float32'),
trainable=True)
if self.use_bias:
bias_init = tf.constant_initializer(0.1)
self.bias = tf.Variable(name="bias", initial_value=bias_init(shape=(1, out_capsules, out_dim),
dtype='float32'),
trainable=True)
def call(self, u):
"""
param: u - (batch_size, in_caps, in_dim)
"""
batch_size = tf.shape(u)[0]
u_norm = tf.norm(u, axis=-1) # (batch_size, in_caps)
# Reshape u into (batch_size, out_caps, in_caps, out_dim, in_dim)
u = tf.expand_dims(u, 1)
u = tf.expand_dims(u, 3)
u = tf.tile(u, [1, self.out_capsules, 1, 1, 1])
u = tf.tile(u, [1, 1, 1, self.out_dim, 1])
# Duplicate transformation matrix for each batch
w = tf.tile(self.W, [batch_size, 1, 1, 1, 1])
# Dotwise product between u and w to get all votes
# shape = (batch_size, out_caps, in_caps, out_dim)
u_hat = tf.reduce_sum(u * w, axis=-1)
# Ensure that ||u_hat|| <= ||v_i||
u_hat_norm = tf.norm(u_hat, axis=-1, keepdims=True)
u_norm = tf.expand_dims(u_norm, axis=1)
u_norm = tf.expand_dims(u_norm, axis=3)
u_norm = tf.tile(u_norm, [1, self.out_capsules, 1, self.out_dim])
new_u_hat_norm = tf.math.minimum(u_hat_norm, u_norm)
u_hat = u_hat / u_hat_norm * new_u_hat_norm
# Scaled-distance-agreement routing
        bias = tf.tile(self.bias, [batch_size, 1, 1]) if self.use_bias else 0.0  # self.bias only exists when use_bias=True
b_ij = tf.zeros(shape=[batch_size, self.out_capsules, self.in_capsules, 1])
for r in range(self.routing_iterations):
c_ij = tf.nn.softmax(b_ij, axis=1)
c_ij_tiled = tf.tile(c_ij, [1, 1, 1, self.out_dim])
s_j = tf.reduce_sum(c_ij_tiled * u_hat, axis=2) + bias
v_j = squash(s_j)
if(r < self.routing_iterations - 1):
v_j = tf.expand_dims(v_j, 2)
v_j = tf.tile(v_j, [1, 1, self.in_capsules, 1]) # (batch_size, out_caps, in_caps, out_dim)
# Calculate scale factor t
p_p = 0.9
d = tf.norm(v_j - u_hat, axis=-1, keepdims=True)
d_o = tf.reduce_mean(tf.reduce_mean(d))
d_p = d_o * 0.5
t = tf.constant(np.log(p_p * (self.out_capsules - 1)) - np.log(1 - p_p), dtype=tf.float32) \
/ (d_p - d_o + 1e-12)
t = tf.expand_dims(t, axis=-1)
# Calc log prior using inverse distances
b_ij = t * d
return v_j
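# --- Usage sketch (added for illustration, not part of the original file) ---
# All capsule sizes below are arbitrary example values; requires the repo's
# capsule.utils.squash to be importable.
if __name__ == '__main__':
    capsule = GammaCapsule(in_capsules=8, in_dim=4, out_capsules=3, out_dim=6, name='gamma_demo')
    u = tf.random.normal([2, 8, 4])       # (batch_size, in_caps, in_dim)
    v = capsule(u)
    print(v.shape)                        # expected: (2, 3, 6)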
|
[
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"numpy.log",
"tensorflow.constant_initializer",
"tensorflow.reduce_mean",
"tensorflow.tile",
"tensorflow.zeros",
"tensorflow.random_normal_initializer",
"tensorflow.shape",
"capsule.utils.squash",
"tensorflow.name_scope",
"tensorflow.norm",
"tensorflow.math.minimum",
"tensorflow.expand_dims"
] |
[((1410, 1429), 'tensorflow.norm', 'tf.norm', (['u'], {'axis': '(-1)'}), '(u, axis=-1)\n', (1417, 1429), True, 'import tensorflow as tf\n'), ((1545, 1565), 'tensorflow.expand_dims', 'tf.expand_dims', (['u', '(1)'], {}), '(u, 1)\n', (1559, 1565), True, 'import tensorflow as tf\n'), ((1579, 1599), 'tensorflow.expand_dims', 'tf.expand_dims', (['u', '(3)'], {}), '(u, 3)\n', (1593, 1599), True, 'import tensorflow as tf\n'), ((1614, 1657), 'tensorflow.tile', 'tf.tile', (['u', '[1, self.out_capsules, 1, 1, 1]'], {}), '(u, [1, self.out_capsules, 1, 1, 1])\n', (1621, 1657), True, 'import tensorflow as tf\n'), ((1670, 1708), 'tensorflow.tile', 'tf.tile', (['u', '[1, 1, 1, self.out_dim, 1]'], {}), '(u, [1, 1, 1, self.out_dim, 1])\n', (1677, 1708), True, 'import tensorflow as tf\n'), ((1779, 1820), 'tensorflow.tile', 'tf.tile', (['self.W', '[batch_size, 1, 1, 1, 1]'], {}), '(self.W, [batch_size, 1, 1, 1, 1])\n', (1786, 1820), True, 'import tensorflow as tf\n'), ((1956, 1985), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(u * w)'], {'axis': '(-1)'}), '(u * w, axis=-1)\n', (1969, 1985), True, 'import tensorflow as tf\n'), ((2051, 2089), 'tensorflow.norm', 'tf.norm', (['u_hat'], {'axis': '(-1)', 'keepdims': '(True)'}), '(u_hat, axis=-1, keepdims=True)\n', (2058, 2089), True, 'import tensorflow as tf\n'), ((2107, 2137), 'tensorflow.expand_dims', 'tf.expand_dims', (['u_norm'], {'axis': '(1)'}), '(u_norm, axis=1)\n', (2121, 2137), True, 'import tensorflow as tf\n'), ((2155, 2185), 'tensorflow.expand_dims', 'tf.expand_dims', (['u_norm'], {'axis': '(3)'}), '(u_norm, axis=3)\n', (2169, 2185), True, 'import tensorflow as tf\n'), ((2203, 2259), 'tensorflow.tile', 'tf.tile', (['u_norm', '[1, self.out_capsules, 1, self.out_dim]'], {}), '(u_norm, [1, self.out_capsules, 1, self.out_dim])\n', (2210, 2259), True, 'import tensorflow as tf\n'), ((2285, 2320), 'tensorflow.math.minimum', 'tf.math.minimum', (['u_hat_norm', 'u_norm'], {}), '(u_hat_norm, u_norm)\n', (2300, 2320), True, 'import tensorflow as tf\n'), ((2433, 2471), 'tensorflow.tile', 'tf.tile', (['self.bias', '[batch_size, 1, 1]'], {}), '(self.bias, [batch_size, 1, 1])\n', (2440, 2471), True, 'import tensorflow as tf\n'), ((2487, 2555), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[batch_size, self.out_capsules, self.in_capsules, 1]'}), '(shape=[batch_size, self.out_capsules, self.in_capsules, 1])\n', (2495, 2555), True, 'import tensorflow as tf\n'), ((587, 611), 'tensorflow.name_scope', 'tf.name_scope', (['self.name'], {}), '(self.name)\n', (600, 611), True, 'import tensorflow as tf\n'), ((634, 676), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stdev'}), '(stddev=stdev)\n', (662, 676), True, 'import tensorflow as tf\n'), ((1378, 1389), 'tensorflow.shape', 'tf.shape', (['u'], {}), '(u)\n', (1386, 1389), True, 'import tensorflow as tf\n'), ((2624, 2651), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['b_ij'], {'axis': '(1)'}), '(b_ij, axis=1)\n', (2637, 2651), True, 'import tensorflow as tf\n'), ((2677, 2715), 'tensorflow.tile', 'tf.tile', (['c_ij', '[1, 1, 1, self.out_dim]'], {}), '(c_ij, [1, 1, 1, self.out_dim])\n', (2684, 2715), True, 'import tensorflow as tf\n'), ((2801, 2812), 'capsule.utils.squash', 'squash', (['s_j'], {}), '(s_j)\n', (2807, 2812), False, 'from capsule.utils import squash\n'), ((981, 1009), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (1004, 1009), True, 'import tensorflow as tf\n'), ((2734, 2775), 'tensorflow.reduce_sum', 'tf.reduce_sum', 
(['(c_ij_tiled * u_hat)'], {'axis': '(2)'}), '(c_ij_tiled * u_hat, axis=2)\n', (2747, 2775), True, 'import tensorflow as tf\n'), ((2885, 2907), 'tensorflow.expand_dims', 'tf.expand_dims', (['v_j', '(2)'], {}), '(v_j, 2)\n', (2899, 2907), True, 'import tensorflow as tf\n'), ((2930, 2971), 'tensorflow.tile', 'tf.tile', (['v_j', '[1, 1, self.in_capsules, 1]'], {}), '(v_j, [1, 1, self.in_capsules, 1])\n', (2937, 2971), True, 'import tensorflow as tf\n'), ((3121, 3165), 'tensorflow.norm', 'tf.norm', (['(v_j - u_hat)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(v_j - u_hat, axis=-1, keepdims=True)\n', (3128, 3165), True, 'import tensorflow as tf\n'), ((3426, 3452), 'tensorflow.expand_dims', 'tf.expand_dims', (['t'], {'axis': '(-1)'}), '(t, axis=-1)\n', (3440, 3452), True, 'import tensorflow as tf\n'), ((3203, 3220), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d'], {}), '(d)\n', (3217, 3220), True, 'import tensorflow as tf\n'), ((3286, 3323), 'numpy.log', 'np.log', (['(p_p * (self.out_capsules - 1))'], {}), '(p_p * (self.out_capsules - 1))\n', (3292, 3323), True, 'import numpy as np\n'), ((3326, 3341), 'numpy.log', 'np.log', (['(1 - p_p)'], {}), '(1 - p_p)\n', (3332, 3341), True, 'import numpy as np\n')]
|
# code-checked
# server-checked
import cv2
import numpy as np
import os
import os.path as osp
import random
import torch
from torch.utils import data
import pickle
def generate_scale_label(image, label):
f_scale = 0.5 + random.randint(0, 16)/10.0
image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
return image, label
def id2trainId(label, id_to_trainid):
label_copy = label.copy()
for k, v in id_to_trainid.items():
label_copy[label == k] = v
return label_copy
################################################################################
# Cityscapes
################################################################################
class DatasetCityscapesAugmentation(data.Dataset):
def __init__(self, root, list_path, max_iters=None, crop_size=(512, 512), ignore_label=255):
self.root = root
self.list_path = list_path
self.crop_h, self.crop_w = crop_size
self.ignore_label = ignore_label
self.img_ids = [i_id.strip().split() for i_id in open(list_path)]
print ("DatasetCityscapesAugmentation - num unique examples: %d" % len(self.img_ids))
        if max_iters is not None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
print ("DatasetCityscapesAugmentation - num examples: %d" % len(self.img_ids))
self.files = []
for item in self.img_ids:
image_path, label_path = item
name = osp.splitext(osp.basename(label_path))[0]
img_file = osp.join(self.root, image_path)
label_file = osp.join(self.root, label_path)
self.files.append({
"img": img_file,
"label": label_file,
"name": name,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image, label = generate_scale_label(image, label)
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
img_h, img_w = label.shape
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(self.ignore_label,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
image = image.transpose((2, 0, 1))
flip = np.random.choice(2)*2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image.copy(), label.copy(), np.array(size), name
class DatasetCityscapesEval(data.Dataset):
def __init__(self, root, list_path, ignore_label=255):
self.root = root
self.list_path = list_path
self.ignore_label = ignore_label
self.img_ids = [i_id.strip().split() for i_id in open(list_path)]
print ("DatasetCityscapesEval - num examples: %d" % len(self.img_ids))
self.files = []
for item in self.img_ids:
image_path, label_path = item
name = osp.splitext(osp.basename(label_path))[0]
img_file = osp.join(self.root, image_path)
label_file = osp.join(self.root, label_path)
self.files.append({
"img": img_file,
"label": label_file,
"name": name,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
if not os.path.exists(datafiles["img"]): # (26 out of 25000 images are missing)
return self.__getitem__(0)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
image = image.transpose((2, 0, 1))
return image.copy(), label.copy(), np.array(size), name
class DatasetCityscapesEvalSeq(data.Dataset):
def __init__(self, data_path, sequence="00"):
self.data_path = data_path
self.img_dir = self.data_path + "/leftImg8bit/demoVideo/stuttgart_" + sequence + "/"
self.examples = []
file_names = os.listdir(self.img_dir)
for file_name in file_names:
img_id = file_name.split("_leftImg8bit.png")[0]
img_path = self.img_dir + file_name
example = {}
example["img_path"] = img_path
example["img_id"] = img_id
self.examples.append(example)
self.num_examples = len(self.examples)
print ("DatasetCityscapesEvalSeq - num examples: %d" % self.num_examples)
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
datafiles = self.examples[index]
image = cv2.imread(datafiles["img_path"], cv2.IMREAD_COLOR)
size = image.shape
name = datafiles["img_id"]
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
image = image.transpose((2, 0, 1))
return image.copy(), np.array(size), name
################################################################################
# Synscapes
################################################################################
class DatasetSynscapesAugmentation(data.Dataset):
def __init__(self, root, root_meta, type="train", max_iters=None, crop_size=(512, 512), ignore_label=255):
self.root = root
self.root_meta = root_meta
self.crop_h, self.crop_w = crop_size
self.ignore_label = ignore_label
if type == "train":
with open(root_meta + "/train_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
elif type == "val":
with open(root_meta + "/val_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
else:
raise Exception("type must be either 'train' or 'val'!")
print ("DatasetSynscapesAugmentation - num unique examples: %d" % len(self.img_ids))
        if max_iters is not None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
print ("DatasetSynscapesAugmentation - num examples: %d" % len(self.img_ids))
self.files = []
for img_id in self.img_ids:
self.files.append({
"img": self.root + "/img/rgb-2k/" + img_id + ".png",
"label": self.root_meta + "/gtFine/" + img_id + ".png",
"name": img_id,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
if not os.path.exists(datafiles["img"]): # (26 out of 25000 images are missing)
return self.__getitem__(0)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image, label = generate_scale_label(image, label)
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
img_h, img_w = label.shape
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(self.ignore_label,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
image = image.transpose((2, 0, 1))
flip = np.random.choice(2)*2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image.copy(), label.copy(), np.array(size), name
class DatasetSynscapesEval(data.Dataset):
def __init__(self, root, root_meta, type="val", ignore_label=255):
self.root = root
self.root_meta = root_meta
self.ignore_label = ignore_label
if type == "train":
with open(root_meta + "/train_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
elif type == "val":
with open(root_meta + "/val_img_ids.pkl", "rb") as file: # (needed for python3)
self.img_ids = pickle.load(file)
else:
raise Exception("type must be either 'train' or 'val'!")
print ("DatasetSynscapesEval - num examples: %d" % len(self.img_ids))
self.files = []
for img_id in self.img_ids:
self.files.append({
"img": self.root + "/img/rgb-2k/" + img_id + ".png",
"label": self.root_meta + "/gtFine/" + img_id + ".png",
"name": img_id,
"weight": 1
})
self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
if not os.path.exists(datafiles["img"]): # (26 out of 25000 images are missing)
return self.__getitem__(0)
label = id2trainId(label, self.id_to_trainid)
size = image.shape
name = datafiles["name"]
image = np.asarray(image, np.float32)
mean = (102.9801, 115.9465, 122.7717)
image = image[:,:,::-1]
image -= mean
image = image.transpose((2, 0, 1))
return image.copy(), label.copy(), np.array(size), name
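# --- Usage sketch (added for illustration, not part of the original file) ---
# Wraps the Cityscapes training set in a PyTorch DataLoader; the dataset root and
# list-file path below are placeholders, not real paths.
if __name__ == '__main__':
    train_set = DatasetCityscapesAugmentation(root="/path/to/cityscapes",
                                              list_path="./lists/cityscapes_train.txt",
                                              crop_size=(512, 512))
    train_loader = data.DataLoader(train_set, batch_size=2, shuffle=True, num_workers=2)
    for image, label, size, name in train_loader:
        print(image.shape, label.shape)  # e.g. torch.Size([2, 3, 512, 512]), torch.Size([2, 512, 512])
        break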
|
[
"random.randint",
"os.path.basename",
"numpy.asarray",
"cv2.copyMakeBorder",
"os.path.exists",
"cv2.imread",
"pickle.load",
"numpy.array",
"numpy.random.choice",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((266, 345), 'cv2.resize', 'cv2.resize', (['image', 'None'], {'fx': 'f_scale', 'fy': 'f_scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)\n', (276, 345), False, 'import cv2\n'), ((358, 443), 'cv2.resize', 'cv2.resize', (['label', 'None'], {'fx': 'f_scale', 'fy': 'f_scale', 'interpolation': 'cv2.INTER_NEAREST'}), '(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST\n )\n', (368, 443), False, 'import cv2\n'), ((2677, 2723), 'cv2.imread', 'cv2.imread', (["datafiles['img']", 'cv2.IMREAD_COLOR'], {}), "(datafiles['img'], cv2.IMREAD_COLOR)\n", (2687, 2723), False, 'import cv2\n'), ((2740, 2792), 'cv2.imread', 'cv2.imread', (["datafiles['label']", 'cv2.IMREAD_GRAYSCALE'], {}), "(datafiles['label'], cv2.IMREAD_GRAYSCALE)\n", (2750, 2792), False, 'import cv2\n'), ((2983, 3012), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (2993, 3012), True, 'import numpy as np\n'), ((3684, 3722), 'random.randint', 'random.randint', (['(0)', '(img_h - self.crop_h)'], {}), '(0, img_h - self.crop_h)\n', (3698, 3722), False, 'import random\n'), ((3739, 3777), 'random.randint', 'random.randint', (['(0)', '(img_w - self.crop_w)'], {}), '(0, img_w - self.crop_w)\n', (3753, 3777), False, 'import random\n'), ((3794, 3883), 'numpy.asarray', 'np.asarray', (['img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w]', 'np.float32'], {}), '(img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w],\n np.float32)\n', (3804, 3883), True, 'import numpy as np\n'), ((3896, 3987), 'numpy.asarray', 'np.asarray', (['label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w]', 'np.float32'], {}), '(label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w],\n np.float32)\n', (3906, 3987), True, 'import numpy as np\n'), ((5755, 5801), 'cv2.imread', 'cv2.imread', (["datafiles['img']", 'cv2.IMREAD_COLOR'], {}), "(datafiles['img'], cv2.IMREAD_COLOR)\n", (5765, 5801), False, 'import cv2\n'), ((5818, 5870), 'cv2.imread', 'cv2.imread', (["datafiles['label']", 'cv2.IMREAD_GRAYSCALE'], {}), "(datafiles['label'], cv2.IMREAD_GRAYSCALE)\n", (5828, 5870), False, 'import cv2\n'), ((6132, 6161), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (6142, 6161), True, 'import numpy as np\n'), ((6648, 6672), 'os.listdir', 'os.listdir', (['self.img_dir'], {}), '(self.img_dir)\n', (6658, 6672), False, 'import os\n'), ((7249, 7300), 'cv2.imread', 'cv2.imread', (["datafiles['img_path']", 'cv2.IMREAD_COLOR'], {}), "(datafiles['img_path'], cv2.IMREAD_COLOR)\n", (7259, 7300), False, 'import cv2\n'), ((7379, 7408), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (7389, 7408), True, 'import numpy as np\n'), ((9879, 9925), 'cv2.imread', 'cv2.imread', (["datafiles['img']", 'cv2.IMREAD_COLOR'], {}), "(datafiles['img'], cv2.IMREAD_COLOR)\n", (9889, 9925), False, 'import cv2\n'), ((9942, 9994), 'cv2.imread', 'cv2.imread', (["datafiles['label']", 'cv2.IMREAD_GRAYSCALE'], {}), "(datafiles['label'], cv2.IMREAD_GRAYSCALE)\n", (9952, 9994), False, 'import cv2\n'), ((10313, 10342), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (10323, 10342), True, 'import numpy as np\n'), ((11014, 11052), 'random.randint', 'random.randint', (['(0)', '(img_h - self.crop_h)'], {}), '(0, img_h - self.crop_h)\n', (11028, 11052), False, 'import random\n'), ((11069, 11107), 'random.randint', 'random.randint', (['(0)', '(img_w - 
self.crop_w)'], {}), '(0, img_w - self.crop_w)\n', (11083, 11107), False, 'import random\n'), ((11124, 11213), 'numpy.asarray', 'np.asarray', (['img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w]', 'np.float32'], {}), '(img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w],\n np.float32)\n', (11134, 11213), True, 'import numpy as np\n'), ((11226, 11317), 'numpy.asarray', 'np.asarray', (['label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w]', 'np.float32'], {}), '(label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w],\n np.float32)\n', (11236, 11317), True, 'import numpy as np\n'), ((13305, 13351), 'cv2.imread', 'cv2.imread', (["datafiles['img']", 'cv2.IMREAD_COLOR'], {}), "(datafiles['img'], cv2.IMREAD_COLOR)\n", (13315, 13351), False, 'import cv2\n'), ((13368, 13420), 'cv2.imread', 'cv2.imread', (["datafiles['label']", 'cv2.IMREAD_GRAYSCALE'], {}), "(datafiles['label'], cv2.IMREAD_GRAYSCALE)\n", (13378, 13420), False, 'import cv2\n'), ((13682, 13711), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (13692, 13711), True, 'import numpy as np\n'), ((227, 248), 'random.randint', 'random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (241, 248), False, 'import random\n'), ((1668, 1699), 'os.path.join', 'osp.join', (['self.root', 'image_path'], {}), '(self.root, image_path)\n', (1676, 1699), True, 'import os.path as osp\n'), ((1725, 1756), 'os.path.join', 'osp.join', (['self.root', 'label_path'], {}), '(self.root, label_path)\n', (1733, 1756), True, 'import os.path as osp\n'), ((3295, 3389), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', '(0)', 'pad_h', '(0)', 'pad_w', 'cv2.BORDER_CONSTANT'], {'value': '(0.0, 0.0, 0.0)'}), '(image, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=(\n 0.0, 0.0, 0.0))\n', (3313, 3389), False, 'import cv2\n'), ((3441, 3540), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['label', '(0)', 'pad_h', '(0)', 'pad_w', 'cv2.BORDER_CONSTANT'], {'value': '(self.ignore_label,)'}), '(label, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=(\n self.ignore_label,))\n', (3459, 3540), False, 'import cv2\n'), ((4182, 4196), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (4190, 4196), True, 'import numpy as np\n'), ((4746, 4777), 'os.path.join', 'osp.join', (['self.root', 'image_path'], {}), '(self.root, image_path)\n', (4754, 4777), True, 'import os.path as osp\n'), ((4803, 4834), 'os.path.join', 'osp.join', (['self.root', 'label_path'], {}), '(self.root, label_path)\n', (4811, 4834), True, 'import os.path as osp\n'), ((5887, 5919), 'os.path.exists', 'os.path.exists', (["datafiles['img']"], {}), "(datafiles['img'])\n", (5901, 5919), False, 'import os\n'), ((6351, 6365), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (6359, 6365), True, 'import numpy as np\n'), ((7584, 7598), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (7592, 7598), True, 'import numpy as np\n'), ((10011, 10043), 'os.path.exists', 'os.path.exists', (["datafiles['img']"], {}), "(datafiles['img'])\n", (10025, 10043), False, 'import os\n'), ((10625, 10719), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', '(0)', 'pad_h', '(0)', 'pad_w', 'cv2.BORDER_CONSTANT'], {'value': '(0.0, 0.0, 0.0)'}), '(image, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=(\n 0.0, 0.0, 0.0))\n', (10643, 10719), False, 'import cv2\n'), ((10771, 10870), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['label', '(0)', 'pad_h', '(0)', 'pad_w', 'cv2.BORDER_CONSTANT'], {'value': '(self.ignore_label,)'}), '(label, 0, pad_h, 0, pad_w, 
cv2.BORDER_CONSTANT, value=(\n self.ignore_label,))\n', (10789, 10870), False, 'import cv2\n'), ((11512, 11526), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (11520, 11526), True, 'import numpy as np\n'), ((13437, 13469), 'os.path.exists', 'os.path.exists', (["datafiles['img']"], {}), "(datafiles['img'])\n", (13451, 13469), False, 'import os\n'), ((13901, 13915), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (13909, 13915), True, 'import numpy as np\n'), ((4043, 4062), 'numpy.random.choice', 'np.random.choice', (['(2)'], {}), '(2)\n', (4059, 4062), True, 'import numpy as np\n'), ((8246, 8263), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (8257, 8263), False, 'import pickle\n'), ((11373, 11392), 'numpy.random.choice', 'np.random.choice', (['(2)'], {}), '(2)\n', (11389, 11392), True, 'import numpy as np\n'), ((11902, 11919), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (11913, 11919), False, 'import pickle\n'), ((1616, 1640), 'os.path.basename', 'osp.basename', (['label_path'], {}), '(label_path)\n', (1628, 1640), True, 'import os.path as osp\n'), ((4694, 4718), 'os.path.basename', 'osp.basename', (['label_path'], {}), '(label_path)\n', (4706, 4718), True, 'import os.path as osp\n'), ((8415, 8432), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (8426, 8432), False, 'import pickle\n'), ((12071, 12088), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (12082, 12088), False, 'import pickle\n')]
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@copyright 2017
@licence: 2-clause BSD licence
This file contains the main code for the phase-state machine
"""
import numpy as _np
import pandas as _pd
import itertools
from numba import jit
import warnings as _warnings
@jit(nopython=True, cache=True)
def _limit(a):
"""
faster version of numpy clip, also modifies array in place
"""
#numba doesn't support indexing by boolean
#a[a<lower]=lower
#a[a>upper]=upper
shape = a.shape
for j in range(shape[1]):
for i in range(shape[0]):
if a[i,j] < 0.0:
a[i,j] = 0.0
if a[i,j] > 1.0:
a[i,j] = 1.0
@jit(nopython=True, cache=True)
def _signfunc(x):
return 1.0-2*(x<0)
@jit(nopython=True, cache=True)
def ReLU(x):
return 0.5*(_np.abs(x)+x)
# Alternative, differentiable "sign" function
# Also improves stability of the state's sign
#@jit(nopython=True)
#def _signfunc(x, epsilon=1e-3):
# return _np.tanh(x/epsilon)
#_np.set_printoptions(precision=3, suppress=True)
@jit(nopython=True, cache=True)
def _step(statevector, #main state vector. Input and output, modified in-place
#outputs, modified in place:
dotstatevector, #velocity of main state vector
activationMatrix, #Activation for each potential state and transition
phasesMatrix, #Phases for each transition
phaseVelocitiesMatrix, #Derivative of phases for each transition
#inputs:
phaseVelocityExponentInput, #input to modify velocity of each transition individually (exponential scale, basis 2)
BiasMatrix, #input to depart / avert departure from states
stateConnectivityGreedinessAdjustment, #input to modify how strong a successor state pulls the system towards itself, relative to the predecessor state
stateConnectivityCompetingGreedinessAdjustment, #input to adjust greediness in between compeeting successor states
phasesInput, # phase target in case a transition is enslaved to an external phase
velocityAdjustmentGain, # gain related to enslaving phase
noise_velocity, # vector that gets added to state velocity (usually in order to inject some base noise)
#parameters:
numStates, #number of states / dimensions
betaInv, #precomputed from beta parameter (state locations / scale)
stateConnectivityAbs, #precomputed from state graph
stateConnectivitySignMap, #precomputed from state graph
stateConnectivityIsBidirectional, #precomputed from state graph
stateConnectivityNrEdges, #precomputed from state graph
rhoZero, #coupling values for creating discrete states
rhoDelta, #coupling values for creating stable heteroclinic channels
alpha, #growth rate of states, determines speed of transitioning
dt, # time step duration in seconds
dtInv, #precomputed from dt
nonlinearityParamsLambda, #Kumaraswamy distribution parameters to modify gradualness of activation
nonlinearityParamsPsi, #Kumaraswamy distribution parameters to modify gradualness of phase progress
stateVectorExponent, #modifies the bending of heteroclinic channels
speedLimit, #safety limit to state velocity
epsilonLambda, #determines the region of zero activation around the coordinates axes
#for comparative study:
emulateHybridAutomaton, #set this to true to hack phasta into acting like a discrete state graph / hybrid automaton
triggervalue_successors, #for HA emulation mode, modified in-place
):
"""
Core phase-state machine computation.
Written as a function in order to be able to optimize it with numba
Note: The function modifies several arguments (numpy arrays) in place.
"""
#compute adjustment to the instantaneously effective growth factor
scaledactivation = activationMatrix * (1.0 / max(1.0, _np.sum(activationMatrix)))
kd = 2** _np.sum( scaledactivation * phaseVelocityExponentInput)
#compute mu for phase control:
phaseerrors = activationMatrix * (phasesInput-phasesMatrix)
correctiveAction = phaseerrors * velocityAdjustmentGain
correctiveActionPredecessor = _np.zeros((numStates))
for i in range(numStates):
correctiveActionPredecessor += correctiveAction[:,i]
correctiveActionSuccessor = _np.zeros((numStates))
for i in range(numStates):
correctiveActionSuccessor += correctiveAction[i,:]
mu = correctiveActionPredecessor - correctiveActionSuccessor
statevector_abs = _np.abs(statevector)
#adjust signs of the bias values depending on the transition direction:
biases = _np.dot(BiasMatrix * stateConnectivitySignMap * _np.outer(1-statevector_abs,statevector_abs), statevector)
noise_statevector = noise_velocity * dt
#If requested, decide whether to start a transition using a threshold, and stick to that decision no matter what until the transition finishes
if emulateHybridAutomaton:
predecessors = 1.0*(_np.abs(statevector)*betaInv > 0.99)
successors = (_np.dot(stateConnectivityAbs, predecessors) > 0.5 )
notsuccessors = (_np.dot(stateConnectivityAbs, predecessors) < 0.5 )
triggervalue_successors[notsuccessors] = 0.0
noise_statevector = _np.zeros((numStates))
threshold = 0.1
if _np.any(triggervalue_successors >= threshold ):
chosensuccessor = _np.argmax(triggervalue_successors)
value_chosen = triggervalue_successors[chosensuccessor]
notchosensuccessors = successors.copy()
notchosensuccessors[chosensuccessor] = 0
triggervalue_successors[:] = 0.0
triggervalue_successors[chosensuccessor] = value_chosen
if triggervalue_successors[chosensuccessor] < 1e5:
triggervalue_successors[ chosensuccessor ] = 1e6
#print(chosensuccessor)
noise_statevector[chosensuccessor] = 1.0
else:
triggervalue_successors[:] += biases * dt + noise_velocity
statevector[:] = statevector #for numba
statesigns = _signfunc(statevector)
statesignsOuterProduct = _np.outer(statesigns,statesigns) #precompute this, as we need it several times
#stateVectorExponent=1 #straight channels: |x| (original SHC by Horchler/Rabinovich)
#stateVectorExponent=2 #spherical channels: |x|**2 (default for phasta)
x_gamma = (statevector*statesigns)**stateVectorExponent
#Compute a mask that ensures the attractor works with negative state values too, that the transition's "sign" is observed, and that unidirectional edges do not accidentally change between positive and negative state values
#the computation is formulated such that only algebraic and continuous functions (e.g. ReLu) are used
M_T = ReLU(statesignsOuterProduct*stateConnectivitySignMap)
#Appropriate signs for transition-related greediness adjustment, depending on whether a graph edge is bidirectional or not:
TransitionGreedinessAdjustmentSign = (stateConnectivityNrEdges * ReLU(statesignsOuterProduct) - stateConnectivityIsBidirectional) * stateConnectivitySignMap
#sum everything into a transition/greedinesses matrix (T+G):
T_G = M_T*stateConnectivityAbs + TransitionGreedinessAdjustmentSign*stateConnectivityGreedinessAdjustment + stateConnectivityCompetingGreedinessAdjustment
#This is the core computation and time integration of the dynamical system:
growth = alpha + _np.dot(rhoZero, x_gamma) + _np.dot(rhoDelta * T_G, x_gamma)
dotstatevector[:] = statevector * growth * kd + mu + biases #estimate velocity. do not add noise to velocity, promp mixer doesnt like jumps
dotstatevector_L2 = _np.sqrt(_np.sum(dotstatevector**2))
velocity_limitfactor = _np.minimum(1.0, speedLimit/(1e-8 + dotstatevector_L2)) #limit speed of the motion in state space to avoid extreme phase velocities that a robot cannot
statevector[:] = (statevector + dotstatevector*dt*velocity_limitfactor + noise_statevector) #set the new state
#prepare a normalized state vector for the subsequent operations:
statevector_abs = _np.abs(statevector)
S = statevector_abs.reshape((numStates,1))
S2 = S*S
S_plus_P = S + S.T
statevectorL1 = _np.sum(S)
statevectorL2 = _np.sum(S2)
#compute the transition/state activation matrix (Lambda)
activations = stateConnectivitySignMap * _np.outer(statevector, statevector) * 16 * (statevectorL2) / (S_plus_P**4+statevectorL1**4)
activationMatrix[:,:] = activations * stateConnectivityAbs #function shown in visualization_of_activationfunction.py
_limit(activationMatrix)
#apply nonlinearity:
if (nonlinearityParamsLambda[0] != 1.0 or nonlinearityParamsLambda[1] != 1.0 ):
activationMatrix[:,:] = 1.0-(1.0-activationMatrix**nonlinearityParamsLambda[0])**nonlinearityParamsLambda[1] #Kumaraswamy CDF
#compute the state activation and put it into the diagonal of Lambda:
residual = max(0.0, 1.0 - _np.sum(activationMatrix))
stateactivation_normalized = S2/ _np.sum(S2)
for i in range(numStates):
activationMatrix[i,i] = stateactivation_normalized[i,0] * residual
#compute the phase progress matrix (Psi)
epsilonPsi = 0.0001
newphases = (S+epsilonPsi) / (S_plus_P+2*epsilonPsi)
_limit(newphases)
#apply nonlinearity:
if (nonlinearityParamsPsi[0] != 1.0 or nonlinearityParamsPsi[1] != 1.0 ):
newphases = 1.0-(1.0-newphases**nonlinearityParamsPsi[0])**nonlinearityParamsPsi[1] #Kumaraswamy CDF
phaseVelocitiesMatrix[:,:] = (newphases - phasesMatrix) * dtInv
phasesMatrix[:,:] = newphases
return
_KumaraswamyCDFParameters = {
'kumaraswamy1,1': (1.,1.),
'kumaraswamy2,1': (2.,1.),
'kumaraswamy1,2': (1.,2.),
#values for the Kumaraswamy CDF that approximate the given incomplete beta function:
'beta2,2': (1.913227338072261,2.2301669931409323),
'beta3,3': (2.561444544688591,3.680069490606511),
'beta2,5': (1.6666251656562021,5.9340642444701555),
}
class Kernel():
"""
This class provides a dynamical system that can behave like a state machine.
    The transitions are smooth though, which enables interesting behaviors like online-synchronisation and negotiation of branch alternatives.
The most important parameters are:
numStates: the number of quasi-discrete states the system should have
predecessors: a list of lists which defines the preceeding states of each state
Note: Don't set up mutual predecessors (i.e. a loop with two states). This does not work. You need at least 3 states for a loop.
alpha: determines the speed at which a state becomes dominant. Effectively speeds up or slows down the machine
epsilon: "noise" added to the states, which has the effect of reducing the average dwell time for the preceeding states
    Less important parameters:
beta: scaling factor for the state variable (usually 1.0)
nu: determines how easy it is to push away from a state (usually 1.5).
dt: time step at which the system is simulated (default: 1e-2)
Inputs:
Observed phase Psi_d: A matrix analogous to the phase matrix, containing phase estimates conditional to the transition or phase being activated
phase control gain K_p: A matrix analogous to the activation matrix, which indicates how confident the state observation is
inputbias: vector that signals which state should currently be the next (e.g. from perception)
Output:
stateVector: The actual, evolving state of the dynamical system.
phase matrix Psi: A (numStates x numStates) matrix aggregating all phase variables for each possible transition, plus the state vector on the diagonal
        activation matrix Lambda: A matrix which contains the corresponding transition activation values. State
        activations correspond to 1 - sum(transition activations), so that sum(matrix) = 1 (i.e. it can be used as a
        weighting matrix)
"""
def __init__(self, **kwargs):
self.numStates = 0
self.t = 0.0
self.statehistorylen = 0
self.historyIndex = 0
self.setParameters(**kwargs)
def setParameters(self,
numStates=3,
predecessors=None,
successors=[[1],[2],[0]],
alphaTime=None,
alpha=40.0,
epsilon=1e-9,
nu=1.0,
beta=1.0,
dt=1e-2,
stateVectorExponent=2.0,
speedLimit = _np.inf,
initialState=0,
nonlinearityLambda='kumaraswamy1,1',
nonlinearityPsi='kumaraswamy1,1',
inputFilterTimeConstant = 0.1,
reuseNoiseSampleTimes = 10,
reset=False,
recordSteps=-1,
emulateHybridAutomaton=False):
"""
Method to set or reconfigure the phase-state-machine
numStates: The number of states the system should have
predecessors: A list of lists which contain the state indices of the respective predecessors
successors: A list of lists which contain the state indices of the respective successors
Note: use of predecessors and successors parameter is mutually exclusive!
For the meaning of the other parameters, please consult the paper or the code
"""
oldcount = self.numStates
#parameters:
self.numStates = numStates
if alphaTime is None: #backwards compatibility: if no alphatime is provided, use dt-dependent alpha value
self.alphaTime = self._sanitizeParam(alpha)/dt
else:
self.alphaTime = self._sanitizeParam(alphaTime)
self.beta = self._sanitizeParam(beta)
self.betaInv = 1.0/self.beta #this is used often, so precompute once
self.nu = self._sanitizeParam(nu)
self.nu_term = self.nu/(1 + self.nu) #equations usually use this term - precompute it
self.epsilon = self._sanitizeParam(epsilon) * self.beta #Wiener process noise
self.epsilonLambda=0.01 #regularization parameter of activation function
self.maxGreediness=10.0 #maximum factor to allow for increasing decisiveness (mainly to guard against input errors)
self.reuseNoiseSampleTimes = reuseNoiseSampleTimes
self.stateVectorExponent =stateVectorExponent
self.speedLimit = speedLimit
if initialState >= self.numStates:
raise ValueError()
self.initialState = initialState
if predecessors is not None: #convert list of predecessors into list of successors
self.successors = self._predecessorListToSuccessorList(predecessors)
else:
self.successors = successors
self.updateDt(dt) #also calls self._updateRho
self.nonlinearityParamsLambda = _KumaraswamyCDFParameters[nonlinearityLambda] #nonlinearity for sparsifying activation values
self.nonlinearityParamsPsi = _KumaraswamyCDFParameters[nonlinearityPsi] #nonlinearity that linearizes phase variables
#inputs:
self.BiasMatrix = _np.zeros((self.numStates,self.numStates)) #determines transition preferences and state timeout duration
self.BiasMatrixDesired = _np.zeros((self.numStates,self.numStates)) #determines transition preferences and state timeout duration
self.emulateHybridAutomaton = emulateHybridAutomaton #set this to true to emulate discrete switching behavior on bias input
self.triggervalue_successors = _np.zeros((self.numStates))
self.phasesInput = _np.zeros((self.numStates,self.numStates)) #input to synchronize state transitions (slower/faster)
self.velocityAdjustmentGain = _np.zeros((self.numStates,self.numStates)) #gain of the control enslaving the given state transition
self.phaseVelocityExponentInput = _np.zeros((self.numStates,self.numStates)) #contains values that limit transition velocity
self.stateConnectivityGreedinessAdjustment = _np.zeros((self.numStates,self.numStates)) #contains values that adjust transition greediness
self.stateConnectivityCompetingGreedinessAdjustment = _np.zeros((self.numStates,self.numStates)) #contains values that adjust competing transition greediness
self.stateConnectivityGreedinessTransitions = _np.zeros((self.numStates,self.numStates))
self.stateConnectivityGreedinessCompetingSuccessors = _np.zeros((self.numStates,self.numStates))
self.inputfilterK = dt / max(dt , inputFilterTimeConstant) #how much inputs should be low-passed (to avoid sudden changes in phasta state)
#internal data structures
if self.numStates != oldcount or reset: #force a reset if number of states change
self.statevector = _np.zeros((numStates))
self.dotstatevector = _np.zeros((numStates))
self.statevector[self.initialState] = self.beta[self.initialState] #start at a state
self.phasesActivation = _np.zeros((self.numStates,self.numStates))
self.phasesProgress = _np.zeros((self.numStates,self.numStates))
self.phasesProgressVelocities = _np.zeros((self.numStates,self.numStates))
self.biases = _np.zeros((self.numStates, self.numStates))
self.noise_velocity = 0.0
self.noiseValidCounter = 0
#these data structures are used to save the history of the system:
if recordSteps< 0:
pass
elif recordSteps == 0:
self.statehistorylen = 0
self.historyIndex = 0
else:
self.statehistorylen = recordSteps
self.statehistory = _np.empty((self.statehistorylen, self.numStates+1))
self.statehistory.fill(_np.nan)
self.phasesActivationHistory= _np.zeros((self.statehistorylen, self.numStates,self.numStates))
self.phasesProgressHistory = _np.zeros((self.statehistorylen, self.numStates,self.numStates))
self.historyIndex = 0
def _updateRho(self):
"""
internal method to compute the P matrix from preset parameters
also computes the state connectivity matrix
reimplements the computation by the SHCtoolbox code
"""
stateConnectivityAbs = _np.zeros((self.numStates, self.numStates))
stateConnectivitySignMap =_np.tri(self.numStates, self.numStates, k=0) - _np.tri(self.numStates, self.numStates, k=-1).T
for state, successorsPerState in enumerate(self.successors):
#precedecessorcount = len(predecessorsPerState)
for successor in successorsPerState:
if state == successor: raise ValueError("Cannot set a state ({0}) as successor of itself!".format(state))
stateConnectivityAbs[successor,state] = 1
stateConnectivitySignMap[successor,state] = 1
stateConnectivitySignMap[state, successor] = -1
self.stateConnectivityAbs = stateConnectivityAbs
self.stateConnectivitySignMap = stateConnectivitySignMap
#precompute some things:
self.stateConnectivityIsBidirectional = _np.sqrt(self.stateConnectivityAbs * self.stateConnectivityAbs.T)
self.stateConnectivityNrEdges = stateConnectivityAbs + stateConnectivityAbs.T
self.stateConnectivity = self.stateConnectivityAbs
#compute a matrix that has ones for states that have a common predecessor, i.e. pairs of states which compete (except for self-competition)
self.connectivitySigned = self.stateConnectivitySignMap*self.stateConnectivityAbs
self.competingStates = _np.dot(self.stateConnectivityAbs, self.stateConnectivityAbs.T) * (1-_np.eye(self.numStates))
#first, fill in the standard values in rhoZero
# rhoZero = beta^-1 x alpha * (1 - I + alpha^-1 x alpha)
alphaInv = 1/self.alpha
s = _np.dot(self.alpha[:,_np.newaxis],self.betaInv[_np.newaxis,:])
rhoZero = s * (_np.eye(self.numStates) - 1 - _np.dot(self.alpha[:,_np.newaxis],alphaInv[_np.newaxis,:]))
#then fill the rhoDelta:
rhoDelta = (self.alpha[:,_np.newaxis]*self.betaInv[_np.newaxis,:] / self.nu_term[:,_np.newaxis])
self.rhoZero = rhoZero
self.rhoDelta = rhoDelta
successorCountInv = 1.0/_np.maximum(_np.sum(self.stateConnectivityAbs, axis=0)[_np.newaxis,:],1.0)
self.BiasMeanBalancingWeights = self.stateConnectivityAbs * successorCountInv
def step(self, until=None, period=None, nr_steps=1):
"""
Main algorithm, implementing the integration step, state space decomposition, phase control and velocity adjustment.
period: give a period to simulate
until: give a time until to simulate
nr_steps: give the number of steps to simulate at self.dt
If more than one argument is given, then precedence is: until > period > nr_steps
"""
if until is not None:
period = until - self.t
if period < 0.0:
raise RuntimeError("argument until is in the past")
#if a period is given, iterate until we finished that period:
if period is not None:
nr_steps = int(period // self.dt)
for i in range(nr_steps):
#execute a single step:
self.t = self.t + self.dt #advance time
self.noiseValidCounter = self.noiseValidCounter - 1
if self.noiseValidCounter <= 0: #do not sample every timestep as the dynamical system cannot react that fast anyway. Effectively low-pass-filters the noise.
self.noise_velocity = _np.random.normal(scale = self.epsilonPerSample, size=self.numStates) #sample a discretized wiener process noise
self.noiseValidCounter = self.reuseNoiseSampleTimes
#low-pass filter input to avoid sudden jumps in velocity
self.BiasMatrix += self.inputfilterK * (self.BiasMatrixDesired-self.BiasMatrix)
self.stateConnectivityGreedinessAdjustment += self.inputfilterK * (self.stateConnectivityGreedinessTransitions - self.stateConnectivityGreedinessAdjustment)
self.stateConnectivityCompetingGreedinessAdjustment += self.inputfilterK * (self.stateConnectivityGreedinessCompetingSuccessors -self.stateConnectivityCompetingGreedinessAdjustment)
_step( #arrays modified in-place:
self.statevector,
self.dotstatevector,
self.phasesActivation,
self.phasesProgress,
self.phasesProgressVelocities,
#inputs
self.phaseVelocityExponentInput,
self.BiasMatrix,
self.stateConnectivityGreedinessAdjustment,
self.stateConnectivityCompetingGreedinessAdjustment,
self.phasesInput,
self.velocityAdjustmentGain,
self.noise_velocity,
#parameters
self.numStates,
self.betaInv ,
self.stateConnectivityAbs,
self.stateConnectivitySignMap,
self.stateConnectivityIsBidirectional,
self.stateConnectivityNrEdges,
self.rhoZero,
self.rhoDelta,
self.alpha,
self.dt,
self.dtInv,
self.nonlinearityParamsLambda,
self.nonlinearityParamsPsi,
self.stateVectorExponent,
self.speedLimit,
self.epsilonLambda,
self.emulateHybridAutomaton,
self.triggervalue_successors
)
#note the currently most active state/transition (for informative purposes)
i = _np.argmax(self.phasesActivation)
self.currentPredecessor = i % self.numStates
self.currentSuccessor = i // self.numStates
self._recordState()
return self.statevector
def get1DState(self):
"""
return value of a one-dimensional signal that indicates which state we are in, or in which transition
"""
value = self.currentPredecessor + (self.currentSuccessor - self.currentPredecessor) * self.phasesProgress[self.currentSuccessor,self.currentPredecessor]
return value
def sayState(self):
"""
returns a string describing the current state
"""
if self.currentPredecessor == self.currentSuccessor:
return "{0}".format(self.currentPredecessor )
else:
return "{0}->{1}".format(self.currentPredecessor , self.currentSuccessor)
def updateDt(self, dt):
"""
        update the time step used to integrate the dynamical system:
"""
self.dt = dt
self.dtInv = 1.0 / dt
self.epsilonPerSample = self.epsilon *_np.sqrt(self.dt*self.reuseNoiseSampleTimes)/dt #factor accounts for the accumulation during a time step (assuming a Wiener process)
self.alpha = self.alphaTime * self.dt
self._updateRho()
def updateEpsilon(self, epsilon):
"""
Update the noise vector
"""
self.epsilon = epsilon
self.updateDt(self.dt) #need to recompute self.epsilonPerSample
def updateSuccessors(self, listoflist):
"""
recompute the system according to the given list of predecessors
"""
self.successors=listoflist
self._updateRho()
def updateGreediness(self, greedinesses):
"""
update the greediness for competing transitions / successor states
Low values make the system maintain co-activated transitions for a long time, high values make transitions very competitive.
0.0: complete indecisiveness (transitions do not compete at all and may not converge towards an exclusive successor state)
1.0: behavior of the original SHC network by [1]
20.0: extremely greedy transitions, behaves much like a discrete state machine
negative values: abort transition and return to the predecessor state
Absolute values less than 1.0 also reduce speed of transitions, 0.0 stops transitions completely.
This value is considered during a transition away from the predecessor state,
i.e. it influences the transition dynamics while honoring the basic state connectivity
greediness: vector of size self.numStates or matrix of size (numStates,numStates)
scalar: set a common greediness value for all competing transitions
vector: greediness values for all competing transitions leading to the related successor state
matrix: set greediness value for each competing transition individually
"""
greedinesses = _np.asarray(greedinesses)
if greedinesses.ndim == 1:
greedinesses = greedinesses[_np.newaxis,:]
elif greedinesses.ndim == 0:
greedinesses = _np.full((1, self.numStates),greedinesses)
#adjust the strength / reverse direction of the outgoing shc's according to greedinesses:
greediness_successorstates = _np.clip((0.5*greedinesses-0.5), -1.0, 0.0) # _np.clip(g, -self.nu_term, 0.0)
strength = self.stateConnectivityAbs * greediness_successorstates.T #works for (1,-1) transition pairs too
self.stateConnectivityGreedinessTransitions = strength + strength.T
#Adjust competition between nodes according to their greediness:
kappa=0.
# self.stateConnectivityGreedinessCompetingSuccessors = self.competingStates * 0.5*(1-(1.+kappa)*greedinesses+kappa*greedinesses.T)
self.stateConnectivityGreedinessCompetingSuccessors = self.competingStates * 0.5*(1-greedinesses)
def updateCompetingTransitionGreediness(self,greedinesses):
_warnings.warn("Please replace updateCompetingTransitionGreediness with updateGreediness asap!", DeprecationWarning, stacklevel=2)
self.updateGreediness(greedinesses)
def _predecessorListToSuccessorList(self, predecessors):
""" helper to convert lists of predecessor states into lists of successor states"""
successors = [ [] for i in range(self.numStates) ] #create a list of lists
for i, predecessorsPerState in enumerate(predecessors):
for pre in predecessorsPerState:
successors[pre].append(i)
return successors
def updatePredecessors(self, listoflist):
"""
recompute the system according to the given list of predecessors
"""
        self.successors = self._predecessorListToSuccessorList(listoflist)
self._updateRho()
def getPredecessors(self):
"""
return the predecessors
"""
        predecessors = [ [] for i in range(self.numStates) ] #create a list of lists
        for state, successorsPerState in enumerate(self.successors):
            for successor in successorsPerState:
                predecessors[successor].append(state)
        return predecessors
def updateBiases(self, successorBias):
"""
changes the "bias" input array
Small values bias the system to hasten transitions towards that state
Large, short spikes can be used to override any state and force the system into any state,
regardless of state connectivity
successorBias: numpy array of biases for each (successor state biased towards, current state) pair
if scalar: set all successor biases to the same value
if vector: set successor biases to the given vector for every state
if matrix: set each (successor state, current state) pair individually
"""
bias = _np.asarray(successorBias)
if bias.ndim == 1:
self.BiasMatrixDesired[:,:] = (self.stateConnectivity) * bias[:,_np.newaxis]
else:
self.BiasMatrixDesired[:,:] = bias
def updateB(self, successorBias):
_warnings.warn("Please replace updateB() with updateBiases() asap!",stacklevel=2)
self.updateBiases(successorBias)
def updateTransitionTriggerInput(self, successorBias):
_warnings.warn("Please replace updateTransitionTriggerInput() with updateBiases() asap!",stacklevel=2)
self.updateBiases(successorBias)
def updatePhasesInput(self, phases):
"""
changes the Psi_d matrix
Use this as phase reference to sync the system with a phase from perception
"""
_np.copyto(self.phasesInput, phases)
def updateVelocityEnslavementGain(self, gains):
"""
changes the K_p matrix
Set the gain values to use for each phase transition.
"""
_np.copyto(self.velocityAdjustmentGain, gains)
def updateTransitionPhaseVelocityExponentInput(self, limits):
"""
Update the matrix that specifies how fast the given phases should progress
Each element effectively is an exponent with base 2 for adjusting each phase velocity individually
limits[j,i]: exponent for the transition from i to j
limits[i,i]: 0 (enforced implicitly)
if limits is a vector: treat it as common exponent for transitions of the same predecessor state
if limits is a scalar: set as common exponent for all transitions
While phase velocity can also be controlled by the self.alpha vector directly,
large variations to individual states' alpha parameter can alter the
convergence behavior and we may lose the stable heteroclinic channel properties
        This method here effectively "scales" the timeline during transitions
"""
limits = _np.asarray(limits)
if limits.ndim == 1:
limits = limits[_np.newaxis,:]
elif limits.ndim == 0:
limits = limits[_np.newaxis,_np.newaxis]
self.phaseVelocityExponentInput[:,:] = limits
#_np.fill_diagonal(self.phaseVelocityExponentInput , 0.0)
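        # Added note (not in the original): since each entry is a base-2 exponent, a value of +1.0
        # roughly doubles and -1.0 roughly halves the phase velocity of that transition, while 0.0
        # keeps the nominal speed, e.g. (with a hypothetical instance `fsm`):
        #   fsm.updateTransitionPhaseVelocityExponentInput(0.0)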
def getHistory(self):
"""
return the historic values for plotting
"""
if self.statehistorylen == 0:
raise RuntimeError("no history is being recorded")
return (self.statehistory[:self.historyIndex,:],
self.phasesActivationHistory[:self.historyIndex,:,:],
self.phasesProgressHistory[:self.historyIndex,:,:]
)
def _sanitizeParam(self, p):
"""
internal helper to provide robust handling of lists and numpy array input data
"""
if _np.isscalar(p):
sanitizedP = _np.empty((self.numStates))
sanitizedP.fill(float(p))
else:
            sanitizedP = _np.array(p[0:self.numStates])
            if sanitizedP.shape[0] != self.numStates:
                raise Exception("Parameter does not have the length of numStates!")
return sanitizedP
def _recordState(self):
"""
internal helper to save the current state for later plotting
"""
if self.historyIndex < self.statehistorylen:
self.statehistory[self.historyIndex, 0] = self.t
self.statehistory[self.historyIndex, 1:self.numStates+1] = self.statevector
self.phasesActivationHistory[self.historyIndex, :,:] = self.phasesActivation
self.phasesProgressHistory[self.historyIndex, :,:] = self.phasesProgress
        if self.historyIndex < self.statehistorylen:
            self.historyIndex = self.historyIndex + 1
|
[
"numpy.abs",
"numpy.sum",
"numpy.argmax",
"numpy.empty",
"numpy.clip",
"numpy.random.normal",
"numpy.full",
"numpy.tri",
"numpy.minimum",
"numpy.asarray",
"numpy.dot",
"numpy.copyto",
"numpy.outer",
"numpy.isscalar",
"numpy.zeros",
"numpy.any",
"numba.jit",
"numpy.array",
"numpy.eye",
"warnings.warn",
"numpy.sqrt"
] |
[((271, 301), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (274, 301), False, 'from numba import jit\n'), ((692, 722), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (695, 722), False, 'from numba import jit\n'), ((767, 797), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (770, 797), False, 'from numba import jit\n'), ((1075, 1105), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (1078, 1105), False, 'from numba import jit\n'), ((4858, 4878), 'numpy.zeros', '_np.zeros', (['numStates'], {}), '(numStates)\n', (4867, 4878), True, 'import numpy as _np\n'), ((5017, 5037), 'numpy.zeros', '_np.zeros', (['numStates'], {}), '(numStates)\n', (5026, 5037), True, 'import numpy as _np\n'), ((5234, 5254), 'numpy.abs', '_np.abs', (['statevector'], {}), '(statevector)\n', (5241, 5254), True, 'import numpy as _np\n'), ((7001, 7034), 'numpy.outer', '_np.outer', (['statesigns', 'statesigns'], {}), '(statesigns, statesigns)\n', (7010, 7034), True, 'import numpy as _np\n'), ((8704, 8762), 'numpy.minimum', '_np.minimum', (['(1.0)', '(speedLimit / (1e-08 + dotstatevector_L2))'], {}), '(1.0, speedLimit / (1e-08 + dotstatevector_L2))\n', (8715, 8762), True, 'import numpy as _np\n'), ((9097, 9117), 'numpy.abs', '_np.abs', (['statevector'], {}), '(statevector)\n', (9104, 9117), True, 'import numpy as _np\n'), ((9237, 9247), 'numpy.sum', '_np.sum', (['S'], {}), '(S)\n', (9244, 9247), True, 'import numpy as _np\n'), ((9272, 9283), 'numpy.sum', '_np.sum', (['S2'], {}), '(S2)\n', (9279, 9283), True, 'import numpy as _np\n'), ((4584, 4638), 'numpy.sum', '_np.sum', (['(scaledactivation * phaseVelocityExponentInput)'], {}), '(scaledactivation * phaseVelocityExponentInput)\n', (4591, 4638), True, 'import numpy as _np\n'), ((6023, 6043), 'numpy.zeros', '_np.zeros', (['numStates'], {}), '(numStates)\n', (6032, 6043), True, 'import numpy as _np\n'), ((6089, 6134), 'numpy.any', '_np.any', (['(triggervalue_successors >= threshold)'], {}), '(triggervalue_successors >= threshold)\n', (6096, 6134), True, 'import numpy as _np\n'), ((8425, 8457), 'numpy.dot', '_np.dot', (['(rhoDelta * T_G)', 'x_gamma'], {}), '(rhoDelta * T_G, x_gamma)\n', (8432, 8457), True, 'import numpy as _np\n'), ((8645, 8673), 'numpy.sum', '_np.sum', (['(dotstatevector ** 2)'], {}), '(dotstatevector ** 2)\n', (8652, 8673), True, 'import numpy as _np\n'), ((10093, 10104), 'numpy.sum', '_np.sum', (['S2'], {}), '(S2)\n', (10100, 10104), True, 'import numpy as _np\n'), ((16425, 16468), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (16434, 16468), True, 'import numpy as _np\n'), ((16563, 16606), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (16572, 16606), True, 'import numpy as _np\n'), ((16840, 16865), 'numpy.zeros', '_np.zeros', (['self.numStates'], {}), '(self.numStates)\n', (16849, 16865), True, 'import numpy as _np\n'), ((16904, 16947), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (16913, 16947), True, 'import numpy as _np\n'), ((17041, 17084), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17050, 17084), True, 'import numpy as _np\n'), ((17185, 17228), 'numpy.zeros', '_np.zeros', 
(['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17194, 17228), True, 'import numpy as _np\n'), ((17330, 17373), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17339, 17373), True, 'import numpy as _np\n'), ((17486, 17529), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17495, 17529), True, 'import numpy as _np\n'), ((17644, 17687), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17653, 17687), True, 'import numpy as _np\n'), ((17749, 17792), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (17758, 17792), True, 'import numpy as _np\n'), ((19669, 19712), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (19678, 19712), True, 'import numpy as _np\n'), ((20531, 20596), 'numpy.sqrt', '_np.sqrt', (['(self.stateConnectivityAbs * self.stateConnectivityAbs.T)'], {}), '(self.stateConnectivityAbs * self.stateConnectivityAbs.T)\n', (20539, 20596), True, 'import numpy as _np\n'), ((21289, 21354), 'numpy.dot', '_np.dot', (['self.alpha[:, _np.newaxis]', 'self.betaInv[_np.newaxis, :]'], {}), '(self.alpha[:, _np.newaxis], self.betaInv[_np.newaxis, :])\n', (21296, 21354), True, 'import numpy as _np\n'), ((25665, 25698), 'numpy.argmax', '_np.argmax', (['self.phasesActivation'], {}), '(self.phasesActivation)\n', (25675, 25698), True, 'import numpy as _np\n'), ((28762, 28787), 'numpy.asarray', '_np.asarray', (['greedinesses'], {}), '(greedinesses)\n', (28773, 28787), True, 'import numpy as _np\n'), ((29129, 29174), 'numpy.clip', '_np.clip', (['(0.5 * greedinesses - 0.5)', '(-1.0)', '(0.0)'], {}), '(0.5 * greedinesses - 0.5, -1.0, 0.0)\n', (29137, 29174), True, 'import numpy as _np\n'), ((29817, 29957), 'warnings.warn', '_warnings.warn', (['"""Please replace updateCompetingTransitionGreediness with updateGreediness asap!"""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'Please replace updateCompetingTransitionGreediness with updateGreediness asap!'\n , DeprecationWarning, stacklevel=2)\n", (29831, 29957), True, 'import warnings as _warnings\n'), ((31742, 31768), 'numpy.asarray', '_np.asarray', (['successorBias'], {}), '(successorBias)\n', (31753, 31768), True, 'import numpy as _np\n'), ((32001, 32087), 'warnings.warn', '_warnings.warn', (['"""Please replace updateB() with updateBiases() asap!"""'], {'stacklevel': '(2)'}), "('Please replace updateB() with updateBiases() asap!',\n stacklevel=2)\n", (32015, 32087), True, 'import warnings as _warnings\n'), ((32192, 32304), 'warnings.warn', '_warnings.warn', (['"""Please replace updateTransitionTriggerInput() with updateBiases() asap!"""'], {'stacklevel': '(2)'}), "(\n 'Please replace updateTransitionTriggerInput() with updateBiases() asap!',\n stacklevel=2)\n", (32206, 32304), True, 'import warnings as _warnings\n'), ((32544, 32580), 'numpy.copyto', '_np.copyto', (['self.phasesInput', 'phases'], {}), '(self.phasesInput, phases)\n', (32554, 32580), True, 'import numpy as _np\n'), ((32776, 32822), 'numpy.copyto', '_np.copyto', (['self.velocityAdjustmentGain', 'gains'], {}), '(self.velocityAdjustmentGain, gains)\n', (32786, 32822), True, 'import numpy as _np\n'), ((33788, 33807), 'numpy.asarray', '_np.asarray', (['limits'], {}), '(limits)\n', (33799, 33807), True, 'import numpy as _np\n'), ((34650, 34665), 
'numpy.isscalar', '_np.isscalar', (['p'], {}), '(p)\n', (34662, 34665), True, 'import numpy as _np\n'), ((828, 838), 'numpy.abs', '_np.abs', (['x'], {}), '(x)\n', (835, 838), True, 'import numpy as _np\n'), ((5400, 5447), 'numpy.outer', '_np.outer', (['(1 - statevector_abs)', 'statevector_abs'], {}), '(1 - statevector_abs, statevector_abs)\n', (5409, 5447), True, 'import numpy as _np\n'), ((5798, 5841), 'numpy.dot', '_np.dot', (['stateConnectivityAbs', 'predecessors'], {}), '(stateConnectivityAbs, predecessors)\n', (5805, 5841), True, 'import numpy as _np\n'), ((5881, 5924), 'numpy.dot', '_np.dot', (['stateConnectivityAbs', 'predecessors'], {}), '(stateConnectivityAbs, predecessors)\n', (5888, 5924), True, 'import numpy as _np\n'), ((6171, 6206), 'numpy.argmax', '_np.argmax', (['triggervalue_successors'], {}), '(triggervalue_successors)\n', (6181, 6206), True, 'import numpy as _np\n'), ((8397, 8422), 'numpy.dot', '_np.dot', (['rhoZero', 'x_gamma'], {}), '(rhoZero, x_gamma)\n', (8404, 8422), True, 'import numpy as _np\n'), ((10025, 10050), 'numpy.sum', '_np.sum', (['activationMatrix'], {}), '(activationMatrix)\n', (10032, 10050), True, 'import numpy as _np\n'), ((18105, 18125), 'numpy.zeros', '_np.zeros', (['numStates'], {}), '(numStates)\n', (18114, 18125), True, 'import numpy as _np\n'), ((18162, 18182), 'numpy.zeros', '_np.zeros', (['numStates'], {}), '(numStates)\n', (18171, 18182), True, 'import numpy as _np\n'), ((18319, 18362), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (18328, 18362), True, 'import numpy as _np\n'), ((18396, 18439), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (18405, 18439), True, 'import numpy as _np\n'), ((18483, 18526), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (18492, 18526), True, 'import numpy as _np\n'), ((18552, 18595), 'numpy.zeros', '_np.zeros', (['(self.numStates, self.numStates)'], {}), '((self.numStates, self.numStates))\n', (18561, 18595), True, 'import numpy as _np\n'), ((19747, 19791), 'numpy.tri', '_np.tri', (['self.numStates', 'self.numStates'], {'k': '(0)'}), '(self.numStates, self.numStates, k=0)\n', (19754, 19791), True, 'import numpy as _np\n'), ((21022, 21085), 'numpy.dot', '_np.dot', (['self.stateConnectivityAbs', 'self.stateConnectivityAbs.T'], {}), '(self.stateConnectivityAbs, self.stateConnectivityAbs.T)\n', (21029, 21085), True, 'import numpy as _np\n'), ((34692, 34717), 'numpy.empty', '_np.empty', (['self.numStates'], {}), '(self.numStates)\n', (34701, 34717), True, 'import numpy as _np\n'), ((34963, 34975), 'numpy.array', '_np.array', (['p'], {}), '(p)\n', (34972, 34975), True, 'import numpy as _np\n'), ((4539, 4564), 'numpy.sum', '_np.sum', (['activationMatrix'], {}), '(activationMatrix)\n', (4546, 4564), True, 'import numpy as _np\n'), ((19794, 19839), 'numpy.tri', '_np.tri', (['self.numStates', 'self.numStates'], {'k': '(-1)'}), '(self.numStates, self.numStates, k=-1)\n', (19801, 19839), True, 'import numpy as _np\n'), ((21091, 21114), 'numpy.eye', '_np.eye', (['self.numStates'], {}), '(self.numStates)\n', (21098, 21114), True, 'import numpy as _np\n'), ((21405, 21466), 'numpy.dot', '_np.dot', (['self.alpha[:, _np.newaxis]', 'alphaInv[_np.newaxis, :]'], {}), '(self.alpha[:, _np.newaxis], alphaInv[_np.newaxis, :])\n', (21412, 21466), True, 'import numpy as _np\n'), ((23217, 23284), 'numpy.random.normal', '_np.random.normal', ([], 
{'scale': 'self.epsilonPerSample', 'size': 'self.numStates'}), '(scale=self.epsilonPerSample, size=self.numStates)\n', (23234, 23284), True, 'import numpy as _np\n'), ((26776, 26822), 'numpy.sqrt', '_np.sqrt', (['(self.dt * self.reuseNoiseSampleTimes)'], {}), '(self.dt * self.reuseNoiseSampleTimes)\n', (26784, 26822), True, 'import numpy as _np\n'), ((28942, 28985), 'numpy.full', '_np.full', (['(1, self.numStates)', 'greedinesses'], {}), '((1, self.numStates), greedinesses)\n', (28950, 28985), True, 'import numpy as _np\n'), ((5734, 5754), 'numpy.abs', '_np.abs', (['statevector'], {}), '(statevector)\n', (5741, 5754), True, 'import numpy as _np\n'), ((9398, 9433), 'numpy.outer', '_np.outer', (['statevector', 'statevector'], {}), '(statevector, statevector)\n', (9407, 9433), True, 'import numpy as _np\n'), ((19023, 19076), 'numpy.empty', '_np.empty', (['(self.statehistorylen, self.numStates + 1)'], {}), '((self.statehistorylen, self.numStates + 1))\n', (19032, 19076), True, 'import numpy as _np\n'), ((19169, 19234), 'numpy.zeros', '_np.zeros', (['(self.statehistorylen, self.numStates, self.numStates)'], {}), '((self.statehistorylen, self.numStates, self.numStates))\n', (19178, 19234), True, 'import numpy as _np\n'), ((19279, 19344), 'numpy.zeros', '_np.zeros', (['(self.statehistorylen, self.numStates, self.numStates)'], {}), '((self.statehistorylen, self.numStates, self.numStates))\n', (19288, 19344), True, 'import numpy as _np\n'), ((21375, 21398), 'numpy.eye', '_np.eye', (['self.numStates'], {}), '(self.numStates)\n', (21382, 21398), True, 'import numpy as _np\n'), ((21729, 21771), 'numpy.sum', '_np.sum', (['self.stateConnectivityAbs'], {'axis': '(0)'}), '(self.stateConnectivityAbs, axis=0)\n', (21736, 21771), True, 'import numpy as _np\n')]
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle.proto.ParameterConfig_pb2 import ParameterConfig
from collections import OrderedDict
import paddle.trainer.config_parser as cp
import struct
import tarfile
import cStringIO
from topology import Topology
__all__ = ['Parameters', 'create']
def create(layers):
"""
Create parameter pool by topology.
    :param layers: one or more output layers that define the network topology.
    :return: a Parameters object holding all learnable parameters of the topology.
"""
topology = Topology(layers)
pool = Parameters()
initializers = cp.g_parameter_initializer_map
for param in topology.proto().parameters:
pool.__append_config__(param)
if param.name in initializers:
pool[param.name] = initializers[param.name](param.name)
return pool
class Parameters(object):
"""
`Parameters` manages all the learnable parameters in a neural network.
It stores parameters' information in an OrderedDict. The key is
the name of a parameter, and value is a parameter's configuration(in
protobuf format), such as initialization mean and std, its size, whether it
is a static parameter, and so on.
:param __param_conf__: store the configurations of learnable parameters in
        the network in an OrderedDict. Parameters are added one by one into the
        dict following their creation order in the network: parameters of the
        earlier layers in a network are created first. You can visit the
        parameters from bottom to top by iterating over this dict.
:type __param_conf__: OrderedDict
:param __gradient_machines__: all of the parameters in a neural network are
appended to a PaddlePaddle gradient machine, which is used internally to
copy parameter values between C++ and Python end.
:type __gradient_machines__: list
:param __tmp_params__: a dict to store dummy parameters if no
__gradient_machines__ is appended to `Parameters`.
:type __tmp_params__: dict
    Basic usage is
.. code-block:: python
data = paddle.layers.data(...)
...
out = paddle.layers.fc(...)
parameters = paddle.parameters.create(out)
parameter_names = parameters.names()
fc_mat = parameters.get('fc')
print fc_mat
"""
def __init__(self):
self.__param_conf__ = OrderedDict()
self.__gradient_machines__ = []
self.__tmp_params__ = dict()
def __append_config__(self, param_conf):
"""
        Append a parameter configuration. It is used to initialize Parameters and
should be invoked only in paddle.parameters.create
:param param_conf: The parameter configuration in protobuf
:type param_conf: ParameterConfig
:return: Nothing
"""
if not isinstance(param_conf, ParameterConfig):
raise ValueError("param_conf must be paddle.proto.ParameterConfig")
if param_conf.name in self.__param_conf__:
raise ValueError("duplicated parameter %s" % param_conf.name)
self.__param_conf__[param_conf.name] = param_conf
def keys(self):
"""
keys are the names of each parameter.
:return: list of parameter name
:rtype: list
"""
return self.__param_conf__.keys()
def names(self):
"""
names of each parameter.
:return: list of parameter name
:rtype: list
"""
return self.keys()
def has_key(self, key):
"""
        has_key returns True if there is a parameter whose name equals key
:param key: Parameter name
:type key: basestring
:return: True if contains such key
"""
return key in self.__param_conf__.keys()
def __iter__(self):
"""
Return an iterator of parameter name. It is used by `for loop`
or `in` operator.
.. code-block:: python
parameters = paddle.parameters.create(...)
if "fc_param" in parameters:
print 'OK'
:return: an iterator of parameter name
:rtype: iterator
"""
return iter(self.__param_conf__)
def __getter_inner(self, key, param_type):
import py_paddle.swig_paddle as api
shape = self.get_shape(key)
if len(self.__gradient_machines__) == 0:
# create new parameter in python numpy.
if key in self.__tmp_params__:
return self.__tmp_params__[key]
else:
return np.ndarray(shape=shape, dtype=np.float32)
else:
for each_gradient_machine in self.__gradient_machines__:
param = __get_parameter_in_gradient_machine__(
each_gradient_machine, key)
# for simplify implementation now, we always copy from C++
assert isinstance(param, api.Parameter)
val = param.getBuf(param_type)
assert isinstance(val, api.Vector)
val = val.copyToNumpyArray()
return val
# else continue
raise RuntimeError("Unexpected branch")
def __getitem__(self, key):
"""
Get parameter by parameter name. It uses Python dict syntax.
:note: It will always copy the parameter from C++ side.
:param key: Parameter name
:type key: basestring
:return: parameter value
:rtype: np.ndarray
"""
import py_paddle.swig_paddle as api
return self.__getter_inner(key, api.PARAMETER_VALUE)
def get_shape(self, key):
"""
get shape of the parameter.
:param key: parameter name
:type key: basestring
:return: parameter's shape
:rtype: tuple
"""
if not isinstance(key, basestring):
raise ValueError("parameter name should be string")
if not self.has_key(key):
raise ValueError("No such parameter %s" % key)
conf = self.__param_conf__[key]
dims = conf.dims if conf.dims else (1, conf.size)
return tuple(map(int, dims))
def __setitem__(self, key, value):
"""
        Set parameter by parameter name & value. It uses Python dict syntax.
:note: It will always copy the parameter to C++ side.
:param key: Parameter name
:type key: basestring
:param value: Parameter matrix.
:type value: np.ndarray
:return: Nothing
"""
if not isinstance(value, np.ndarray):
raise ValueError("Must return ndarray")
value = value.astype(dtype=np.float32)
shape = self.get_shape(key)
if value.shape != shape:
raise ValueError("Value shape mismatch, expect %s, should %s" %
(shape, value.shape))
if len(self.__gradient_machines__) == 0:
self.__tmp_params__[key] = value
else:
for each_gradient_machine in self.__gradient_machines__:
__copy_parameter_to_gradient_machine__(each_gradient_machine,
key, value)
def get(self, parameter_name):
"""
Get parameter by parameter name.
:note: It will always copy the parameter from C++ side.
:param parameter_name: parameter name
:type parameter_name: basestring
:return: The parameter matrix.
:rtype: np.ndarray
"""
return self.__getitem__(key=parameter_name)
def get_grad(self, key):
"""
        Get gradient by parameter name.
:note: It will always copy the parameter from C++ side.
:param key: parameter name
:type key: basestring
        :return: The gradient matrix.
:rtype: np.ndarray
"""
import py_paddle.swig_paddle as api
if self.__param_conf__[key].is_static:
return np.zeros(self.__param_conf__[key].size, dtype=np.float32)
return self.__getter_inner(key, api.PARAMETER_GRADIENT)
def set(self, parameter_name, value):
"""
Set parameter by parameter name & matrix.
:param parameter_name: parameter name
:type parameter_name: basestring
:param value: parameter matrix
:type value: np.ndarray
:return: Nothing.
"""
self.__setitem__(key=parameter_name, value=value)
def append_gradient_machine(self, gradient_machine):
"""
append gradient machine to parameters. This method is used internally in
Trainer.train.
:param gradient_machine: PaddlePaddle C++ GradientMachine object.
:type gradient_machine: api.GradientMachine
:return:
"""
import py_paddle.swig_paddle as api
if not isinstance(gradient_machine, api.GradientMachine):
raise ValueError("gradient_machine should be api.GradientMachine")
if len(self.__tmp_params__) != 0:
for name, val in self.__tmp_params__.iteritems():
try:
__copy_parameter_to_gradient_machine__(gradient_machine,
name, val)
except ValueError:
# If no such parameter in gradient machine, then don't copy
pass
self.__gradient_machines__.append(gradient_machine)
def serialize(self, name, f):
"""
:param name:
:param f:
:type f: file
:return:
"""
param = self.get(name)
size = reduce(lambda a, b: a * b, param.shape)
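        # Added comment: pack the 16-byte header that deserialize() later skips with f.read(16):
        # two uint32 fields (a version-like flag 0 and the value width 4, i.e. float32) and a
        # uint64 element count.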
f.write(struct.pack("IIQ", 0, 4, size))
param = param.astype(np.float32)
s = param.tostring()
wrote_size = 0
buf = buffer(s, wrote_size, 65535)
        while buf:  # f.write crashes with big data blobs.
f.write(buf)
wrote_size += 65535
buf = buffer(s, wrote_size, 65535)
def deserialize(self, name, f):
"""
:param name:
:param f:
:type f: file
:return:
"""
f.read(16) # header
arr = np.frombuffer(f.read(), dtype=np.float32)
self.set(name, arr.reshape(self.get_shape(name)))
def to_tar(self, f):
tar = tarfile.TarFile(fileobj=f, mode='w')
for nm in self.names():
buf = cStringIO.StringIO()
self.serialize(nm, buf)
tarinfo = tarfile.TarInfo(name=nm)
buf.seek(0)
tarinfo.size = len(buf.getvalue())
tar.addfile(tarinfo, buf)
conf = self.__param_conf__[nm]
confStr = conf.SerializeToString()
tarinfo = tarfile.TarInfo(name="%s.protobuf" % nm)
tarinfo.size = len(confStr)
buf = cStringIO.StringIO(confStr)
buf.seek(0)
tar.addfile(tarinfo, fileobj=buf)
@staticmethod
def from_tar(f):
"""
        Create a `Parameters` object from the given file. The
        returned `Parameters` object only contains the parameters in this
        file. It assumes that the parameters in the given file match the
        ones defined in the network. For example, it
        can be used for inference.
:param f: the initialized model file.
:type f: tar file
:return: A Parameters object.
:rtype: Parameters.
"""
params = Parameters()
tar = tarfile.TarFile(fileobj=f, mode='r')
for finfo in tar:
assert isinstance(finfo, tarfile.TarInfo)
if finfo.name.endswith('.protobuf'):
f = tar.extractfile(finfo)
conf = ParameterConfig()
conf.ParseFromString(f.read())
params.__append_config__(conf)
for param_name in params.names():
f = tar.extractfile(param_name)
params.deserialize(param_name, f)
return params
def init_from_tar(self, f):
"""
Different from `from_tar`, this interface can be used to
init partial network parameters from another saved model.
:param f: the initialized model file.
:type f: tar file
:return: Nothing.
"""
tar_param = Parameters.from_tar(f)
for pname in tar_param.names():
if pname in self.names():
self.set(pname, tar_param.get(pname))
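# Illustrative sketch (added, not part of the original API docs): the difference between the two loaders.
#   with open('model.tar') as f:            # hypothetical file name
#       params = Parameters.from_tar(f)      # build a fresh Parameters object from the file
#   with open('model.tar') as f:
#       params.init_from_tar(f)              # copy only the parameters whose names also exist in `params`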
def __get_parameter_in_gradient_machine__(gradient_machine, name):
"""
:param gradient_machine:
:type gradient_machine: api.GradientMachine
:param name:
:return:
:rtype: api.Parameter
"""
params = filter(lambda p: p.getName() == name,
gradient_machine.getParameters())
if len(params) == 0:
raise ValueError("No such parameter")
elif len(params) > 1:
raise ValueError("Unexpected branch")
else:
return params[0]
def __copy_parameter_to_gradient_machine__(gradient_machine, name, arr):
"""
Copy a python ndarray into the gradient machine.
:param gradient_machine:
:type gradient_machine: api.GradientMachine
:param name:
:param arr:
:type arr: np.ndarray
:return:
:rtype: api.Parameter
"""
import py_paddle.swig_paddle as api
param = __get_parameter_in_gradient_machine__(gradient_machine, name)
vec = param.getBuf(api.PARAMETER_VALUE)
assert isinstance(vec, api.Vector)
vec.copyFromNumpyArray(arr.flatten())
|
[
"topology.Topology",
"tarfile.TarFile",
"paddle.proto.ParameterConfig_pb2.ParameterConfig",
"numpy.zeros",
"tarfile.TarInfo",
"struct.pack",
"collections.OrderedDict",
"cStringIO.StringIO",
"numpy.ndarray"
] |
[((1005, 1021), 'topology.Topology', 'Topology', (['layers'], {}), '(layers)\n', (1013, 1021), False, 'from topology import Topology\n'), ((2864, 2877), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2875, 2877), False, 'from collections import OrderedDict\n'), ((10804, 10840), 'tarfile.TarFile', 'tarfile.TarFile', ([], {'fileobj': 'f', 'mode': '"""w"""'}), "(fileobj=f, mode='w')\n", (10819, 10840), False, 'import tarfile\n'), ((11942, 11978), 'tarfile.TarFile', 'tarfile.TarFile', ([], {'fileobj': 'f', 'mode': '"""r"""'}), "(fileobj=f, mode='r')\n", (11957, 11978), False, 'import tarfile\n'), ((8435, 8492), 'numpy.zeros', 'np.zeros', (['self.__param_conf__[key].size'], {'dtype': 'np.float32'}), '(self.__param_conf__[key].size, dtype=np.float32)\n', (8443, 8492), True, 'import numpy as np\n'), ((10151, 10181), 'struct.pack', 'struct.pack', (['"""IIQ"""', '(0)', '(4)', 'size'], {}), "('IIQ', 0, 4, size)\n", (10162, 10181), False, 'import struct\n'), ((10891, 10911), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (10909, 10911), False, 'import cStringIO\n'), ((10970, 10994), 'tarfile.TarInfo', 'tarfile.TarInfo', ([], {'name': 'nm'}), '(name=nm)\n', (10985, 10994), False, 'import tarfile\n'), ((11217, 11257), 'tarfile.TarInfo', 'tarfile.TarInfo', ([], {'name': "('%s.protobuf' % nm)"}), "(name='%s.protobuf' % nm)\n", (11232, 11257), False, 'import tarfile\n'), ((11316, 11343), 'cStringIO.StringIO', 'cStringIO.StringIO', (['confStr'], {}), '(confStr)\n', (11334, 11343), False, 'import cStringIO\n'), ((5040, 5081), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'shape', 'dtype': 'np.float32'}), '(shape=shape, dtype=np.float32)\n', (5050, 5081), True, 'import numpy as np\n'), ((12174, 12191), 'paddle.proto.ParameterConfig_pb2.ParameterConfig', 'ParameterConfig', ([], {}), '()\n', (12189, 12191), False, 'from paddle.proto.ParameterConfig_pb2 import ParameterConfig\n')]
|
import numpy as np
import imageio
import os
AVAILABLE_IMAGES = ['barbara']
def _add_noise(img, sigma):
noise = np.random.normal(scale=sigma,
size=img.shape).astype(img.dtype)
return img + noise
def example_image(img_name, noise_std=0):
imgf = os.path.join('sparselandtools', 'applications', 'assets', img_name + '.png')
# read image
try:
img = imageio.imread(imgf)[:, :, 0].astype('float32')
except IndexError:
img = imageio.imread(imgf).astype('float32')
# add noise
img = _add_noise(img, sigma=noise_std)
return img
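# Illustrative usage (added; assumes the bundled 'barbara' asset is available on disk):
#   noisy = example_image('barbara', noise_std=20)
#   print(noisy.shape, noisy.dtype)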
|
[
"imageio.imread",
"os.path.join",
"numpy.random.normal"
] |
[((290, 366), 'os.path.join', 'os.path.join', (['"""sparselandtools"""', '"""applications"""', '"""assets"""', "(img_name + '.png')"], {}), "('sparselandtools', 'applications', 'assets', img_name + '.png')\n", (302, 366), False, 'import os\n'), ((118, 163), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'sigma', 'size': 'img.shape'}), '(scale=sigma, size=img.shape)\n', (134, 163), True, 'import numpy as np\n'), ((408, 428), 'imageio.imread', 'imageio.imread', (['imgf'], {}), '(imgf)\n', (422, 428), False, 'import imageio\n'), ((493, 513), 'imageio.imread', 'imageio.imread', (['imgf'], {}), '(imgf)\n', (507, 513), False, 'import imageio\n')]
|
import os
import sys
import argparse
import numpy as np
import theano.tensor as T
homepath = os.path.join('..', '..')
if not homepath in sys.path:
sys.path.insert(0, homepath)
from dlearn.models.layer import FullConnLayer, ConvPoolLayer
from dlearn.models.nnet import NeuralNet
from dlearn.utils import actfuncs, costfuncs
from dlearn.utils.serialize import load_data, save_data
from dlearn.optimization import sgd
# Program arguments parser
desctxt = """
Train latent network. Use learned attribute and segmentation network.
"""
dataset_txt = """
The input dataset data_name.pkl.
"""
attr_txt = """
The attribute network model_name.pkl.
"""
seg_txt = """
The segmentation network model_name.pkl.
"""
output_txt = """
If not specified, the output model will be saved as model_latent.pkl.
Otherwise it will be saved as model_latent_name.pkl.
"""
parser = argparse.ArgumentParser(description=desctxt)
parser.add_argument('-d', '--dataset', nargs=1, required=True,
metavar='name', help=dataset_txt)
parser.add_argument('-a', '--attribute', nargs=1, required=True,
metavar='name', help=attr_txt)
parser.add_argument('-s', '--segmentation', nargs=1, required=True,
metavar='name', help=seg_txt)
parser.add_argument('-o', '--output', nargs='?', default=None,
metavar='name', help=output_txt)
args = parser.parse_args()
def train_model(dataset, attr_model, seg_model):
def shape_constrained_pooling(fmaps):
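        # Added comment: sum each feature map over its spatial dimensions and normalise by the
        # total |tanh(fmaps)| activation mass, so the pooled value does not simply grow with the
        # number of active (segmented) locations.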
s = fmaps.sum(axis=[2, 3])
Z = abs(actfuncs.tanh(fmaps)).sum(axis=[2, 3])
return s / Z
X = T.tensor4()
A = T.matrix()
feature_layers = []
feature_layers.append(ConvPoolLayer(
input=X,
input_shape=(3, 160, 80),
filter_shape=(32, 3, 5, 5),
pool_shape=(2, 2),
active_func=actfuncs.tanh,
flatten=False,
W=attr_model.blocks[0]._W,
b=0.0
))
feature_layers.append(ConvPoolLayer(
input=feature_layers[-1].output,
input_shape=feature_layers[-1].output_shape,
filter_shape=(64, 32, 5, 5),
pool_shape=(2, 2),
active_func=actfuncs.tanh,
flatten=False,
W=attr_model.blocks[1]._W,
b=0.0
))
seg_layers = []
seg_layers.append(FullConnLayer(
input=feature_layers[-1].output.flatten(2),
input_shape=np.prod(feature_layers[-1].output_shape),
output_shape=1024,
dropout_ratio=0.1,
active_func=actfuncs.tanh,
W=seg_model.blocks[2]._W,
b=seg_model.blocks[2]._b
))
seg_layers.append(FullConnLayer(
input=seg_layers[-1].output,
input_shape=seg_layers[-1].output_shape,
output_shape=37 * 17,
dropout_input=seg_layers[-1].dropout_output,
active_func=actfuncs.sigmoid,
W=seg_model.blocks[3]._W,
b=seg_model.blocks[3]._b
))
S = seg_layers[-1].output
S = S * (S >= 0.1)
S = S.reshape((S.shape[0], 37, 17))
S = S.dimshuffle(0, 'x', 1, 2)
S_dropout = seg_layers[-1].dropout_output
S_dropout = S_dropout * (S_dropout >= 0.1)
S_dropout = S_dropout.reshape((S_dropout.shape[0], 37, 17))
S_dropout = S_dropout.dimshuffle(0, 'x', 1, 2)
attr_layers = []
'''
attr_layers.append(ConvPoolLayer(
input=feature_layers[-1].output * S,
input_shape=feature_layers[-1].output_shape,
filter_shape=(128, 64, 3, 3),
pool_shape=(2, 2),
dropout_input=feature_layers[-1].output * S_dropout,
active_func=actfuncs.tanh,
flatten=False,
W=attr_model.blocks[2]._W,
b=0.0
))
'''
attr_layers.append(FullConnLayer(
input=shape_constrained_pooling(feature_layers[-1].output * S),
input_shape=feature_layers[-1].output_shape,
output_shape=64,
dropout_input=shape_constrained_pooling(
feature_layers[-1].dropout_output * S_dropout),
dropout_ratio=0.1,
active_func=actfuncs.tanh,
W=attr_model.blocks[2]._W,
b=attr_model.blocks[2]._b
))
attr_layers.append(FullConnLayer(
input=attr_layers[-1].output,
input_shape=attr_layers[-1].output_shape,
output_shape=11,
dropout_input=attr_layers[-1].dropout_output,
active_func=actfuncs.sigmoid,
W=attr_model.blocks[3]._W,
b=attr_model.blocks[3]._b
))
model = NeuralNet(feature_layers + seg_layers + attr_layers,
X, attr_layers[-1].output)
model.target = A
model.cost = costfuncs.binxent(attr_layers[-1].dropout_output, A) + \
1e-3 * model.get_norm(2)
model.error = costfuncs.binerr(attr_layers[-1].output, A)
sgd.train(model, dataset, lr=1e-3, momentum=0.9,
batch_size=100, n_epochs=300,
epoch_waiting=10)
return model
if __name__ == '__main__':
dataset_file = 'data_{0}.pkl'.format(args.dataset[0])
attr_file = 'model_{0}.pkl'.format(args.attribute[0])
seg_file = 'model_{0}.pkl'.format(args.segmentation[0])
out_file = 'model_latent.pkl' if args.output is None else \
'model_latent_{0}.pkl'.format(args.output)
dataset = load_data(dataset_file)
attr_model = load_data(attr_file)
seg_model = load_data(seg_file)
model = train_model(dataset, attr_model, seg_model)
save_data(model, out_file)
|
[
"argparse.ArgumentParser",
"theano.tensor.tensor4",
"dlearn.utils.serialize.load_data",
"dlearn.optimization.sgd.train",
"dlearn.utils.costfuncs.binxent",
"sys.path.insert",
"dlearn.utils.costfuncs.binerr",
"dlearn.models.layer.ConvPoolLayer",
"numpy.prod",
"dlearn.utils.actfuncs.tanh",
"dlearn.utils.serialize.save_data",
"dlearn.models.nnet.NeuralNet",
"os.path.join",
"dlearn.models.layer.FullConnLayer",
"theano.tensor.matrix"
] |
[((94, 118), 'os.path.join', 'os.path.join', (['""".."""', '""".."""'], {}), "('..', '..')\n", (106, 118), False, 'import os\n'), ((868, 912), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desctxt'}), '(description=desctxt)\n', (891, 912), False, 'import argparse\n'), ((153, 181), 'sys.path.insert', 'sys.path.insert', (['(0)', 'homepath'], {}), '(0, homepath)\n', (168, 181), False, 'import sys\n'), ((1622, 1633), 'theano.tensor.tensor4', 'T.tensor4', ([], {}), '()\n', (1631, 1633), True, 'import theano.tensor as T\n'), ((1642, 1652), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (1650, 1652), True, 'import theano.tensor as T\n'), ((4436, 4515), 'dlearn.models.nnet.NeuralNet', 'NeuralNet', (['(feature_layers + seg_layers + attr_layers)', 'X', 'attr_layers[-1].output'], {}), '(feature_layers + seg_layers + attr_layers, X, attr_layers[-1].output)\n', (4445, 4515), False, 'from dlearn.models.nnet import NeuralNet\n'), ((4685, 4728), 'dlearn.utils.costfuncs.binerr', 'costfuncs.binerr', (['attr_layers[-1].output', 'A'], {}), '(attr_layers[-1].output, A)\n', (4701, 4728), False, 'from dlearn.utils import actfuncs, costfuncs\n'), ((4734, 4836), 'dlearn.optimization.sgd.train', 'sgd.train', (['model', 'dataset'], {'lr': '(0.001)', 'momentum': '(0.9)', 'batch_size': '(100)', 'n_epochs': '(300)', 'epoch_waiting': '(10)'}), '(model, dataset, lr=0.001, momentum=0.9, batch_size=100, n_epochs=\n 300, epoch_waiting=10)\n', (4743, 4836), False, 'from dlearn.optimization import sgd\n'), ((5219, 5242), 'dlearn.utils.serialize.load_data', 'load_data', (['dataset_file'], {}), '(dataset_file)\n', (5228, 5242), False, 'from dlearn.utils.serialize import load_data, save_data\n'), ((5260, 5280), 'dlearn.utils.serialize.load_data', 'load_data', (['attr_file'], {}), '(attr_file)\n', (5269, 5280), False, 'from dlearn.utils.serialize import load_data, save_data\n'), ((5297, 5316), 'dlearn.utils.serialize.load_data', 'load_data', (['seg_file'], {}), '(seg_file)\n', (5306, 5316), False, 'from dlearn.utils.serialize import load_data, save_data\n'), ((5379, 5405), 'dlearn.utils.serialize.save_data', 'save_data', (['model', 'out_file'], {}), '(model, out_file)\n', (5388, 5405), False, 'from dlearn.utils.serialize import load_data, save_data\n'), ((1704, 1884), 'dlearn.models.layer.ConvPoolLayer', 'ConvPoolLayer', ([], {'input': 'X', 'input_shape': '(3, 160, 80)', 'filter_shape': '(32, 3, 5, 5)', 'pool_shape': '(2, 2)', 'active_func': 'actfuncs.tanh', 'flatten': '(False)', 'W': 'attr_model.blocks[0]._W', 'b': '(0.0)'}), '(input=X, input_shape=(3, 160, 80), filter_shape=(32, 3, 5, 5),\n pool_shape=(2, 2), active_func=actfuncs.tanh, flatten=False, W=\n attr_model.blocks[0]._W, b=0.0)\n', (1717, 1884), False, 'from dlearn.models.layer import FullConnLayer, ConvPoolLayer\n'), ((1974, 2198), 'dlearn.models.layer.ConvPoolLayer', 'ConvPoolLayer', ([], {'input': 'feature_layers[-1].output', 'input_shape': 'feature_layers[-1].output_shape', 'filter_shape': '(64, 32, 5, 5)', 'pool_shape': '(2, 2)', 'active_func': 'actfuncs.tanh', 'flatten': '(False)', 'W': 'attr_model.blocks[1]._W', 'b': '(0.0)'}), '(input=feature_layers[-1].output, input_shape=feature_layers[-\n 1].output_shape, filter_shape=(64, 32, 5, 5), pool_shape=(2, 2),\n active_func=actfuncs.tanh, flatten=False, W=attr_model.blocks[1]._W, b=0.0)\n', (1987, 2198), False, 'from dlearn.models.layer import FullConnLayer, ConvPoolLayer\n'), ((2619, 2865), 'dlearn.models.layer.FullConnLayer', 'FullConnLayer', ([], {'input': 
'seg_layers[-1].output', 'input_shape': 'seg_layers[-1].output_shape', 'output_shape': '(37 * 17)', 'dropout_input': 'seg_layers[-1].dropout_output', 'active_func': 'actfuncs.sigmoid', 'W': 'seg_model.blocks[3]._W', 'b': 'seg_model.blocks[3]._b'}), '(input=seg_layers[-1].output, input_shape=seg_layers[-1].\n output_shape, output_shape=37 * 17, dropout_input=seg_layers[-1].\n dropout_output, active_func=actfuncs.sigmoid, W=seg_model.blocks[3]._W,\n b=seg_model.blocks[3]._b)\n', (2632, 2865), False, 'from dlearn.models.layer import FullConnLayer, ConvPoolLayer\n'), ((4127, 4373), 'dlearn.models.layer.FullConnLayer', 'FullConnLayer', ([], {'input': 'attr_layers[-1].output', 'input_shape': 'attr_layers[-1].output_shape', 'output_shape': '(11)', 'dropout_input': 'attr_layers[-1].dropout_output', 'active_func': 'actfuncs.sigmoid', 'W': 'attr_model.blocks[3]._W', 'b': 'attr_model.blocks[3]._b'}), '(input=attr_layers[-1].output, input_shape=attr_layers[-1].\n output_shape, output_shape=11, dropout_input=attr_layers[-1].\n dropout_output, active_func=actfuncs.sigmoid, W=attr_model.blocks[3]._W,\n b=attr_model.blocks[3]._b)\n', (4140, 4373), False, 'from dlearn.models.layer import FullConnLayer, ConvPoolLayer\n'), ((4577, 4629), 'dlearn.utils.costfuncs.binxent', 'costfuncs.binxent', (['attr_layers[-1].dropout_output', 'A'], {}), '(attr_layers[-1].dropout_output, A)\n', (4594, 4629), False, 'from dlearn.utils import actfuncs, costfuncs\n'), ((2391, 2431), 'numpy.prod', 'np.prod', (['feature_layers[-1].output_shape'], {}), '(feature_layers[-1].output_shape)\n', (2398, 2431), True, 'import numpy as np\n'), ((1553, 1573), 'dlearn.utils.actfuncs.tanh', 'actfuncs.tanh', (['fmaps'], {}), '(fmaps)\n', (1566, 1573), False, 'from dlearn.utils import actfuncs, costfuncs\n')]
|
import numpy as np
from common import numerical_gradient, softmax, cross_entropy_error
from collections import OrderedDict
class Relu:
def __init__(self):
self.mask = None
def forward(self, x):
self.mask = (x <= 0)
out = x.copy()
out[self.mask] = 0
return out
def backward(self, dout):
dout[self.mask] = 0
dx = dout
return dx
class Affine:
def __init__(self, W, b):
self.W = W
self.b = b
self.x = None
self.dW = None
self.db = None
def forward(self, x):
self.x = x
return np.dot(x, self.W) + self.b
def backward(self, dout):
dx = np.dot(dout, self.W.T)
self.dW = np.dot(self.x.T, dout)
self.db = np.sum(dout, axis=0)
return dx
class SoftmaxWithLoss:
def __init__(self):
self.loss = None
self.y = None
        self.t = None
def forward(self, x, t):
self.t = t
self.y = softmax(x)
self.loss = cross_entropy_error(self.y, self.t)
return self.loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
dx = (self.y - self.t) / batch_size
return dx
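# Added note: because softmax and cross-entropy are fused in SoftmaxWithLoss, the backward pass
# simplifies to (y - t) / batch_size for one-hot targets t, which is exactly what backward() returns.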
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size,
weight_init_std=0.01):
self.params = {}
self.params['W1'] = weight_init_std * \
np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = weight_init_std * \
np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
self.layers = OrderedDict()
self.layers['Affine1'] = \
Affine(self.params['W1'], self.params['b1'])
self.layers['Relu1'] = Relu()
self.layers['Affine2'] = \
Affine(self.params['W2'], self.params['b2'])
self.lastLayer = SoftmaxWithLoss()
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
def loss(self, x, t):
y = self.predict(x)
return self.lastLayer.forward(y, t)
def accuracy(self, x, t):
y = self.predict(x)
y = np.argmax(y, axis=1)
if t.ndim != 1:
t = np.argmax(t, axis=1)
accuracy = np.sum(y == t) / float(x.shape[0])
return accuracy
def numerical_gradient(self, x, t):
loss_W = lambda W: self.loss(x, t)
grads = {}
grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
return grads
def gradient(self, x, t):
        # forward pass
self.loss(x, t)
dout = 1
dout = self.lastLayer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
grads = {}
grads['W1'] = self.layers['Affine1'].dW
grads['b1'] = self.layers['Affine1'].db
grads['W2'] = self.layers['Affine2'].dW
grads['b2'] = self.layers['Affine2'].db
return grads
# ========== ========== #
# Verify the gradients obtained by backpropagation against numerical gradients
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) = \
load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
x_batch = x_train[:3]
t_batch = t_train[:3]
grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)
for k in grad_numerical.keys():
diff = np.average(np.abs(grad_backprop[k] - grad_numerical[k]))
print(k + ' : ' + str(diff))
# ========== ========== #
# Training loop implementation
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)
print('#========== ==========#')
print('iters_num: %s' % str(iters_num))
print('train_size: %s' % str(train_size))
print('batch_size: %s' % str(batch_size))
print('learning_rate: %s' % str(learning_rate))
print('iter_per_epoch: %s' % str(iter_per_epoch))
print('#========== ==========#')
for i in range(iters_num):
batch_mask = np.random.choice(train_size, batch_size)
# print(batch_mask)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
    # Compute the gradients by backpropagation
grad = network.gradient(x_batch, t_batch)
    # Update the parameters
for key in ('W1', 'b1', 'W2', 'b2'):
network.params[key] -= learning_rate * grad[key]
loss = network.loss(x_batch, t_batch)
train_loss_list.append(loss)
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
print(train_acc, test_acc)
|
[
"numpy.sum",
"numpy.abs",
"numpy.argmax",
"common.numerical_gradient",
"numpy.random.randn",
"common.softmax",
"numpy.zeros",
"dataset.mnist.load_mnist",
"numpy.random.choice",
"collections.OrderedDict",
"numpy.dot",
"common.cross_entropy_error"
] |
[((3510, 3556), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'normalize': '(True)', 'one_hot_label': '(True)'}), '(normalize=True, one_hot_label=True)\n', (3520, 3556), False, 'from dataset.mnist import load_mnist\n'), ((4487, 4527), 'numpy.random.choice', 'np.random.choice', (['train_size', 'batch_size'], {}), '(train_size, batch_size)\n', (4503, 4527), True, 'import numpy as np\n'), ((697, 719), 'numpy.dot', 'np.dot', (['dout', 'self.W.T'], {}), '(dout, self.W.T)\n', (703, 719), True, 'import numpy as np\n'), ((738, 760), 'numpy.dot', 'np.dot', (['self.x.T', 'dout'], {}), '(self.x.T, dout)\n', (744, 760), True, 'import numpy as np\n'), ((779, 799), 'numpy.sum', 'np.sum', (['dout'], {'axis': '(0)'}), '(dout, axis=0)\n', (785, 799), True, 'import numpy as np\n'), ((1003, 1013), 'common.softmax', 'softmax', (['x'], {}), '(x)\n', (1010, 1013), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((1034, 1069), 'common.cross_entropy_error', 'cross_entropy_error', (['self.y', 'self.t'], {}), '(self.y, self.t)\n', (1053, 1069), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((1519, 1540), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (1527, 1540), True, 'import numpy as np\n'), ((1687, 1708), 'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (1695, 1708), True, 'import numpy as np\n'), ((1732, 1745), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1743, 1745), False, 'from collections import OrderedDict\n'), ((2302, 2322), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (2311, 2322), True, 'import numpy as np\n'), ((2589, 2634), 'common.numerical_gradient', 'numerical_gradient', (['loss_W', "self.params['W1']"], {}), "(loss_W, self.params['W1'])\n", (2607, 2634), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((2657, 2702), 'common.numerical_gradient', 'numerical_gradient', (['loss_W', "self.params['b1']"], {}), "(loss_W, self.params['b1'])\n", (2675, 2702), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((2725, 2770), 'common.numerical_gradient', 'numerical_gradient', (['loss_W', "self.params['W2']"], {}), "(loss_W, self.params['W2'])\n", (2743, 2770), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((2793, 2838), 'common.numerical_gradient', 'numerical_gradient', (['loss_W', "self.params['b2']"], {}), "(loss_W, self.params['b2'])\n", (2811, 2838), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((3841, 3885), 'numpy.abs', 'np.abs', (['(grad_backprop[k] - grad_numerical[k])'], {}), '(grad_backprop[k] - grad_numerical[k])\n', (3847, 3885), True, 'import numpy as np\n'), ((626, 643), 'numpy.dot', 'np.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (632, 643), True, 'import numpy as np\n'), ((1450, 1490), 'numpy.random.randn', 'np.random.randn', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (1465, 1490), True, 'import numpy as np\n'), ((1617, 1658), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1632, 1658), True, 'import numpy as np\n'), ((2363, 2383), 'numpy.argmax', 'np.argmax', (['t'], {'axis': '(1)'}), '(t, axis=1)\n', (2372, 2383), True, 'import numpy as np\n'), ((2404, 2418), 'numpy.sum', 'np.sum', (['(y == t)'], {}), '(y == t)\n', (2410, 2418), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import numpy as np
from scipy.stats import norm
def bachelier(So, K, sigma, T, option_type):
'''
Calculate European option price using Bachelier model:
dSt = sigma * S0 * dWt
St = S0*(1 + sigma*Wt)
Parameter
---------
So: float
price of underlying asset at time 0
K: float
strike price of option
    sigma: float
        volatility (scale) parameter of the Brownian motion
    T: float
        time to maturity
option_type: str
type of European option.
Including: van call/put (vanilla), con call/put (cash-or-nothing), aon call/put (asset-or-nothing)
Return
------
val: value of the option at time 0
'''
xs = (K-So) / (So * sigma * np.sqrt(T))
val = None
if So == K:
return sigma*So*np.sqrt(T/(2*np.pi))
if option_type == 'van call':
val = (So - K) * norm.cdf(-xs) + So*sigma*np.sqrt(T)*norm.pdf(-xs)
elif option_type == 'van put':
val = (K - So) * norm.cdf(xs) + So*sigma*np.sqrt(T)*norm.pdf(xs)
elif option_type == 'con call':
val = norm.cdf(-xs)
elif option_type == 'con put':
val = norm.cdf(xs)
elif option_type == 'aon call':
val = So*norm.cdf(-xs) + So*sigma*np.sqrt(T)*norm.pdf(-xs)
elif option_type == 'aon put':
val = So*norm.cdf(xs) - So*sigma*np.sqrt(T)*norm.pdf(xs)
else:
raise(ValueError("Option type is invalid. " +
"Should be either 'van call', 'van put', 'con call', 'con put', 'aon call', or 'aon put'"))
return val
def black_scholes(So, K, r, sigma, T, option_type):
'''
Calculate European option price using Black-Scholes (1973) model:
        dSt = r*St*dt + sigma*St*dWt
St = S0*exp{(r-sigma^2/2)t + sigma*Wt}
Parameter
---------
So: float
price of underlying asset at time 0
K: float
strike price of option
    r: float
        risk-free rate (the drift of St)
    sigma: float
        volatility (scale) parameter of the Brownian motion
    T: float
        time to maturity
option_type: str
type of European option.
Including: van call/put (vanilla), con call/put (cash-or-nothing), aon call/put (asset-or-nothing)
Return
------
val: value of the option at time 0
'''
d1 = (np.log(So/K) + (r+sigma**2/2)*T) / (sigma*np.sqrt(T))
d2 = (np.log(So/K) + (r-sigma**2/2)*T) / (sigma*np.sqrt(T))
val = None
if So == K:
return sigma*So*np.sqrt(T/(2*np.pi))
if option_type == 'van call':
val = So*norm.cdf(d1) - K*np.e**(-r*T)*norm.cdf(d2)
elif option_type == 'van put':
val = -So*norm.cdf(-d1) + K*np.e**(-r*T)*norm.cdf(-d2)
elif option_type == 'con call':
val = np.e**(-r*T) * norm.cdf(d2)
elif option_type == 'con put':
val = np.e**(-r*T) * norm.cdf(-d2)
elif option_type == 'aon call':
val = So*norm.cdf(d1)
elif option_type == 'aon put':
val = So*norm.cdf(-d1)
else:
raise(ValueError("Option type is invalid. " +
"Should be either 'van call', 'van put', 'con call', 'con put', 'aon call', or 'aon put'"))
return val
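# Minimal usage sketch (added for illustration; the parameter values are arbitrary and not from the original file).
if __name__ == '__main__':
    # spot 100, strike 105, sigma 0.2, T = 1 year (plus r = 0.02 for Black-Scholes)
    print('Bachelier vanilla call:', bachelier(100.0, 105.0, 0.2, 1.0, 'van call'))
    print('Black-Scholes vanilla call:', black_scholes(100.0, 105.0, 0.02, 0.2, 1.0, 'van call'))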
|
[
"scipy.stats.norm.cdf",
"scipy.stats.norm.pdf",
"numpy.log",
"numpy.sqrt"
] |
[((748, 758), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (755, 758), True, 'import numpy as np\n'), ((820, 844), 'numpy.sqrt', 'np.sqrt', (['(T / (2 * np.pi))'], {}), '(T / (2 * np.pi))\n', (827, 844), True, 'import numpy as np\n'), ((2341, 2355), 'numpy.log', 'np.log', (['(So / K)'], {}), '(So / K)\n', (2347, 2355), True, 'import numpy as np\n'), ((2383, 2393), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (2390, 2393), True, 'import numpy as np\n'), ((2405, 2419), 'numpy.log', 'np.log', (['(So / K)'], {}), '(So / K)\n', (2411, 2419), True, 'import numpy as np\n'), ((2447, 2457), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (2454, 2457), True, 'import numpy as np\n'), ((2519, 2543), 'numpy.sqrt', 'np.sqrt', (['(T / (2 * np.pi))'], {}), '(T / (2 * np.pi))\n', (2526, 2543), True, 'import numpy as np\n'), ((905, 918), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-xs)'], {}), '(-xs)\n', (913, 918), False, 'from scipy.stats import norm\n'), ((941, 954), 'scipy.stats.norm.pdf', 'norm.pdf', (['(-xs)'], {}), '(-xs)\n', (949, 954), False, 'from scipy.stats import norm\n'), ((1113, 1126), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-xs)'], {}), '(-xs)\n', (1121, 1126), False, 'from scipy.stats import norm\n'), ((2596, 2608), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (2604, 2608), False, 'from scipy.stats import norm\n'), ((2626, 2638), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (2634, 2638), False, 'from scipy.stats import norm\n'), ((930, 940), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (937, 940), True, 'import numpy as np\n'), ((1015, 1027), 'scipy.stats.norm.cdf', 'norm.cdf', (['xs'], {}), '(xs)\n', (1023, 1027), False, 'from scipy.stats import norm\n'), ((1050, 1062), 'scipy.stats.norm.pdf', 'norm.pdf', (['xs'], {}), '(xs)\n', (1058, 1062), False, 'from scipy.stats import norm\n'), ((1176, 1188), 'scipy.stats.norm.cdf', 'norm.cdf', (['xs'], {}), '(xs)\n', (1184, 1188), False, 'from scipy.stats import norm\n'), ((2692, 2705), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d1)'], {}), '(-d1)\n', (2700, 2705), False, 'from scipy.stats import norm\n'), ((2723, 2736), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d2)'], {}), '(-d2)\n', (2731, 2736), False, 'from scipy.stats import norm\n'), ((2802, 2814), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (2810, 2814), False, 'from scipy.stats import norm\n'), ((1039, 1049), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (1046, 1049), True, 'import numpy as np\n'), ((2879, 2892), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d2)'], {}), '(-d2)\n', (2887, 2892), False, 'from scipy.stats import norm\n'), ((2946, 2958), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (2954, 2958), False, 'from scipy.stats import norm\n'), ((1242, 1255), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-xs)'], {}), '(-xs)\n', (1250, 1255), False, 'from scipy.stats import norm\n'), ((1278, 1291), 'scipy.stats.norm.pdf', 'norm.pdf', (['(-xs)'], {}), '(-xs)\n', (1286, 1291), False, 'from scipy.stats import norm\n'), ((3011, 3024), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d1)'], {}), '(-d1)\n', (3019, 3024), False, 'from scipy.stats import norm\n'), ((1267, 1277), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (1274, 1277), True, 'import numpy as np\n'), ((1344, 1356), 'scipy.stats.norm.cdf', 'norm.cdf', (['xs'], {}), '(xs)\n', (1352, 1356), False, 'from scipy.stats import norm\n'), ((1379, 1391), 'scipy.stats.norm.pdf', 'norm.pdf', (['xs'], {}), '(xs)\n', (1387, 1391), False, 'from scipy.stats import norm\n'), ((1368, 1378), 'numpy.sqrt', 
'np.sqrt', (['T'], {}), '(T)\n', (1375, 1378), True, 'import numpy as np\n')]
|
from pydlm import dlm, trend, seasonality
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt
# A linear trend
linear_trend = trend(degree=1, discount=1, name='linear_trend', w=10)
# Simulate a random-walk time series with observation noise
time_series = []
for i in range(10):
if i == 0:
x_sim = np.random.normal(0,1,1)
else:
x_sim = np.random.normal(x_sim,10,1)
time_series.append(np.random.normal(x_sim,10,1))
time_series = np.array(time_series)
simple_dlm = dlm(time_series) + linear_trend
simple_dlm.fit()
filteredMean = simple_dlm.getMean(filterType='forwardFilter')
filteredVar = simple_dlm.getVar(filterType='forwardFilter')
ll = 0
one_step_ahead_samples = []
for i in range(len(time_series)):
tmp_samples = []
for j in range(1000):
tmp = np.random.normal(filteredMean[i],filteredVar[i], 1)
tmp_samples.append(np.random.normal(tmp,1,1))
one_step_ahead_samples.append(tmp_samples)
one_step_ahead_samples = np.array(one_step_ahead_samples)
upper_pi = []
lower_pi = []
for p in one_step_ahead_samples:
upper_pi.append(np.percentile(p,95))
lower_pi.append(np.percentile(p,5))
time_series_shifted = time_series
#plt.plot(range(len(time_series_shifted)),time_series_shifted,color='orange')
#plt.fill_between(range(len(time_series_shifted)),upper_pi,lower_pi,alpha=.3)
#plt.show()
from pykalman import KalmanFilter
random_state = np.random.RandomState(0)
transition_matrix = 1
transition_offset = .1
observation_matrix = 1
observation_offset = 1
transition_covariance = 10
observation_covariance = 1
initial_state_mean = 0
initial_state_covariance = 1
# sample from model
kf = KalmanFilter(
transition_matrix, observation_matrix, transition_covariance,
observation_covariance, transition_offset, observation_offset,
initial_state_mean, initial_state_covariance,
random_state=random_state
)
filtered_state_means, filtered_state_variances = kf.filter(time_series)
filteredMean = filtered_state_means.reshape((-1))
filteredVar = filtered_state_variances.reshape((-1))
one_step_ahead_samples = []
for i in range(len(time_series)):
tmp_samples = []
for j in range(10000):
tmp = np.random.normal(filteredMean[i],filteredVar[i], 1)
tmp2 = np.random.normal(tmp,10,1)
tmp_samples.append(np.random.normal(tmp2,10,1))
one_step_ahead_samples.append(tmp_samples)
one_step_ahead_samples = np.array(one_step_ahead_samples)
upper_pi = []
lower_pi = []
for p in one_step_ahead_samples:
upper_pi.append(np.percentile(p,95))
lower_pi.append(np.percentile(p,5))
time_series = time_series.reshape((-1))
time_series_shifted = time_series.tolist()[1:] + [10]
plt.plot(range(len(time_series_shifted)),time_series_shifted,color='orange')
plt.fill_between(range(len(time_series_shifted)),upper_pi,lower_pi,alpha=.3)
plt.show()
|
[
"matplotlib.pyplot.show",
"pydlm.trend",
"numpy.random.RandomState",
"pykalman.KalmanFilter",
"numpy.percentile",
"numpy.array",
"numpy.random.normal",
"pydlm.dlm"
] |
[((155, 209), 'pydlm.trend', 'trend', ([], {'degree': '(1)', 'discount': '(1)', 'name': '"""linear_trend"""', 'w': '(10)'}), "(degree=1, discount=1, name='linear_trend', w=10)\n", (160, 209), False, 'from pydlm import dlm, trend, seasonality\n'), ((441, 462), 'numpy.array', 'np.array', (['time_series'], {}), '(time_series)\n', (449, 462), True, 'import numpy as np\n'), ((957, 989), 'numpy.array', 'np.array', (['one_step_ahead_samples'], {}), '(one_step_ahead_samples)\n', (965, 989), True, 'import numpy as np\n'), ((1388, 1412), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (1409, 1412), True, 'import numpy as np\n'), ((1637, 1855), 'pykalman.KalmanFilter', 'KalmanFilter', (['transition_matrix', 'observation_matrix', 'transition_covariance', 'observation_covariance', 'transition_offset', 'observation_offset', 'initial_state_mean', 'initial_state_covariance'], {'random_state': 'random_state'}), '(transition_matrix, observation_matrix, transition_covariance,\n observation_covariance, transition_offset, observation_offset,\n initial_state_mean, initial_state_covariance, random_state=random_state)\n', (1649, 1855), False, 'from pykalman import KalmanFilter\n'), ((2477, 2509), 'numpy.array', 'np.array', (['one_step_ahead_samples'], {}), '(one_step_ahead_samples)\n', (2485, 2509), True, 'import numpy as np\n'), ((2903, 2913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2911, 2913), True, 'import matplotlib.pyplot as plt\n'), ((476, 492), 'pydlm.dlm', 'dlm', (['time_series'], {}), '(time_series)\n', (479, 492), False, 'from pydlm import dlm, trend, seasonality\n'), ((295, 320), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (311, 320), True, 'import numpy as np\n'), ((345, 375), 'numpy.random.normal', 'np.random.normal', (['x_sim', '(10)', '(1)'], {}), '(x_sim, 10, 1)\n', (361, 375), True, 'import numpy as np\n'), ((397, 427), 'numpy.random.normal', 'np.random.normal', (['x_sim', '(10)', '(1)'], {}), '(x_sim, 10, 1)\n', (413, 427), True, 'import numpy as np\n'), ((779, 831), 'numpy.random.normal', 'np.random.normal', (['filteredMean[i]', 'filteredVar[i]', '(1)'], {}), '(filteredMean[i], filteredVar[i], 1)\n', (795, 831), True, 'import numpy as np\n'), ((1072, 1092), 'numpy.percentile', 'np.percentile', (['p', '(95)'], {}), '(p, 95)\n', (1085, 1092), True, 'import numpy as np\n'), ((1113, 1132), 'numpy.percentile', 'np.percentile', (['p', '(5)'], {}), '(p, 5)\n', (1126, 1132), True, 'import numpy as np\n'), ((2246, 2298), 'numpy.random.normal', 'np.random.normal', (['filteredMean[i]', 'filteredVar[i]', '(1)'], {}), '(filteredMean[i], filteredVar[i], 1)\n', (2262, 2298), True, 'import numpy as np\n'), ((2313, 2341), 'numpy.random.normal', 'np.random.normal', (['tmp', '(10)', '(1)'], {}), '(tmp, 10, 1)\n', (2329, 2341), True, 'import numpy as np\n'), ((2592, 2612), 'numpy.percentile', 'np.percentile', (['p', '(95)'], {}), '(p, 95)\n', (2605, 2612), True, 'import numpy as np\n'), ((2633, 2652), 'numpy.percentile', 'np.percentile', (['p', '(5)'], {}), '(p, 5)\n', (2646, 2652), True, 'import numpy as np\n'), ((858, 885), 'numpy.random.normal', 'np.random.normal', (['tmp', '(1)', '(1)'], {}), '(tmp, 1, 1)\n', (874, 885), True, 'import numpy as np\n'), ((2376, 2405), 'numpy.random.normal', 'np.random.normal', (['tmp2', '(10)', '(1)'], {}), '(tmp2, 10, 1)\n', (2392, 2405), True, 'import numpy as np\n')]
|
""" Implementation of :py:class:`Dataset` object. A folder containing a set of subjects with CT and RS in dicom format
is converted into nii format. A new folder is created keeping the same organization.
"""
import os
import numpy as np
from dcmrtstruct2nii import dcmrtstruct2nii, list_rt_structs
class Dataset:
"""
From dicom to dataset class. Convert CT and RTSTRUCT into nii, readable by deep learning frameworks.
All subfolders representing subject must contain the CT and the RS associated.
Example:
>>> from segmentation_rt.rs2mask import Dataset
>>> structures = ['Heart', 'Breast L', 'Breast R']
>>> dataset = Dataset('data/dicom_dataset', 'data/nii_dataset', structures)
>>> dataset.make()
:param string path:
Root directory.
:param string export_path:
Export path.
:param list[string] structures:
List of desired structure(s).
:param bool force:
Force export even if one structure is missing.
"""
def __init__(self, path, export_path, structures, force=True):
self.path = path
self.export_path = export_path
self.structures = structures
self.dataset_name = os.path.basename(export_path)
self.force = force
self.root_path = os.path.dirname(self.path)
self.patients = [folder for folder in os.listdir(self.path) if
os.path.isdir(os.path.join(self.path, folder))]
self.patient_paths = [os.path.join(self.path, patient) for patient in self.patients]
self.rs_paths = self.get_rs()
def __str__(self):
return self.dataset_name
def get_rs(self):
"""
List RTSTRUCT for each patient.
:rtype: list[str]
"""
rs_paths = []
for path in self.patient_paths:
files = [filename for filename in os.listdir(path) if filename.startswith("RS")]
assert len(files) > 0, 'at least one RS is required'
rs = files[0]
rs_paths.append(os.path.join(path, rs))
return rs_paths
def find_structures(self, index):
"""
List missing and not missing structures in a RTSTRUCT.
:param index: index of the patient.
:type index: int
:return: List missing and not missing structures.
:rtype: (list[str],list[str])
"""
structures = list_rt_structs(self.rs_paths[index])
ref_structures = np.array(self.structures)
maks = np.in1d(ref_structures, structures)
not_missing = ref_structures[maks]
missing = ref_structures[~maks]
if len(missing):
print(f"WARNING ! Some structures are missing: {missing}\n")
return missing, not_missing
def make(self):
"""Create structures and convert the CT in nii format for each subject."""
print(f"Structure(s) to export: {self.structures}")
print(f"Patient(s) identification : {self.patients}")
for index, path_patient in enumerate(self.patient_paths):
patient_id = self.patients[index]
print(f"Exporting {index + 1} ({patient_id}) on {len(self.patients)}")
nii_output = os.path.join(self.export_path, patient_id)
missing, not_missing = self.find_structures(index)
if len(missing) == 0 or self.force:
dcmrtstruct2nii(self.rs_paths[index], path_patient, nii_output, not_missing, False,
mask_foreground_value=1)
nii_maks = [nii_mask for nii_mask in os.listdir(nii_output) if nii_mask.startswith('mask')]
for nii in nii_maks:
name = os.path.splitext(nii)[0].split("_")[1].replace("-", " ")
os.rename(os.path.join(nii_output, nii), os.path.join(nii_output, name + '.nii'))
os.rename(os.path.join(nii_output, "image.nii"), os.path.join(nii_output, "ct.nii"))
else:
print(f"Skip {patient_id} because of missing structure(s)")
print("Export done")
|
[
"dcmrtstruct2nii.list_rt_structs",
"dcmrtstruct2nii.dcmrtstruct2nii",
"os.path.basename",
"os.path.dirname",
"numpy.array",
"os.path.splitext",
"os.path.join",
"os.listdir",
"numpy.in1d"
] |
[((1215, 1244), 'os.path.basename', 'os.path.basename', (['export_path'], {}), '(export_path)\n', (1231, 1244), False, 'import os\n'), ((1298, 1324), 'os.path.dirname', 'os.path.dirname', (['self.path'], {}), '(self.path)\n', (1313, 1324), False, 'import os\n'), ((2406, 2443), 'dcmrtstruct2nii.list_rt_structs', 'list_rt_structs', (['self.rs_paths[index]'], {}), '(self.rs_paths[index])\n', (2421, 2443), False, 'from dcmrtstruct2nii import dcmrtstruct2nii, list_rt_structs\n'), ((2469, 2494), 'numpy.array', 'np.array', (['self.structures'], {}), '(self.structures)\n', (2477, 2494), True, 'import numpy as np\n'), ((2510, 2545), 'numpy.in1d', 'np.in1d', (['ref_structures', 'structures'], {}), '(ref_structures, structures)\n', (2517, 2545), True, 'import numpy as np\n'), ((1499, 1531), 'os.path.join', 'os.path.join', (['self.path', 'patient'], {}), '(self.path, patient)\n', (1511, 1531), False, 'import os\n'), ((3214, 3256), 'os.path.join', 'os.path.join', (['self.export_path', 'patient_id'], {}), '(self.export_path, patient_id)\n', (3226, 3256), False, 'import os\n'), ((1371, 1392), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (1381, 1392), False, 'import os\n'), ((2045, 2067), 'os.path.join', 'os.path.join', (['path', 'rs'], {}), '(path, rs)\n', (2057, 2067), False, 'import os\n'), ((3385, 3497), 'dcmrtstruct2nii.dcmrtstruct2nii', 'dcmrtstruct2nii', (['self.rs_paths[index]', 'path_patient', 'nii_output', 'not_missing', '(False)'], {'mask_foreground_value': '(1)'}), '(self.rs_paths[index], path_patient, nii_output, not_missing,\n False, mask_foreground_value=1)\n', (3400, 3497), False, 'from dcmrtstruct2nii import dcmrtstruct2nii, list_rt_structs\n'), ((1435, 1466), 'os.path.join', 'os.path.join', (['self.path', 'folder'], {}), '(self.path, folder)\n', (1447, 1466), False, 'import os\n'), ((1879, 1895), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1889, 1895), False, 'import os\n'), ((3885, 3922), 'os.path.join', 'os.path.join', (['nii_output', '"""image.nii"""'], {}), "(nii_output, 'image.nii')\n", (3897, 3922), False, 'import os\n'), ((3924, 3958), 'os.path.join', 'os.path.join', (['nii_output', '"""ct.nii"""'], {}), "(nii_output, 'ct.nii')\n", (3936, 3958), False, 'import os\n'), ((3580, 3602), 'os.listdir', 'os.listdir', (['nii_output'], {}), '(nii_output)\n', (3590, 3602), False, 'import os\n'), ((3786, 3815), 'os.path.join', 'os.path.join', (['nii_output', 'nii'], {}), '(nii_output, nii)\n', (3798, 3815), False, 'import os\n'), ((3817, 3856), 'os.path.join', 'os.path.join', (['nii_output', "(name + '.nii')"], {}), "(nii_output, name + '.nii')\n", (3829, 3856), False, 'import os\n'), ((3699, 3720), 'os.path.splitext', 'os.path.splitext', (['nii'], {}), '(nii)\n', (3715, 3720), False, 'import os\n')]
|
import numpy as numpy
a = numpy.array([5,2,6,2,7,5,6,8,2,9])
print ('First array:')
print(a)
print('\n')
print('Unique values of first array:')
u = numpy.unique(a)
print(u)
print('\n')
print('Unique array and indices array:')
u, indices = numpy.unique(a, return_index = True)
print (indices)
print('\n')
print('We can see each number corresponds to index in original array:')
print(a)
print('\n')
print('Indices of unique array:')
u, indices = numpy.unique(a, return_inverse = True)
print (u)
print ('\n')
print('Indices are:')
print(indices)
print('\n')
print('Reconstruct the original array using indices')
print (u[indices])
print ('\n')
print('Return the counts of repetitions of unique elements:')
u, indices = numpy.unique(a, return_counts = True)
print(u)
print(indices)
|
[
"numpy.array",
"numpy.unique"
] |
[((26, 69), 'numpy.array', 'numpy.array', (['[5, 2, 6, 2, 7, 5, 6, 8, 2, 9]'], {}), '([5, 2, 6, 2, 7, 5, 6, 8, 2, 9])\n', (37, 69), True, 'import numpy as numpy\n'), ((150, 165), 'numpy.unique', 'numpy.unique', (['a'], {}), '(a)\n', (162, 165), True, 'import numpy as numpy\n'), ((242, 276), 'numpy.unique', 'numpy.unique', (['a'], {'return_index': '(True)'}), '(a, return_index=True)\n', (254, 276), True, 'import numpy as numpy\n'), ((449, 485), 'numpy.unique', 'numpy.unique', (['a'], {'return_inverse': '(True)'}), '(a, return_inverse=True)\n', (461, 485), True, 'import numpy as numpy\n'), ((720, 755), 'numpy.unique', 'numpy.unique', (['a'], {'return_counts': '(True)'}), '(a, return_counts=True)\n', (732, 755), True, 'import numpy as numpy\n')]
|
import os
import numpy
import pandas
from skimage import io
def read_ids_from_csv(csv_file):
""" Reads a column named 'ID' from csv_file. This function was
created to make sure basic I/O works in unit testing.
"""
csv = pandas.read_csv(csv_file)
return csv.ID
def read_hpa_image(image_id, root_dir):
""" Reads a four channel HPA cell image given by 'image_id' from
'root_dir' and returns it as a (H x W x 4) numpy array.
"""
root = os.path.join(root_dir, image_id)
stems = ("_red.png", "_blue.png", "_yellow.png", "_green.png")
paths = [root+stem for stem in stems]
image = [io.imread(path) for path in paths]
return numpy.dstack(image)
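if __name__ == "__main__":
    # Hedged usage sketch; 'sample.csv' and 'images' are hypothetical placeholder
    # paths, not data shipped with this module.
    ids = read_ids_from_csv("sample.csv")
    for image_id in ids:
        image = read_hpa_image(image_id, "images")
        print(image_id, image.shape)  # expected shape: (H, W, 4)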
|
[
"pandas.read_csv",
"os.path.join",
"skimage.io.imread",
"numpy.dstack"
] |
[((242, 267), 'pandas.read_csv', 'pandas.read_csv', (['csv_file'], {}), '(csv_file)\n', (257, 267), False, 'import pandas\n'), ((480, 512), 'os.path.join', 'os.path.join', (['root_dir', 'image_id'], {}), '(root_dir, image_id)\n', (492, 512), False, 'import os\n'), ((681, 700), 'numpy.dstack', 'numpy.dstack', (['image'], {}), '(image)\n', (693, 700), False, 'import numpy\n'), ((635, 650), 'skimage.io.imread', 'io.imread', (['path'], {}), '(path)\n', (644, 650), False, 'from skimage import io\n')]
|
from matplotlib import pyplot as plt
import pickle
import numpy as np
import os,sys
'''
results = []
for i in range(10):
with open(f'/home/yiran/pc_mapping/arena-v2/examples/bc_saved_models/refactor_success_max_mine/run{i}/test_result.npy', 'rb') as f:
result_i = pickle.load(f)
result_number = [v for (k,v) in result_i.items()]
results.append(result_number)
results = np.array(results)
result_mean = results.mean(axis=0)
result_std = results.std(axis=0)
print(result_mean, result_std)
exit()
'''
x = [1,3,5,7,9,11,13,15,17,19]
y_ub = np.arange(1,21,2)
y_heuristic = [1.0, 3.0, 4.99, 6.83, 8.53, 9.46, 10.89, 12.3, 13.69, 13.64]
y_DDQN = [0.99, 2.88, 4.78, 3.82, 2.37, 2.14, 1.35, 1.01, 0.91,1.09]
y_refactor_max = [0.98, 2.86, 4.64, 5.67, 5.81, 5.82, 5.35, 5.07, 3.34, 3.11]
y_refactor_success_max = [0.99, 3.0, 4.94, 6.55, 7.74, 8.47, 8.48, 7.72, 7.29, 5.85]
y_refactor_purify_10of10_max = [1.0, 2.97, 4.85, 6.76, 8.05, 8.42, 8.66, 8.03, 7.58, 5.65]
y_refactor_purify_9of10_max = [1.0, 3.0, 4.94, 6.69, 8.27, 9.27, 9.3, 8.88, 8.87, 7.94]
y_refactor_purify_8of10_max = [1.0, 3.0, 4.95, 6.76, 7.68, 8.14, 8.11, 8.18, 6.99, 5.09]
y_refactor_purify_7of10_max = [1.0, 3.0, 4.93, 6.91, 8.32, 9.46, 10.64, 11.7, 11.81, 10.86]
y_refactor_purify_6of10_max = [1.0, 2.97, 4.93, 6.78, 8.35, 9.87, 10.78, 11.29, 12.0, 11.09]
y_refactor_purify_5of10_max = [1.0, 2.94, 5.0, 6.59, 8.28, 8.96, 10.22, 10.34, 10.93, 10.56]
y_refactor_purify_4of10_max = [1.0, 2.97, 5.0, 6.79, 8.16, 9.27, 8.16, 7.82, 7.47, 6.02]
y_refactor_purify_3of10_max = [1.0, 2.95, 4.96, 6.56, 7.96, 9.14, 8.64, 7.64, 7.36, 4.54]
y_refactor_purify_2of10_max = [1.0, 3.0, 4.95, 6.75, 8.32, 9.49, 9.55, 9.73, 9.75, 8.04]
y_refactor_purify_1of10_max = [1.0, 2.97, 4.96, 6.75, 7.92, 8.09, 7.92, 6.62, 5.85, 4.7]
plt.xlabel('number of coins')
plt.ylabel('collected coins (mean of 100 runs)')
plt.xlim(0, 19)
plt.xticks(np.arange(1,21,2))
plt.ylim(0, 19)
plt.yticks(np.arange(1,21,2))
plt.plot(x, y_ub, label='max score')
plt.plot(x, y_heuristic, label='IL heuristic')
plt.plot(x, y_refactor_purify_6of10_max, label='IL purify')
plt.plot(x, y_refactor_success_max, label='IL successful traj')
plt.plot(x, y_refactor_max, label='IL all traj')
plt.plot(x, y_DDQN, label='DoubleDQN')
plt.legend(loc='upper left')
plt.show()
|
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((556, 575), 'numpy.arange', 'np.arange', (['(1)', '(21)', '(2)'], {}), '(1, 21, 2)\n', (565, 575), True, 'import numpy as np\n'), ((1806, 1835), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of coins"""'], {}), "('number of coins')\n", (1816, 1835), True, 'from matplotlib import pyplot as plt\n'), ((1836, 1884), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""collected coins (mean of 100 runs)"""'], {}), "('collected coins (mean of 100 runs)')\n", (1846, 1884), True, 'from matplotlib import pyplot as plt\n'), ((1885, 1900), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(19)'], {}), '(0, 19)\n', (1893, 1900), True, 'from matplotlib import pyplot as plt\n'), ((1931, 1946), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(19)'], {}), '(0, 19)\n', (1939, 1946), True, 'from matplotlib import pyplot as plt\n'), ((1977, 2013), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_ub'], {'label': '"""max score"""'}), "(x, y_ub, label='max score')\n", (1985, 2013), True, 'from matplotlib import pyplot as plt\n'), ((2014, 2060), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_heuristic'], {'label': '"""IL heuristic"""'}), "(x, y_heuristic, label='IL heuristic')\n", (2022, 2060), True, 'from matplotlib import pyplot as plt\n'), ((2061, 2120), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_refactor_purify_6of10_max'], {'label': '"""IL purify"""'}), "(x, y_refactor_purify_6of10_max, label='IL purify')\n", (2069, 2120), True, 'from matplotlib import pyplot as plt\n'), ((2121, 2184), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_refactor_success_max'], {'label': '"""IL successful traj"""'}), "(x, y_refactor_success_max, label='IL successful traj')\n", (2129, 2184), True, 'from matplotlib import pyplot as plt\n'), ((2185, 2233), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_refactor_max'], {'label': '"""IL all traj"""'}), "(x, y_refactor_max, label='IL all traj')\n", (2193, 2233), True, 'from matplotlib import pyplot as plt\n'), ((2234, 2272), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_DDQN'], {'label': '"""DoubleDQN"""'}), "(x, y_DDQN, label='DoubleDQN')\n", (2242, 2272), True, 'from matplotlib import pyplot as plt\n'), ((2273, 2301), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2283, 2301), True, 'from matplotlib import pyplot as plt\n'), ((2302, 2312), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2310, 2312), True, 'from matplotlib import pyplot as plt\n'), ((1912, 1931), 'numpy.arange', 'np.arange', (['(1)', '(21)', '(2)'], {}), '(1, 21, 2)\n', (1921, 1931), True, 'import numpy as np\n'), ((1958, 1977), 'numpy.arange', 'np.arange', (['(1)', '(21)', '(2)'], {}), '(1, 21, 2)\n', (1967, 1977), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
def complexity_hjorth(signal):
"""Hjorth's Complexity and Parameters
Hjorth Parameters are indicators of statistical properties used in signal processing in the
time domain introduced by Hjorth (1970). The parameters are activity, mobility, and complexity.
NeuroKit returns complexity directly in the output tuple, but the other parameters can be found
in the dictionary.
- The **complexity** parameter gives an estimate of the bandwidth of the signal, which
indicates the similarity of the shape of the signal to a pure sine wave (where the value
converges to 1). Complexity is define as the ratio of the mobility of the first derivative of
the signal to the mobility of the signal.
- The **mobility** parameter represents the mean frequency or the proportion of standard
deviation of the power spectrum. This is defined as the square root of variance of the first
derivative of the signal divided by the variance of the signal.
- The **activity** parameter is simply the variance of the signal.
See Also
--------
fractal_petrosian
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
Returns
-------
hjorth : float
Hjorth's Complexity.
info : dict
A dictionary containing additional information regarding the parameters used
to compute Hjorth's Complexity.
Examples
----------
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=2, frequency=5)
>>>
>>> complexity, info = nk.complexity_hjorth(signal)
>>> complexity #doctest: +SKIP
References
----------
- https://github.com/raphaelvallat/antropy/blob/master/antropy
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Calculate derivatives
dx = np.diff(signal)
ddx = np.diff(dx)
# Calculate variance and its derivatives
x_var = np.var(signal) # = activity
dx_var = np.var(dx)
ddx_var = np.var(ddx)
# Mobility and complexity
mobility = np.sqrt(dx_var / x_var)
complexity = np.sqrt(ddx_var / dx_var) / mobility
return complexity, {"Mobility": mobility, "Activity": x_var}
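if __name__ == "__main__":
    # Minimal self-contained check (assumption: a pure sine wave, for which the
    # complexity should be close to 1; this block is not part of the original module).
    t = np.linspace(0, 2, 2000)
    sine = np.sin(2 * np.pi * 5 * t)
    complexity, info = complexity_hjorth(sine)
    print(complexity, info["Mobility"], info["Activity"])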
|
[
"numpy.diff",
"numpy.var",
"numpy.sqrt"
] |
[((2112, 2127), 'numpy.diff', 'np.diff', (['signal'], {}), '(signal)\n', (2119, 2127), True, 'import numpy as np\n'), ((2138, 2149), 'numpy.diff', 'np.diff', (['dx'], {}), '(dx)\n', (2145, 2149), True, 'import numpy as np\n'), ((2208, 2222), 'numpy.var', 'np.var', (['signal'], {}), '(signal)\n', (2214, 2222), True, 'import numpy as np\n'), ((2250, 2260), 'numpy.var', 'np.var', (['dx'], {}), '(dx)\n', (2256, 2260), True, 'import numpy as np\n'), ((2275, 2286), 'numpy.var', 'np.var', (['ddx'], {}), '(ddx)\n', (2281, 2286), True, 'import numpy as np\n'), ((2333, 2356), 'numpy.sqrt', 'np.sqrt', (['(dx_var / x_var)'], {}), '(dx_var / x_var)\n', (2340, 2356), True, 'import numpy as np\n'), ((2374, 2399), 'numpy.sqrt', 'np.sqrt', (['(ddx_var / dx_var)'], {}), '(ddx_var / dx_var)\n', (2381, 2399), True, 'import numpy as np\n')]
|
import numpy as np
import warnings
def remove_base(seq, base, tolerance=1e-4):
"""
Functionality: Remove x from (x \sqcup z)
Since there might be some float errors, I allow for a mismatch of the time_stamps between
two seqs no larger than a threshold.
The threshold value: tolerance * max_time_stamp
:param list seq: x \sqcup z
:param list base: x
    :param float tolerance: A rate; the absolute time threshold is tolerance * max_time_stamp of seq.
:rtype: list
:return: z
"""
if len(seq) == 0:
return seq
tolerance = tolerance * seq[-1]['time_since_start']
n_seq = len(seq)
n_base = len(base)
seq_types = np.empty(shape=[n_seq], dtype=np.int64)
seq_time_stamps = np.empty(shape=[n_seq], dtype=np.float32)
base_types = np.empty(shape=[n_base], dtype=np.int64)
base_time_stamps = np.empty(shape=[n_base], dtype=np.float32)
for token_idx, token in enumerate(seq):
seq_types[token_idx] = token['type_event']
seq_time_stamps[token_idx] = token['time_since_start']
for token_idx, token in enumerate(base):
base_types[token_idx] = token['type_event']
base_time_stamps[token_idx] = token['time_since_start']
type_equal = base_types.repeat(n_seq).reshape(n_base, n_seq)
type_equal = type_equal == seq_types
time_equal = base_time_stamps.repeat(n_seq).reshape(n_base, n_seq)
time_equal = np.abs(time_equal - seq_time_stamps) < tolerance
to_remove = (type_equal & time_equal).any(axis=0)
rst = list()
for token_idx in np.where(~to_remove)[0]:
rst.append(seq[token_idx])
if len(rst) + len(base) != len(seq):
warnings.warn('Some base tokens are missing from the seq!')
return rst
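# Hedged illustration with synthetic tokens (not part of the original module):
# >>> base = [{'type_event': 0, 'time_since_start': 1.0},
# ...         {'type_event': 1, 'time_since_start': 2.0}]
# >>> seq = base[:1] + [{'type_event': 2, 'time_since_start': 1.5}] + base[1:]
# >>> remove_base(seq, base)
# [{'type_event': 2, 'time_since_start': 1.5}]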
def remove_bases_for_test(all_particles, golds, bases):
"""
Helper function for testing.
Functionality: Remove observed tokens from proposed particles and gold seqs.
:param list all_particles: x \sqcup z_m
:param list golds: x \sqcup z
:param list bases: x
:rtype: list, list
:return: particles (only z_m) and gold seqs (only z)
"""
assert len(all_particles) == len(golds) == len(bases)
rst_particles = list()
rst_golds = list()
for particles, gold, base in zip(all_particles, golds, bases):
new_particles = list()
for particle in particles:
new_particles.append(remove_base(particle, base))
rst_particles.append(new_particles)
rst_golds.append(remove_base(gold, base))
return rst_particles, rst_golds
# The following code is just for testing
if __name__ == '__main__':
import pickle
dataset = pickle.load(open('data/pilottaxi/train.pkl', 'rb'))
seq = dataset['seqs'][0]
# base = dataset['seqs_obs'][0]
base = list()
from pprint import pprint
pprint('seq:')
pprint(seq)
pprint('base:')
pprint(base)
pprint('after removal:')
pprint(remove_base(seq, base))
assert len(seq) == len(remove_base(seq, base))
|
[
"numpy.abs",
"numpy.empty",
"numpy.where",
"pprint.pprint",
"warnings.warn"
] |
[((611, 650), 'numpy.empty', 'np.empty', ([], {'shape': '[n_seq]', 'dtype': 'np.int64'}), '(shape=[n_seq], dtype=np.int64)\n', (619, 650), True, 'import numpy as np\n'), ((673, 714), 'numpy.empty', 'np.empty', ([], {'shape': '[n_seq]', 'dtype': 'np.float32'}), '(shape=[n_seq], dtype=np.float32)\n', (681, 714), True, 'import numpy as np\n'), ((732, 772), 'numpy.empty', 'np.empty', ([], {'shape': '[n_base]', 'dtype': 'np.int64'}), '(shape=[n_base], dtype=np.int64)\n', (740, 772), True, 'import numpy as np\n'), ((796, 838), 'numpy.empty', 'np.empty', ([], {'shape': '[n_base]', 'dtype': 'np.float32'}), '(shape=[n_base], dtype=np.float32)\n', (804, 838), True, 'import numpy as np\n'), ((2761, 2775), 'pprint.pprint', 'pprint', (['"""seq:"""'], {}), "('seq:')\n", (2767, 2775), False, 'from pprint import pprint\n'), ((2780, 2791), 'pprint.pprint', 'pprint', (['seq'], {}), '(seq)\n', (2786, 2791), False, 'from pprint import pprint\n'), ((2796, 2811), 'pprint.pprint', 'pprint', (['"""base:"""'], {}), "('base:')\n", (2802, 2811), False, 'from pprint import pprint\n'), ((2816, 2828), 'pprint.pprint', 'pprint', (['base'], {}), '(base)\n', (2822, 2828), False, 'from pprint import pprint\n'), ((2833, 2857), 'pprint.pprint', 'pprint', (['"""after removal:"""'], {}), "('after removal:')\n", (2839, 2857), False, 'from pprint import pprint\n'), ((1354, 1390), 'numpy.abs', 'np.abs', (['(time_equal - seq_time_stamps)'], {}), '(time_equal - seq_time_stamps)\n', (1360, 1390), True, 'import numpy as np\n'), ((1497, 1517), 'numpy.where', 'np.where', (['(~to_remove)'], {}), '(~to_remove)\n', (1505, 1517), True, 'import numpy as np\n'), ((1607, 1666), 'warnings.warn', 'warnings.warn', (['"""Some base tokens are missing from the seq!"""'], {}), "('Some base tokens are missing from the seq!')\n", (1620, 1666), False, 'import warnings\n')]
|
import cv2
# cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture('../datasets/opencv/fish.mp4')
while True:
_ret, frame = cap.read()
frame = cv2.resize(frame, (500,400))
cv2.imshow('opencv camera', frame)
    k = cv2.waitKey(1)  # wait 1 msec
if k==27 or k==13 : break
cap.release()
cv2.destroyAllWindows()
import numpy as np
# the capture was released above, so it must be re-opened before reading again
cap = cv2.VideoCapture('../datasets/opencv/fish.mp4')
while True:
_ret, frame = cap.read()
frame = cv2.resize(frame, (500,400))
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
h = hsv[:, :, 0] #hue
s = hsv[:, :, 1] #saturation
v = hsv[:, :, 2] # value brighthness
img = np.zeros(h.shape, dtype=np.uint8)
img[((h < 50) | (h > 200)) & (s > 100)] = 255
cv2.imshow('opencv camera', img)
    k = cv2.waitKey(1)  # wait 1 msec
if k==27 or k==13 : break
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.waitKey",
"cv2.cvtColor",
"cv2.imshow",
"numpy.zeros",
"cv2.VideoCapture",
"cv2.destroyAllWindows",
"cv2.resize"
] |
[((45, 92), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""../datasets/opencv/fish.mp4"""'], {}), "('../datasets/opencv/fish.mp4')\n", (61, 92), False, 'import cv2\n'), ((292, 315), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (313, 315), False, 'import cv2\n'), ((775, 798), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (796, 798), False, 'import cv2\n'), ((147, 176), 'cv2.resize', 'cv2.resize', (['frame', '(500, 400)'], {}), '(frame, (500, 400))\n', (157, 176), False, 'import cv2\n'), ((180, 214), 'cv2.imshow', 'cv2.imshow', (['"""opencv camera"""', 'frame'], {}), "('opencv camera', frame)\n", (190, 214), False, 'import cv2\n'), ((223, 237), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (234, 237), False, 'import cv2\n'), ((389, 418), 'cv2.resize', 'cv2.resize', (['frame', '(500, 400)'], {}), '(frame, (500, 400))\n', (399, 418), False, 'import cv2\n'), ((428, 466), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (440, 466), False, 'import cv2\n'), ((577, 610), 'numpy.zeros', 'np.zeros', (['h.shape'], {'dtype': 'np.uint8'}), '(h.shape, dtype=np.uint8)\n', (585, 610), True, 'import numpy as np\n'), ((665, 697), 'cv2.imshow', 'cv2.imshow', (['"""opencv camera"""', 'img'], {}), "('opencv camera', img)\n", (675, 697), False, 'import cv2\n'), ((706, 720), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (717, 720), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 13 19:00:40 2016
@author: sebalander
"""
from numpy import zeros, sqrt, array, tan, arctan, prod, cos
from cv2 import Rodrigues
from lmfit import minimize, Parameters
#from calibration import calibrator
#xypToZplane = calibrator.xypToZplane
#
## %% ========== ========== PARAMETER HANDLING ========== ==========
#def formatParameters(rVec, tVec, linearCoeffs, distCoeffs):
# params = Parameters()
#
# if prod(rVec.shape) == 9:
# rVec = Rodrigues(rVec)[0]
#
# rVec = rVec.reshape(3)
#
# for i in range(3):
# params.add('rvec%d'%i,
# value=rVec[i], vary=True)
# params.add('tvec%d'%i,
# value=tVec[i], vary=True)
#
# # image center
# params.add('cameraMatrix0',
# value=linearCoeffs[0], vary=False)
# params.add('cameraMatrix1',
# value=linearCoeffs[1], vary=False)
#
# # k
# params.add('distCoeffs',
# value=distCoeffs, vary=False)
#
# return params
#
#def retrieveParameters(params):
# '''
#
# '''
# rvec = zeros((3,1))
# tvec = zeros((3,1))
# for i in range(3):
# rvec[i,0] = params['rvec%d'%i].value
# tvec[i,0] = params['tvec%d'%i].value
#
# cameraMatrix = zeros(2)
# cameraMatrix[0] = params['cameraMatrix0'].value
# cameraMatrix[1] = params['cameraMatrix1'].value
#
# distCoeffs = params['distCoeffs'].value
#
# return rvec, tvec, cameraMatrix, distCoeffs
# %% ========== ========== DIRECT ========== ==========
def radialDistort(rh, k, quot=False, der=False):
'''
returns distorted radius using distortion coefficient k
    optionally it returns the distortion quotient rpp = rp * q
'''
k.shape = 1
th = arctan(rh)
tanth = tan(th / 2)
rd = k * tanth
if der:
# rpp wrt rp
dDdH = k / cos(th / 2)**2 / 2 / (1 + rh**2)
# calculate quotient
q = rd / rh
# q wrt rpp
dQdH = ((dDdH - q) / rh).reshape((1, -1)) # deriv wrt undistorted coords
dQdK = (tanth / rh).reshape((1, -1))
if quot:
return q, dQdH, dQdK
else:
return rd, dQdH, dQdK
else:
if quot:
return rd / rh
else:
return rd
## we asume that intrinsic distortion paramters is just a scalar: distCoeffs=k
#def direct(fiducialPoints, rVec, tVec, linearCoeffs, distCoeffs):
# # format as matrix
# try:
# rVec.reshape(3)
# rVec = Rodrigues(rVec)[0]
# except:
# pass
#
# xyz = rVec.dot(fiducialPoints[0].T)+tVec
#
# xp = xyz[0]/xyz[2]
# yp = xyz[1]/xyz[2]
#
# rp = sqrt(xp**2 + yp**2)
# thetap = arctan(rp)
#
# rpp = distCoeffs*tan(thetap/2)
#
# rpp_rp = rpp/rp
#
# xpp = xp*rpp_rp
# ypp = yp*rpp_rp
#
# u = xpp + linearCoeffs[0]
# v = ypp + linearCoeffs[1]
#
# return array([u,v]).reshape((fiducialPoints.shape[1],1,2))
#
#def residualDirect(params, fiducialPoints, imageCorners):
# rVec, tVec, linearCoeffs, distCoeffs = retrieveParameters(params)
#
# projectedCorners = direct(fiducialPoints,
# rVec,
# tVec,
# linearCoeffs,
# distCoeffs)
#
# return imageCorners[:,0,:] - projectedCorners[:,0,:]
#
#def calibrateDirect(fiducialPoints, imageCorners, rVec, tVec, linearCoeffs, distCoeffs):
# initialParams = formatParameters(rVec, tVec, linearCoeffs, distCoeffs) # generate Parameters obj
#
# out = minimize(residualDirect,
# initialParams,
# args=(fiducialPoints,
# imageCorners))
#
# rvecOpt, tvecOpt, _, _ = retrieveParameters(out.params)
#
# return rvecOpt, tvecOpt, out.params
# %% ========== ========== INVERSE ========== ==========
def radialUndistort(rd, k, quot=False, der=False):
'''
takes distorted radius and returns the radius undistorted
optionally it returns the undistortion quotient rd = rh * q
'''
# polynomial coeffs, grade 7
# # (k1,k2,p1,p2[,k3[,k4,k5,k6[,s1,s2,s3,s4[,τx,τy]]]])
k.shape = -1
thetap = 2 * arctan(rd / k)
rh = tan(thetap)
retVal = True
if der:
# derivada de la directa
q, dQdH, dQdK = radialDistort(rh, k, quot, der)
if quot:
return q, retVal, dQdH, dQdK
else:
return rh, retVal, dQdH, dQdK
else:
if quot:
# returns q
return rd / rh, retVal
else:
return rh, retVal
#def inverse(imageCorners, rVec, tVec, linearCoeffs, distCoeffs):
#
# xpp = imageCorners[:,0,0]-linearCoeffs[0]
# ypp = imageCorners[:,0,1]-linearCoeffs[1]
# rpp = sqrt(xpp**2 + ypp**2)
#
# thetap = 2*arctan(rpp/distCoeffs)
#
# rp = tan(thetap)
#
# rp_rpp = rp/rpp
#
# xp = xpp * rp_rpp
# yp = ypp * rp_rpp
#
# # project to z=0 plane. perhaps calculate faster with homography function?
# XYZ = xypToZplane(xp, yp, rVec, tVec)
#
# return XYZ
#
#
#def residualInverse(params, fiducialPoints, imageCorners):
# rVec, tVec, linearCoeffs, distCoeffs = retrieveParameters(params)
#
# projectedFiducialPoints = inverse(imageCorners,
# rVec,
# tVec,
# linearCoeffs,
# distCoeffs)
#
# return fiducialPoints[0,:,:2] - projectedFiducialPoints[0,:,:2]
#
#def calibrateInverse(fiducialPoints, imageCorners, rVec, tVec, linearCoeffs, distCoeffs):
# initialParams = formatParameters(rVec, tVec, linearCoeffs, distCoeffs) # generate Parameters obj
#
# out = minimize(residualInverse,
# initialParams,
# args=(fiducialPoints,
# imageCorners))
#
# rvecOpt, tvecOpt, _, _ = retrieveParameters(out.params)
#
# return rvecOpt, tvecOpt, out.params
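if __name__ == '__main__':
    # Hedged self-consistency check with synthetic values (not calibration data):
    # distorting and then undistorting a radius should recover the original.
    k = array([0.8])
    rh = array([0.3, 0.7, 1.5])
    rd = radialDistort(rh, k)
    rh_back, ok = radialUndistort(rd, k)
    print(rh, rh_back, ok)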
|
[
"numpy.arctan",
"numpy.tan",
"numpy.cos"
] |
[((1758, 1768), 'numpy.arctan', 'arctan', (['rh'], {}), '(rh)\n', (1764, 1768), False, 'from numpy import zeros, sqrt, array, tan, arctan, prod, cos\n'), ((1781, 1792), 'numpy.tan', 'tan', (['(th / 2)'], {}), '(th / 2)\n', (1784, 1792), False, 'from numpy import zeros, sqrt, array, tan, arctan, prod, cos\n'), ((4240, 4251), 'numpy.tan', 'tan', (['thetap'], {}), '(thetap)\n', (4243, 4251), False, 'from numpy import zeros, sqrt, array, tan, arctan, prod, cos\n'), ((4215, 4229), 'numpy.arctan', 'arctan', (['(rd / k)'], {}), '(rd / k)\n', (4221, 4229), False, 'from numpy import zeros, sqrt, array, tan, arctan, prod, cos\n'), ((1865, 1876), 'numpy.cos', 'cos', (['(th / 2)'], {}), '(th / 2)\n', (1868, 1876), False, 'from numpy import zeros, sqrt, array, tan, arctan, prod, cos\n')]
|
"""
plot.py defines functions for plotting phase diagrams of complex
coacervate liquid separation.
"""
# standard libraries
import matplotlib.pyplot as plt
from matplotlib import cm # colormap
import numpy as np
import pandas as pd
# custom libraries
import pe
import salt as nacl
# plotting libraries
import plotly.graph_objects as go
from bokeh.plotting import figure, output_file, show
from bokeh.models import ColumnDataSource, Title, Range1d
from bokeh.models.tools import HoverTool
# CONSTANTS
NA = 6.022E23 # Avogadro's number, molecules / mol
m3_2_L = 1E3
K_2_C = 273.15 # conversion from Kelvin to Celsius (subtract this)
m_2_A = 1E10
def alpha_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
T_range=[273.15,373.35], cmap_name='plasma', sigma=None,
colors=None, marker='o', lw=1, T_cels=False,
y_lim=[0.5, 1], square_box=False, tol=0.05, ax=None,
show_lgnd=True):
"""
Plots the volume fraction of supernatant phase I (alpha) vs. the overall
density of the varied component.
Note: currently eliminates data points with alpha = 1 because they tend to
be the result of numerical imprecision
    T_range : 2-tuple
        Lower and upper bounds on temperature to consider, in kelvin
        (even if T_cels is True)
    tol : float, opt
        Tolerance for how close the volume fraction nearest the single-phase
        region must be to 1 in order to round up to 1 (for plotting the dashed line)
"""
    # creates list of colors, one per overall composition
    if colors is None:
        colors = get_colors(cmap_name, len(rho_p_list))
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
# plots volume fraction of supernatant for each composition
for i, rho_pair in enumerate(zip(rho_p_list, rho_s_list)):
# plots binodal for low polymer concentration [M]
rho_p, rho_s = rho_pair
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
lB_arr = results['lB']
alpha = results['alpha']
T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
liq_h2o = (T_arr >= T_range[0]) * (T_arr <= T_range[1]) * \
(np.asarray(alpha) != 1)
if T_cels:
T_arr -= K_2_C
# plots alpha vs. T for given rho_p
alpha_arr = np.array(alpha)[liq_h2o]
T_arr = T_arr[liq_h2o]
ax.plot(T_arr, alpha_arr, color=colors[i],
marker=marker, lw=lw, label=r'$\rho_p = $' + \
'{0:.2f} M, '.format(rho_p) + r'$\rho_s = $' + \
'{0:.2f} M'.format(rho_s))
### Single Phase
# plots dashed line to lowest temperature if single phase
# *checks if lowest plotted temperature reaches y axis
T_min = np.min(T_arr)
if T_min > np.min(ax.get_xlim()):
alpha_single_phase = alpha_arr[np.argmin(T_arr)]
# rounds up to 1 if volume fraction is close (discontinuous phase sep)
if np.abs(alpha_single_phase - 1) < tol:
ax.plot([T_min, T_min], [alpha_single_phase, 1], '-', lw=lw,
color=colors[i])
alpha_single_phase = 1
# rounds to 0.5 if volume fraction is close (passes through LCST)
if np.abs(alpha_single_phase - 0.5) < tol:
alpha_single_phase = 0.5
# plots horizontal dashed line to indicate single phase at low T
ax.plot([ax.get_xlim()[0], T_min],
[alpha_single_phase, alpha_single_phase], '--',
lw=lw, color=colors[i])
# determines labels and limits of axes
if T_cels:
x_lim = [T_range[0] - K_2_C, T_range[1] - K_2_C]
x_label = r'$T$'
x_unit = r'$^{\circ}$C'
    else:
        x_lim = T_range
        x_label = r'$T$'
        x_unit = 'K'
y_label = r'$V_{sup}/V_{tot}$'
# formats plot
format_binodal(ax, x_label, x_unit, T_range, x_lim=x_lim, y_lim=y_lim,
y_label=y_label, square_box=square_box, show_lgnd=show_lgnd)
return ax
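# Hedged usage sketch (assumes `data`, `beads_2_M`, and `sigma` were loaded
# elsewhere, e.g. via the `pe`/`salt` helpers; the densities are placeholders):
#   ax = alpha_custom_rho(data, rho_p_list=[0.1, 0.3], rho_s_list=[1.0, 1.0],
#                         beads_2_M=beads_2_M, sigma=sigma, T_cels=True)
#   plt.show()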
def alpha_vary_rho(data, rho_var_list, rho_fix, ch_var, beads_2_M,
T_range=[273.15,373.35], cmap_name='plasma', sigma=None,
colors=None, marker='o', lw=1, T_cels=False,
y_lim=[0.5, 1], title=None, square_box=False):
"""
Plots the volume fraction of supernatant phase I (alpha) vs. the overall
density of the varied component.
Note: currently eliminates data points with alpha = 1 because they tend to
be the result of numerical imprecision
    T_range : 2-tuple
        Lower and upper bounds on temperature to consider, in kelvin
        (even if T_cels is True)
"""
# creates dictionary of values based on which component's density is varied
d = get_plot_dict_p_s(ch_var)
# creates list of colors for each value of the varied density
if colors is None:
colors = get_colors(cmap_name, len(rho_var_list))
# creates figure
fig = plt.figure()
ax = fig.add_subplot(111)
for i, rho_var in enumerate(rho_var_list):
# plots binodal for low polymer concentration [M]
rho_pair = np.array([rho_var, rho_fix])
rho_p, rho_s = rho_pair[d['order']]
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
lB_arr = results['lB']
alpha = results['alpha']
T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
liq_h2o = (T_arr >= T_range[0]) * (T_arr <= T_range[1]) * \
(np.asarray(alpha) != 1)
if T_cels:
T_arr -= K_2_C
# plots alpha vs. T for given rho_p
ax.plot(T_arr[liq_h2o], np.array(alpha)[liq_h2o], color=colors[i],
marker=marker, lw=lw, label=r'$\rho_' + d['ch_var'] + ' = $' + \
'{0:.2f} M'.format(rho_var))
# determines labels and limits of axes
if T_cels:
x_lim = [T_range[0] - K_2_C, T_range[1] - K_2_C]
x_label = r'$T$'
x_unit = r'$^{\circ}$C'
    else:
        x_lim = T_range
        x_label = r'$T$'
        x_unit = 'K'
y_label = r'$V^{sup}/V^{tot}$'
if title is None:
title = 'Effect of Total {0:s} on Supernatant Volume, {1:s} = {2:.2f} M' \
.format(d['name_var'], r'$\rho_' + d['ch_fix'] + '$', rho_fix)
# formats plot
format_binodal(ax, x_label, x_unit, T_range, title=title, x_lim=x_lim,
y_lim=y_lim, y_label=y_label, square_box=square_box)
return ax
def binodal(lB_arr, left_list, right_list, left='rhoPCI', right='rhoPCII',
x_label='polyanion density', n_tie_lines=3, plot_T=True, sigma=None,
T_range=[273, 373], beads_2_M=None, title='', fix_eps=False,
deg_C=False, x_lim=None, y_lim=None, marker=True, line=False,
c1='blue', c2='red'):
"""
Plots binodal with polyanion density as x axis and temperature or
Bjerrum length as y axis using Bokeh interactive plotting methods.
Parameters
----------
lB_arr : (Nx1) numpy array
Array of Bjerrum lengths non-dimensionalized by sigma defined
in definition of "data" dictionary.
left_list : N-element list
List of x-axis variable in phase I (supernatant) [beads/sigma^3]
right_list : N-element list
List of x-axis variable in phase II (coacervate) [beads/sigma^3]
left : string
Name of heading in df of the variable given in left_list
right : string
Name of heading in df of the variable given in right_list
x_label : string
Variable to be plotted along the x-axis (without units)
n_tie_lines : int
Number of tie lines to plot
plot_T : bool
        y axis is temperature [K] if True, Bjerrum length [sigma] if False
T_range : 2-element list
Lower and upper bound for temperatures to plot (to limit temperatures
to those for which water is liquid)
beads_2_M : float
Conversion from beads/sigma^3 to moles of monomers / L. If None, no
conversion is made and the units on the x axis are beads/sigma^3.
title : string
Title of plot
fix_eps : bool
        Fixes epsilon to a constant value if True, or allows it to vary with
temperature if False.
deg_C : bool, opt
If True, temperature is shown in degrees Celsius (assuming it is
provided in Kelvin), default = False.
x_lim : 2-element tuple of floats, optional
Lower and upper bounds of x axis. If None provided, automatically set.
y_lim : 2-element tuple of floats, optional
Lower and upper bounds of y axis. If None provided, automatically set.
Returns
-------
p : bokeh plot
Plot of binodal. Use bokeh's "show(p)" to display. Use "output_notebook()" beforehand
to show the plot in the same cell (instead of a separate browser tab).
"""
left_arr = np.copy(left_list)
right_arr = np.copy(right_list)
# calculates conversion from beads / sigma^3 to mol/L
if beads_2_M is not None:
left_arr *= beads_2_M
right_arr *= beads_2_M
units_rho = '[mol/L]'
else:
units_rho = '[beads/sigma^3]'
# computes temperature corresponding to Bjerrum lengths
T_arr = pe.lB_2_T_arr(lB_arr, T_range, fix_eps=fix_eps, sigma=sigma)
# stores results in dataframe for plotting
df_mu = pd.DataFrame(columns=['BJ', 'T', left, right])
liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])
df_mu['BJ'] = lB_arr[liq_h2o]
df_mu['T'] = T_arr[liq_h2o] - deg_C*273 # converts to degrees Celsius if requested
df_mu[left] = left_arr[liq_h2o] # monomer density
df_mu[right] = right_arr[liq_h2o] # monomer density
# plots binodal at fixed chemical potential
n_plot = len(df_mu)
if n_plot == 0:
print('No data to plot in plot.binodal()--error likely.')
p = no_salt(df_mu, n_plot, left=left, right=right, x_label=x_label,
n_tie_lines=n_tie_lines, plot_T=plot_T, marker=marker, line=line,
title=title, units_rho=units_rho, deg_C=deg_C, c1=c1, c2=c2)
# sets axis limits if requested
if x_lim is not None:
p.x_range = Range1d(*x_lim)
if y_lim is not None:
p.y_range = Range1d(*y_lim)
return p
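# Hedged usage sketch (assumes `lB_arr` and the supernatant/coacervate density
# lists were computed elsewhere; shown only to illustrate the call signature):
#   p = binodal(lB_arr, rho_PCI_list, rho_PCII_list, beads_2_M=beads_2_M,
#               sigma=sigma, n_tie_lines=5)
#   show(p)  # bokeh's show, imported above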
def binodal_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
x_var='polycation', x_label=r'$\rho_{PSS}$', sigma=None,
T_range=[273.15,373.15], cmap_name='plasma', colors=None,
marker='o', fill_left='none', fill_right='full', lw_sup=1,
lw_co=3, lgnd_out=True, lw=1, x_lim=None, T_cels=False,
c_sup='#1414FF', c_co='#FF0000', ls_sup='-',
square_box=False, plot_fixed_rho=False, ax=None,
show_lgnd=True):
"""
Like `binodal_vary_rho()` but allows user to customize both rho_p and rho_s
(overall) of each condition, rather than fixing one for all conditions.
"""
# creates list of colors for each value of rho_p
if colors is None:
if cmap_name is not None:
            colors = get_colors(cmap_name, len(rho_p_list))
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
for i, rho_pair in enumerate(zip(rho_p_list, rho_s_list)):
rho_p, rho_s = rho_pair
# plots binodal for low polymer concentration [M]
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
rho_PAI_list = results['rho_PAI']
rho_PAII_list = results['rho_PAII']
rho_AI_list = results['rho_AI']
rho_AII_list = results['rho_AII']
lB_arr = results['lB']
alpha = results['alpha']
# selects the x-axis data
if x_var == 'polycation':
left_arr = np.array(rho_PCI_list)
right_arr = np.array(rho_PCII_list)
elif x_var == 'polyanion':
left_arr = np.array(rho_PAI_list)
right_arr = np.array(rho_PAII_list)
elif x_var == 'cation':
left_arr = np.array(rho_CI_list)
right_arr = np.array(rho_CII_list)
elif x_var == 'anion':
left_arr = np.array(rho_AI_list)
right_arr = np.array(rho_AII_list)
elif x_var == 'solvent':
left_arr = pe.calc_rho_solv(rho_PCI_list,
rho_CI_list,
beads_2_M)
right_arr = pe.calc_rho_solv(rho_PCII_list,
rho_CII_list,
beads_2_M)
elif x_var == 'polyelectrolyte':
left_arr = np.array(rho_PCI_list) + np.array(rho_PAI_list)
right_arr = np.array(rho_PCII_list) + np.array(rho_PAII_list)
elif x_var == 'salt':
left_arr = np.array(rho_CI_list)
right_arr = np.array(rho_CII_list)
else:
print('Error. Invalid x variable in plot.binodal_vary_rho().')
# computes temperature and identifies data within range
T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])
# converts temperature from Kelvin to Celsius
if T_cels:
T_arr -= K_2_C
# assigns separate colors to coacervate and supernatant if not specified
if colors is not None:
c_sup = colors[i]
c_co = colors[i]
# supernatant
ax.plot(left_arr[liq_h2o], T_arr[liq_h2o], color=c_sup,
marker=marker, fillstyle=fill_left, ls=ls_sup,
label=r'$\rho_p = $' + '{0:.2f} M, '.format(rho_p) + \
r'$\rho_s = $' + '{0:.2f} M, supernatant'.format(rho_s),
lw=lw_sup)
# coacervate
ax.plot(right_arr[liq_h2o], T_arr[liq_h2o], color=c_co,
marker=marker, fillstyle=fill_right,
label=r'$\rho_p = $' + '{0:.2f} M, '.format(rho_p) + \
r'$\rho_s = $' + '{0:.2f} M, coacervate'.format(rho_s),
lw=lw_co)
# plots dashed line indicating fixed density if requested
if plot_fixed_rho:
# defines dictionary mapping x variable to corresponding fixed
# density
x_var_2_rho_fixed = {'polycation' : rho_p/2,
'cation' : rho_s,
'solvent' : 1 - rho_p - rho_s,
'polyelectrolyte' : rho_p,
'salt' : rho_s}
# selects appropriate fixed density based on x variable
rho_fixed = x_var_2_rho_fixed[x_var]
# determines color based on which branch is closest
if (rho_fixed - np.max(left_arr[liq_h2o])) > \
(np.min(right_arr[liq_h2o]) - rho_fixed):
# coacervate branch is closest to fixed density
color = c_co
else:
# supernatant branch is closest to fixed density
color = c_sup
# plots fixed density as vertical dashed line
ax.plot([rho_fixed, rho_fixed], ax.get_ylim(), '--', color=color,
lw=lw_sup)
# determines units of density to display on plot
if beads_2_M is not None:
units_rho = 'mol/L'
else:
units_rho = 'beads/sigma^3'
# formats plot
format_binodal(ax, x_label, units_rho, T_range, x_lim=x_lim,
T_cels=T_cels, square_box=square_box, show_lgnd=show_lgnd)
return ax
def binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list,
beads_2_M, show_tie_line=True,
cmap_name='plasma', colors=None, sigma=None,
marker='o', fill_left='none', fill_right='full',
lgnd_out=True, tol=1E-4, ms=10, T_cels=False, show_lB=False,
T_range=[273.15, 373.15], lw=2, square_box=False, ax=None,
colors_symbols=None, mew=1.5, x_lim=None, y_lim=None,
show_lgnd=True):
"""
Plots the binodal as a function of salt density and polyelectrolyte
density. Different Bjerrum lengths/temperatures are represented by
different trend lines.
Returns
-------
    ax : matplotlib axis with the plotted binodal.
"""
# variables defining order of plotted objects
back = 0
front = 10
# lists symbols for plotting overall composition
sym_list = ['*', '^', 's', '<', '>', 'v', '+', 'x']
# creates list of colors for each value of rho_p
if colors is None:
colors = get_colors(cmap_name, len(lB_list))
# determines units
if beads_2_M != 1:
units_rho = 'mol/L'
else:
units_rho = r'beads/$\sigma^3$'
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
# loops through each temperature / Bjerrum length in data
for i, lB in enumerate(lB_list):
df = data[lB]
# loads binodal data for supernatant (I) and coacervate (II)
# doubles polycation concentration to include polyanion in polymer
# concentration
ion_I_list = list(beads_2_M*df['rhoCI'])
ion_II_list = list(beads_2_M*df['rhoCII'])
polymer_I_list = list(2*beads_2_M*df['rhoPCI'])
polymer_II_list = list(2*beads_2_M*df['rhoPCII'])
# critical points
polymer_c = polymer_I_list[-1]
ion_c = ion_I_list[-1]
# computes temperature
T = pe.lB_2_T(lB, sigma=sigma)
if T_cels:
T_unit = r'$^{\circ}$C'
T -= K_2_C
else:
T_unit = ' K'
# plots tie lines and overall composition
for j, rho_pair in enumerate(zip(rho_p_list, rho_s_list)):
rho_p, rho_s = rho_pair
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
lB_arr = np.asarray(results['lB'])
alpha = results['alpha']
# converts to arrays of polymer and salt concentrations
rho_p_I = 2*np.asarray(rho_PCI_list)
rho_s_I = np.asarray(rho_CI_list)
rho_p_II = 2*np.asarray(rho_PCII_list)
rho_s_II = np.asarray(rho_CII_list)
# continues if no T in range has 2 phases for concentration
# finds closest match given Bjerrum length
try:
i_tie = np.where(np.abs(lB_arr - lB) < tol)[0][0]
except:
print('lB = {0:.3f} gives 1 phase for'.format(lB) + \
' rho_p = {0:.3f} [{1:s}],'.format(rho_p, units_rho) + \
'rho_s = {0:.3f} [{1:s}].'.format(rho_s, units_rho))
continue
# tie line
if show_tie_line:
ax.plot([rho_p_I[i_tie], rho_p_II[i_tie]],
[rho_s_I[i_tie], rho_s_II[i_tie]], '--',
color='k', lw=lw, zorder=back)
# supernatant
ax.plot(rho_p_I[i_tie], rho_s_I[i_tie], color=colors[i],
marker='o', fillstyle='none', zorder=front)
# coacervate
ax.plot(rho_p_II[i_tie], rho_s_II[i_tie], color=colors[i],
marker='o', fillstyle='none', zorder=front)
# plots overall composition last time through
if i == len(lB_list)-1:
short = {'mol/L' : 'M', 'beads/sigma^3' : r'$\sigma^{-3}$'}
if sym_list[j] == '*':
ms_boost = 4
else:
ms_boost = 0
# if provided, can specify marker face color
if colors_symbols is not None:
mfc = colors_symbols[j]
else:
mfc = 'w'
# plots symbol representing composition
ax.plot(rho_p, rho_s, marker=sym_list[j], markerfacecolor=mfc,
ms=ms+ms_boost, markeredgecolor='k',
markeredgewidth=mew, lw=0,
label=r'$\rho_p = $ ' + '{0:.2f} {1:s}'.format(rho_p,
short[units_rho]) + r', $\rho_s = $ ' + \
'{0:.2f} {1:s}'.format(rho_s, short[units_rho]),
zorder=front)
# plots binodal, flipping coacervate order to be in order
label = r'$T = $' + '{0:d}{1:s}'.format(int(T), T_unit)
if show_lB:
label += r', $l_B = $ ' + '{0:.3f}'.format(lB)
ax.plot(polymer_I_list + polymer_II_list[::-1],
ion_I_list + ion_II_list[::-1],
color=colors[i], lw=lw,
label=label, zorder=back)
# plots critical point
ax.plot(polymer_c, ion_c, marker='o',
fillstyle='full', color=colors[i], zorder=front)
# formats plot
x_label = r'$\rho_p$'
y_label = r'$\rho_s$ [' + units_rho + ']'
# determines component with varied concentration
name_pair = ['Polymer', 'Salt']
format_binodal(ax, x_label, units_rho, T_range, y_label=y_label,
x_lim=x_lim, y_lim=y_lim, lgnd_out=lgnd_out,
square_box=square_box, show_lgnd=show_lgnd)
return ax
def binodal_line_3d(data, mode='lines', ms=8, op=0.1,
c1='black', c2='black', lw=8, fig=None):
"""Plots line binodal in 3d plot."""
x1, y1, z1, x2, y2, z2 = data
fig = line_3d(x1, y1, z1, mode=mode, ms=ms, op=op, c=c1, lw=lw, fig=fig)
fig = line_3d(x2, y2, z2, mode=mode, ms=ms, op=op, c=c2, lw=lw, fig=fig)
return fig
def binodal_proj_fixed_conc(data, mu_salt_folder, rho_salt_M_list, color_list,
T_range, sigma, z_name, beads_2_M, lB_list,
lB_color_list, T_cels=False, marker='o', show_lB=False,
fill_left='none', fill_right='full', lw_sup=1, lw_co=3,
lw_lB=2, naming_structure='NA(100)NB(100)*', ext='PD',
figsize=None, vertical=True):
"""
Computes binodal projected onto three different planes (polymer-temperature,
salt-temperature, and polymer-salt) at fixed concentration of salt in a
saltwater reservoir.
show_lB : bool, optional
If True, will show Bjerrum length in legend
"""
### Formats Figure
# creates figure to plot the three 2D projections in a single row
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
### Creates Axes
if vertical:
h = 3 # 3 plots high
w = 1 # 1 plot wide
else:
h = 1 # 1 plot high
w = 3 # 3 plots wide
# polymer-T projection
ax_pT = fig.add_subplot(h, w, 1)
# salt-T projection
ax_sT = fig.add_subplot(h, w, 2)
# polymer-salt projection
ax_ps = fig.add_subplot(h, w, 3)
# computes binodal at different saltwater reservoir concentrations
# and plots on each of the three projections
for rho_salt_M, color in zip(rho_salt_M_list, color_list):
# converts mol/L to beads/sigma^3
rho_salt = rho_salt_M / beads_2_M
# makes dataframe of binodal for fixed salt reservoir concentration
df_mu = nacl.make_df_mu(data, mu_salt_folder, rho_salt, T_range, sigma,
naming_structure=naming_structure, ext=ext)
rho_p_I, rho_s_I, T_arr, rho_p_II, rho_s_II, _ = nacl.extract_df_mu_data(df_mu, z_name)
# computes temperature and identifies data within range
liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])
# converts temperature from Kelvin to Celsius
if T_cels:
T_arr -= K_2_C
# creates labels
label_sup = r'$\rho_s^{res} = $' + '{0:.2f} M, supernatant'.format(rho_salt_M)
label_co = r'$\rho_s^{res} = $' + '{0:.2f} M, coacervate'.format(rho_salt_M)
# polymer-T projection
ax_pT.plot(rho_p_I[liq_h2o], T_arr[liq_h2o], color=color, marker=marker,
fillstyle=fill_left, label=label_sup, lw=lw_sup)
ax_pT.plot(rho_p_II[liq_h2o], T_arr[liq_h2o], color=color, marker=marker,
fillstyle=fill_right, label=label_co, lw=lw_co)
# salt-T projection
ax_sT.plot(rho_s_I[liq_h2o], T_arr[liq_h2o], color=color, marker=marker,
fillstyle=fill_left, label=label_sup, lw=lw_sup)
ax_sT.plot(rho_s_II[liq_h2o], T_arr[liq_h2o], color=color, marker=marker,
fillstyle=fill_right, label=label_co, lw=lw_co)
# polymer-salt projection
ax_ps.plot(rho_p_I[liq_h2o], rho_s_I[liq_h2o], color=color, label=label_sup, lw=lw_sup, zorder=10)
ax_ps.plot(rho_p_II[liq_h2o], rho_s_II[liq_h2o], color=color, label=label_co, lw=lw_co, zorder=10)
# plots isothermal binodal slices in polymer-salt plane
for lB, lB_color in zip(lB_list, lB_color_list):
df = data[lB]
T = pe.lB_2_T(lB, sigma=sigma)
# loads binodal data for supernatant (I) and coacervate (II)
# doubles polycation concentration to include polyanion in polymer
# concentration
ion_I_list = list(beads_2_M*df['rhoCI'])
ion_II_list = list(beads_2_M*df['rhoCII'])
polymer_I_list = list(2*beads_2_M*df['rhoPCI'])
polymer_II_list = list(2*beads_2_M*df['rhoPCII'])
# critical points
polymer_c = polymer_I_list[-1]
ion_c = ion_I_list[-1]
# units for temperature
if T_cels:
T_unit = r'$^{\circ}$C'
T -= K_2_C
else:
T_unit = ' K'
# plots binodal, flipping coacervate order to be in order
label = r'$T = $' + '{0:d}{1:s} '.format(int(T), T_unit)
if show_lB:
label += r'$l_B = $ ' + '{0:.3f}'.format(lB)
ax_ps.plot(polymer_I_list + polymer_II_list[::-1],
ion_I_list + ion_II_list[::-1], color=lB_color, lw=lw_lB,
label=label, zorder=0)
# plots critical point
ax_ps.plot(polymer_c, ion_c, marker='o',
fillstyle='full', color=lB_color)
return fig, ax_pT, ax_sT, ax_ps
def binodal_rho_rho(data, lB_list, rho_var_list, rho_fix,
ch_var, beads_2_M, show_tie_line=True,
cmap_name='plasma', colors=None, sigma=None, title=None,
marker='o', fill_left='none', fill_right='full',
lgnd_out=True, tol=1E-4, ms=10, T_cels=False, show_lB=False,
T_range=[273.15, 373.15], lw=2, square_box=False, ax=None):
"""
Plots the binodal as a function of salt density and polyelectrolyte
density. Different Bjerrum lengths/temperatures are represented by
different trend lines.
Returns
-------
    ax : matplotlib axis with the plotted binodal.
"""
# variables defining order of plotted objects
back = 0
front = 10
# lists symbols for plotting overall composition
sym_list = ['*', '^', 's', '<', '>', 'v', '+', 'x']
# creates dictionary to order fixed and varied densities properly
d = get_plot_dict_p_s(ch_var)
# creates list of colors for each value of rho_p
if colors is None:
colors = get_colors(cmap_name, len(lB_list))
# determines units
if beads_2_M != 1:
units_rho = 'mol/L'
else:
units_rho = r'beads/$\sigma^3$'
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
# loops through each temperature / Bjerrum length in data
for i, lB in enumerate(lB_list):
df = data[lB]
# loads binodal data for supernatant (I) and coacervate (II)
# doubles polycation concentration to include polyanion in polymer
# concentration
ion_I_list = list(beads_2_M*df['rhoCI'])
ion_II_list = list(beads_2_M*df['rhoCII'])
polymer_I_list = list(2*beads_2_M*df['rhoPCI'])
polymer_II_list = list(2*beads_2_M*df['rhoPCII'])
# critical points
polymer_c = polymer_I_list[-1]
ion_c = ion_I_list[-1]
# computes temperature
T = pe.lB_2_T(lB, sigma=sigma)
if T_cels:
T_unit = r'$^{\circ}$C'
T -= K_2_C
else:
T_unit = ' K'
# plots tie lines and overall composition
for j, rho_var in enumerate(rho_var_list):
rho_pair = np.array([rho_var, rho_fix])
rho_p, rho_s = rho_pair[d['order']]
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
lB_arr = np.asarray(results['lB'])
alpha = results['alpha']
# converts to arrays of polymer and salt concentrations
rho_p_I = 2*np.asarray(rho_PCI_list)
rho_s_I = np.asarray(rho_CI_list)
rho_p_II = 2*np.asarray(rho_PCII_list)
rho_s_II = np.asarray(rho_CII_list)
# continues if no T in range has 2 phases for concentration
# finds closest match given Bjerrum length
try:
i_tie = np.where(np.abs(lB_arr - lB) < tol)[0][0]
except:
                print('lB = {0:.3f} gives 1 phase for'.format(lB) + \
                      ' rho_p = {0:.3f} [{1:s}], '.format(rho_p, units_rho) + \
                      'rho_s = {0:.3f} [{1:s}].'.format(rho_s, units_rho))
continue
# tie line
if show_tie_line:
ax.plot([rho_p_I[i_tie], rho_p_II[i_tie]],
[rho_s_I[i_tie], rho_s_II[i_tie]], '--',
color='k', lw=lw, zorder=back)
# supernatant
ax.plot(rho_p_I[i_tie], rho_s_I[i_tie], color=colors[i],
marker='o', fillstyle='none', zorder=front)
# coacervate
ax.plot(rho_p_II[i_tie], rho_s_II[i_tie], color=colors[i],
marker='o', fillstyle='none', zorder=front)
# plots overall composition last time through
if i == len(lB_list)-1:
                # abbreviated units; keys must match units_rho set above
                short = {'mol/L' : 'M', r'beads/$\sigma^3$' : r'$\sigma^{-3}$'}
if sym_list[j] == '*':
ms_boost = 4
else:
ms_boost = 0
ax.plot(rho_p, rho_s, marker=sym_list[j], markerfacecolor='w',
ms=ms+ms_boost, markeredgecolor='k',
markeredgewidth=1.5, lw=0,
label=r'$\rho_p = $ ' + '{0:.2f} {1:s}'.format(rho_p,
short[units_rho]) + r', $\rho_s = $ ' + \
'{0:.2f} {1:s}'.format(rho_s, short[units_rho]),
zorder=front)
# plots binodal, flipping coacervate order to be in order
label = r'$T = $' + '{0:d}{1:s}'.format(int(T), T_unit)
if show_lB:
label += r', $l_B = $ ' + '{0:.3f}'.format(lB)
ax.plot(polymer_I_list + polymer_II_list[::-1],
ion_I_list + ion_II_list[::-1],
color=colors[i], lw=lw,
label=label, zorder=front)
# plots critical point
ax.plot(polymer_c, ion_c, marker='o',
fillstyle='full', color=colors[i], zorder=front)
# formats plot
x_label = r'$\rho_p$'
y_label = r'$\rho_s$ [' + units_rho + ']'
# determines component with varied concentration
name_pair = ['Polymer', 'Salt']
name_var = name_pair[d['order'][0]]
if title is None:
title = 'Vary Overall {0:s} Concentration'.format(name_var)
format_binodal(ax, x_label, units_rho, T_range, y_label=y_label, title=title,
lgnd_out=lgnd_out, square_box=square_box)
return ax
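# --- Example usage (illustrative sketch; values below are assumptions) ---
# `data` is the dict of binodal DataFrames keyed by Bjerrum length (loaded
# elsewhere, e.g. via nacl.load_data), and beads_2_M converts bead densities
# to mol/L for the chosen bead size.
# ax = binodal_rho_rho(data, lB_list=[1.2, 1.5, 1.8],
#                      rho_var_list=[1.6, 1.8, 2.0], rho_fix=0.3,
#                      ch_var='s', beads_2_M=beads_2_M, T_cels=True)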
def binodal_surf_3d(data, mode='markers', ms=4, op=0.01,
c1='blue', c2='red', lw=0, fig=None):
"""Plots surface binodal in 3d."""
x1, y1, z1, x2, y2, z2 = data
    if fig is None:
fig = go.Figure()
# plots phase I (supernatant) of full binodal
fig = fig.add_trace(go.Scatter3d(
x=x1, y=y1, z=z1,
mode=mode,
marker=dict(
size=ms,
opacity=op,
color=c1
),
line=dict(
color=c1,
width=lw,
),
))
# plots phase II (coacervate) of full binodal
fig.add_trace(go.Scatter3d(
x=x2, y=y2, z=z2,
mode=mode,
marker=dict(
size=ms,
opacity=op,
color=c2
),
line=dict(
color=c2,
width=lw,
),
))
return fig
def binodal_surf_3d_batch(data_3d, op, ms, lw, mode, fig=None, skip=[]):
"""
Plots batch of data for a 3d surface binodal.
"""
# extracts data
x1_coll, y1_coll, z1_coll, x2_coll, y2_coll, z2_coll = data_3d
z_arr = np.unique(z1_coll)
# plots data at each z value
for (i, z) in enumerate(z_arr):
# skips indices requested
if i in skip:
continue
# extracts data corresponding to current z value (T or lB)
x1 = x1_coll[z1_coll==z]
y1 = y1_coll[z1_coll==z]
z1 = z1_coll[z1_coll==z]
x2 = x2_coll[z2_coll==z]
y2 = y2_coll[z2_coll==z]
z2 = z2_coll[z2_coll==z]
        # plots data on 3D plot
fig = binodal_surf_3d((x1, y1, z1, x2, y2, z2), op=op, ms=ms, lw=lw,
mode=mode, fig=fig)
return fig
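# --- Example usage (sketch; `data_3d` structure inferred from the loop above) ---
# `data_3d` is a 6-tuple of flat arrays (x1, y1, z1, x2, y2, z2) holding the
# supernatant (1) and coacervate (2) branches, with z the slice value (T or lB).
# fig = binodal_surf_3d_batch(data_3d, op=0.05, ms=3, lw=0, mode='markers')
# fig.show()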
def binodal_vary_conc(mu_salt_folder, data, rho_salt_list, beads_2_M, qty,
x_var='polycation', x_label=r'$\rho_{PSS}$', sigma=None,
T_range=[273,373], cmap_name='plasma', colors=None,
marker='o', fill_left='none', fill_right='full',
lgnd_out=True):
"""
LEGACY
Plots the binodal for different average densities of polymer.
    Parameters
    ----------
    qty : string
The quantity from df to return. Options include 'rhoPC', 'rhoPA',
'rhoC', and 'rhoA'.
"""
# creates list of colors for each value of rho_p
if colors is None:
colors = get_colors(cmap_name, len(rho_salt_list))
# creates figure
fig = plt.figure()
ax = fig.add_subplot(111)
for i, rho_salt in enumerate(rho_salt_list):
# plots binodal for low polymer concentration [M]
mu_conc = nacl.get_mu_conc(mu_salt_folder, data, rho_salt, beads_2_M=beads_2_M)
try:
lB_arr, rho_PCI_list, rho_PCII_list = nacl.fixed_conc(mu_conc, data, qty, beads_2_M=beads_2_M)
except:
continue
# selects the x-axis data
left_arr = np.array(rho_PCI_list)
right_arr = np.array(rho_PCII_list)
# computes temperature and identifies data within range
T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])
# determines units
if beads_2_M is not None:
units_rho = '[mol/L]'
else:
units_rho = '[beads/sigma^3]'
# left binodal
ax.plot(left_arr[liq_h2o], T_arr[liq_h2o], color=colors[i],
marker=marker, fillstyle=fill_left,
label=r'$\rho_{salt} = $' + '{0:.2f} {1:s}, supernatant' \
.format(rho_salt, units_rho))
# right binodal
ax.plot(right_arr[liq_h2o], T_arr[liq_h2o], color=colors[i],
marker=marker, fillstyle=fill_right,
label=r'$\rho_{salt} = $' + \
'{0:.2f} {1:s}, coacervate'.format(rho_salt, units_rho))
# formats plot
ax.set_ylim(T_range)
ax.set_xlabel(x_label + ' ' + units_rho, fontsize=16)
ax.set_ylabel(r'$T$ [K]', fontsize=16)
ax.tick_params(axis='both', labelsize=14)
ax.set_title('Effect of Salt Reservoir on Binodal', fontsize=16)
# put legend outside of plot box
if lgnd_out:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height])
legend_x = 1
legend_y = 0.5
plt.legend(loc='center left', bbox_to_anchor=(legend_x, legend_y), fontsize=12)
else:
plt.legend(fontsize=12)
return ax
def binodal_vary_f(data, f_list, color_list, T_cels=True, x_label=r'$\rho_p$',
units_rho='M', T_range=[273.15, 373.15], lw1=1, lw2=4,
square_box=True, show_lgnd=False, ax=None):
"""
Plots binodal projected onto coordinate plane for different charge fractions
f.
"""
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
for f, color in zip(f_list, color_list):
# creates labels
label_sup = r'$f$ =' + ' {0:.2f} supernatant'.format(f)
label_co = r'$f$ =' + ' {0:.2f} coacervate'.format(f)
# extracts data
T_arr, rho_p_I, rho_p_II = data[f]
# polymer-T projection
ax.plot(rho_p_I, T_arr, color=color, label=label_sup, lw=lw1)
ax.plot(rho_p_II, T_arr, color=color, label=label_co, lw=lw2)
# formats plot
format_binodal(ax, x_label, units_rho, T_range, T_cels=T_cels,
square_box=square_box, show_lgnd=show_lgnd)
return ax
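# --- Example usage (sketch; the dict layout is read off the loop above) ---
# `data` maps each charge fraction f to a tuple (T_arr, rho_p_sup, rho_p_co):
# data = {0.5: (T_arr_05, rho_p_I_05, rho_p_II_05),
#         1.0: (T_arr_10, rho_p_I_10, rho_p_II_10)}
# ax = binodal_vary_f(data, f_list=[0.5, 1.0], color_list=['C0', 'C1'])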
def binodal_vary_N(data, N_list, color_list, T_cels=True, x_label=r'$\rho_p$',
units_rho='M', T_range=[273.15, 373.15], lw1=1, lw2=4,
square_box=True, show_lgnd=False, ax=None):
"""
Plots binodal projected onto coordinate plane for different degrees of
polymerization N.
"""
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(131)
for N, color in zip(N_list, color_list):
# extracts data for given N
T_arr, rho_p_I, rho_p_II = data[N]
# creates labels
label_sup = r'$N$ =' + ' {0:d} supernatant'.format(N)
label_co = r'$N$ =' + ' {0:d} coacervate'.format(N)
# polymer-T projection
ax.plot(rho_p_I, T_arr, color=color, label=label_sup, lw=lw1)
ax.plot(rho_p_II, T_arr, color=color, label=label_co, lw=lw2)
# formats plot
format_binodal(ax, x_label, units_rho, T_range, T_cels=T_cels,
square_box=square_box, show_lgnd=show_lgnd)
return ax
def binodal_vary_sigma(data, sigma_list, color_list,
T_cels=True, x_label=r'$\rho_p$', units_rho='M',
T_range=[273.15, 373.15], lw1=1, lw2=4, square_box=True,
show_lgnd=False, x_lim=None, ax=None):
"""
    Plots binodal projected onto coordinate plane for different bead sizes
    sigma.
"""
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
for sigma, color in zip(sigma_list, color_list):
# creates labels
label_sup = r'$\sigma$ =' + ' {0:.1f}'.format(sigma*m_2_A) + r' $\AA$ supernatant'
label_co = r'$\sigma$ =' + ' {0:.1f}'.format(sigma*m_2_A) + r' $\AA$ coacervate'
# extracts data
T_arr, rho_p_I, rho_p_II = data[sigma]
# polymer-T projection
ax.plot(rho_p_I, T_arr, color=color, label=label_sup, lw=lw1)
ax.plot(rho_p_II, T_arr, color=color, label=label_co, lw=lw2)
# formats plot
format_binodal(ax, x_label, units_rho, T_range, T_cels=T_cels, x_lim=x_lim,
square_box=square_box, show_lgnd=show_lgnd)
return ax
def binodal_vary_rho(data, rho_var_list, rho_fix, ch_var, beads_2_M,
x_var='polycation', x_label=r'$\rho_{PSS}$', sigma=None,
T_range=[273.15,373.15], cmap_name='plasma', colors=None,
marker='o', fill_left='none', fill_right='full', lw_sup=1,
lw_co=3, lgnd_out=True, lw=1, x_lim=None, T_cels=False,
title=None, c_sup='#1414FF', c_co='#FF0000', ls_sup='-',
square_box=False, ax=None):
"""
Plots the binodal for different average densities of polymer.
If T_cels is True, converts the temperature from Kelvin to Celsius
"""
# creates dictionary of values based on which component's density is varied
d = get_plot_dict_p_s(ch_var)
# creates list of colors for each value of rho_p
if colors is None:
if cmap_name is not None:
colors = get_colors(cmap_name, len(rho_var_list))
# creates figure
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
### Plots figure
for i, rho_var in enumerate(rho_var_list):
# plots binodal for low polymer concentration [M]
rho_pair = np.array([rho_var, rho_fix])
rho_p, rho_s = rho_pair[d['order']]
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
rho_CI_list = results['rho_CI']
rho_CII_list = results['rho_CII']
rho_PAI_list = results['rho_PAI']
rho_PAII_list = results['rho_PAII']
rho_AI_list = results['rho_AI']
rho_AII_list = results['rho_AII']
lB_arr = results['lB']
alpha = results['alpha']
# selects the x-axis data
if x_var == 'polycation':
left_arr = np.array(rho_PCI_list)
right_arr = np.array(rho_PCII_list)
elif x_var == 'polyanion':
left_arr = np.array(rho_PAI_list)
right_arr = np.array(rho_PAII_list)
elif x_var == 'cation':
left_arr = np.array(rho_CI_list)
right_arr = np.array(rho_CII_list)
elif x_var == 'anion':
left_arr = np.array(rho_AI_list)
right_arr = np.array(rho_AII_list)
elif x_var == 'solvent':
left_arr = pe.calc_rho_solv(rho_PCI_list,
rho_CI_list,
beads_2_M)
right_arr = pe.calc_rho_solv(rho_PCII_list,
rho_CII_list,
beads_2_M)
elif x_var == 'polyelectrolyte':
left_arr = np.array(rho_PCI_list) + np.array(rho_PAI_list)
right_arr = np.array(rho_PCII_list) + np.array(rho_PAII_list)
elif x_var == 'salt':
left_arr = np.array(rho_CI_list)
right_arr = np.array(rho_CII_list)
else:
print('Error. Invalid x variable in plot.binodal_vary_rho().')
# computes temperature and identifies data within range
T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])
# converts temperature from Kelvin to Celsius
if T_cels:
T_arr -= K_2_C
# assigns separate colors to coacervate and supernatant if not specified
if colors is not None:
c_sup = colors[i]
c_co = colors[i]
# supernatant
ax.plot(left_arr[liq_h2o], T_arr[liq_h2o], color=c_sup,
marker=marker, fillstyle=fill_left, ls=ls_sup,
label=r'$\rho_' + d['ch_var'] + ' = $' + '{0:.2f} M, supernatant' \
.format(rho_var), lw=lw_sup)
# coacervate
ax.plot(right_arr[liq_h2o], T_arr[liq_h2o], color=c_co,
marker=marker, fillstyle=fill_right,
label=r'$\rho_' + d['ch_var'] + ' = $' + \
'{0:.2f} M, coacervate'.format(rho_var), lw=lw_co)
# determines units of density to display on plot
if beads_2_M is not None:
units_rho = 'mol/L'
else:
units_rho = 'beads/sigma^3'
# formats plot
if title is None:
title = 'Effect of {0:s} on Binodal, {1:s} = {2:.2f} M' \
.format(d['name_var'], r'$\rho_' + d['ch_fix'] + '$', rho_fix)
format_binodal(ax, x_label, units_rho, T_range, title=title, x_lim=x_lim,
T_cels=T_cels, square_box=square_box)
return ax
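# --- Example usage (sketch; concentrations are illustrative assumptions) ---
# Fix the overall salt concentration at 1.8 M and vary the overall polymer
# concentration; ch_var='p' marks the polymer density as the varied one.
# ax = binodal_vary_rho(data, rho_var_list=[0.1, 0.3, 0.5], rho_fix=1.8,
#                       ch_var='p', beads_2_M=beads_2_M,
#                       x_var='polyelectrolyte', x_label=r'$\rho_p$',
#                       T_cels=True)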
def fig4(data_pred, df_exp, rho_s_raw_list, rho_p_raw, sigma, T_range,
lw=3, c_sup='#1414FF', c_co='#FF0000', ms=11,
mfc='w', mew=1.5, x_lim=None, x_label=r'$\rho_{PSS}$',
conv_vals=False, tol=1E-6, show_lgnd=False,
figsize=None, pad=3, vertical=False, plot_errorbars=False):
"""
Validates fit of sigma to experiments.
"""
# computes conversion from beads/sigma^3 to mol/L
beads_2_M = pe.get_beads_2_M(sigma, SI=True)
# creates figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
# determines arrangement of subplots
if vertical:
h = len(rho_s_raw_list) # many plots high
w = 1 # 1 plot wide
else:
h = 1 # 1 plot high
w = len(rho_s_raw_list) # many plots wide
# Plots figure
for i, rho_s_raw in enumerate(rho_s_raw_list):
if conv_vals:
rho_p, rho_s = nacl.conv_ali_conc(df_exp, rho_p_raw, rho_s_raw)
# creates subplot
ax = fig.add_subplot(h, w, i+1)
# polymer-temperature plane
ax = binodal_custom_rho(data_pred, [rho_p], [rho_s], beads_2_M,
x_var='polycation', x_label=x_label,
x_lim=x_lim, sigma=sigma, T_range=T_range,
marker='', lw=lw, lw_sup=lw, lw_co=lw,
colors=None, cmap_name=None, T_cels=True,
c_sup=c_sup, c_co=c_co, ls_sup='--',
square_box=True, show_lgnd=show_lgnd, ax=ax)
# plots experimental results
for i in range(len(df_exp)):
rho_p_exp, rho_s_exp, T_exp, \
rho_p_sup, rho_p_co, s_rho_p_sup, \
s_rho_p_co = nacl.read_df_exp(df_exp, i, conv_vals=conv_vals,
read_sigma=plot_errorbars)
if (rho_p_exp == rho_p) and (rho_s_exp == rho_s):
# plots supernatant and coacervate compositions
rho_pss_sup = rho_p_sup/2
rho_pss_co = rho_p_co/2
if plot_errorbars:
s_rho_pss_sup = s_rho_p_sup/2
s_rho_pss_co = s_rho_p_co/2
ax.errorbar(rho_pss_sup, T_exp, xerr=s_rho_pss_sup, lw=0, marker='o', ms=ms,
markerfacecolor=mfc, markeredgewidth=mew, elinewidth=1,
markeredgecolor=c_sup, label='Ali et al. (2019), supernatant')
ax.errorbar(rho_pss_co, T_exp, xerr=s_rho_pss_co, lw=0, marker='o', ms=ms,
markerfacecolor=c_co, markeredgewidth=mew, elinewidth=1,
markeredgecolor=c_co, label='Ali et al. (2019), coacervate')
else:
ax.plot(rho_pss_sup, T_exp, lw=0, marker='o', ms=ms,
markerfacecolor=mfc, markeredgewidth=mew,
markeredgecolor=c_sup, label='Ali et al. (2019), supernatant')
ax.plot(rho_pss_co, T_exp, lw=0, marker='o', ms=ms,
markerfacecolor=c_co, markeredgewidth=mew,
markeredgecolor=c_co, label='Ali et al. (2019), coacervate')
# pads subplots with whitespace
fig.tight_layout(pad=pad)
return fig
def figs3(data_folder_N, data_folder_f, data_folder_sigma,
mu_salt_folder_N, mu_salt_folder_f, mu_salt_folder_sigma,
rho_s_M_N, rho_s_M_f, rho_s_M_sigma, ext_N, ext_f, ext_sigma,
N_list, f_list, sigma_list, color_list_N, color_list_f,
color_list_sigma, sigma_fixed, x_lim_sigma=[0,6], figsize=None, pad=3,
naming_structure_sigma='NA(100)NB(100)lB(*)', lB_lo=1.3, lB_hi=2.398):
"""Plots Figure S3 of SI showing effects of N, f, and sigma on
binodal projections in polymer-temperature plane."""
# creates figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
### Effect of varying N
print('loading N data')
# adds subplot
axN = fig.add_subplot(131)
# extracts data
data_vary_N = nacl.binodal_vary_N_data(data_folder_N, mu_salt_folder_N,
rho_s_M_N, N_list, sigma=sigma_fixed, ext=ext_N)
# plots data
print('plotting N data')
_ = binodal_vary_N(data_vary_N, N_list, color_list_N, ax=axN)
### Effect of varying charge fraction f
# adds subplot
axf = fig.add_subplot(132)
# extracts data
print('loading f data')
data_vary_f = nacl.binodal_vary_f_data(data_folder_f, mu_salt_folder_f,
rho_s_M_f, f_list,
sigma=sigma_fixed, ext=ext_f)
# plots data
print('plotting f data')
_ = binodal_vary_f(data_vary_f, f_list, color_list_f, ax=axf)
### Effect of varying sigma
axsigma = fig.add_subplot(133)
    # loads all data
print('loading sigma data')
data = nacl.load_data(data_folder_sigma, ext=ext_sigma,
naming_structure=naming_structure_sigma, lB_lo=lB_lo, lB_hi=lB_hi)
# extracts relevant data
data_vary_sigma = nacl.binodal_vary_sigma_data(data, mu_salt_folder_sigma,
rho_s_M_sigma, sigma_list, ext=ext_sigma)
# plots data
print('plotting sigma data')
_ = binodal_vary_sigma(data_vary_sigma, sigma_list,
color_list_sigma, ax=axsigma, x_lim=x_lim_sigma)
# pads subplots with whitespace
fig.tight_layout(pad=pad)
return fig
def compare_to_exp(data, beads_2_M, rho_p_list=[0.3], rho_s_list=[1.6, 1.85, 1.9],
N=100, f=1, sigma=4, t_fs=12, T_range=[273.15, 323.15]):
"""
Compares predictions from data to the experiment in the Prabhu group.
"""
# sets x and y axis limits
x_lim = (-0.05, 1.3) # [mol/L]
y_lim = (0, 60) # [C]
# sets temperature range
T_range = [273, 333]
for rho_s in rho_s_list:
for rho_p in rho_p_list:
# computes polycation concentrations at different temperatures for fixed polymer and salt [mol/L]
results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
rho_PCI_list = results['rho_PCI']
rho_PCII_list = results['rho_PCII']
lB_arr = results['lB']
# plots binodal
title = '{0:.2f} M Salt, {1:.2f} M Polymer, N = {2:d}, f = {3:.2f}, sig = {4:.2f} A'.format(rho_s, rho_p, N, f, sigma)
p = binodal(lB_arr, rho_PCI_list, rho_PCII_list, title=title,
beads_2_M=1, n_tie_lines=0, deg_C=True, T_range=T_range,
x_lim=x_lim, y_lim=y_lim, marker=False, line=True)
p.title.text_font_size = '{0:d}pt'.format(t_fs)
show(p)
return
def crit_line_3d(data_cp, c_crit, lw_crit, fig):
"""
Plots critical line in 3D, typically for 3D surface binodal plot.
LEGACY
"""
polymer_c_list, salt_c_list, z_arr = data_cp
fig.add_trace(go.Scatter3d(
x=polymer_c_list,
y=salt_c_list,
z=z_arr,
mode='lines',
line=dict(
color=c_crit,
width=lw_crit,
),
),
)
return fig
def fig1(data_3d, data_cp, data_z, data_mu, plot_params, fixed_T=True,
fixed_salt=True, crit_line=True, fixed_comp=False,
data_comp=None, data_outlines=None, skip=[], plot_axes=True,
outline_scale_factor=1.02, toc_fig=False, has_ucst=False,
show_labels=True):
"""
Plots Figure 1 from CCLS paper: 3d surface binodal, fixed T 2d line binodal,
fixed salt reservoir concentration 2d line binodal, and critical line.
"""
# if Table of Contents (TOC) figure, removes all but LCST
if toc_fig:
fixed_salt = True
crit_line = True
fixed_comp = False
fixed_T = False
x_range, y_range, z_range, eye_xyz, op, ms_bin, lw_bin, \
lw_fix, lw_crit, lw_outline, c1_T, c2_T, c1_fix, c2_fix, \
c_crit, c_outline, mode, width, height, fs, offset = plot_params
x, y, z = eye_xyz
# plots 3d surface binodal
fig = binodal_surf_3d_batch(data_3d, op, ms_bin, lw_bin, mode, skip=skip)
if crit_line:
# plots critical line
fig = line_3d(*data_cp, c=c_crit, lw=lw_crit, fig=fig)
if fixed_T:
# plots binodal at fixed z value (temperature or Bjerrum length)
fig = binodal_line_3d(data_z, fig=fig, lw=lw_fix, c1=c1_T, c2=c2_T)
if fixed_salt:
### FIXED SALT CONCENTRATION ###
# if there is a UCST, split the binodal in two
if has_ucst:
# identifies threshold between UCST and LCST by largest gap in z
z1 = data_mu[2]
z1_diff = np.diff(z1)
i_thresh = np.argmax(z1_diff)
thresh_ucst = (z1[i_thresh] + z1[i_thresh+1])/2
# splits data below UCST and above LCST
ucst_data = list(zip(*[(x1, y1, z1, x2, y2, z2) for x1, y1, z1, x2, y2, z2 in zip(*data_mu) if z1 < thresh_ucst]))
lcst_data = list(zip(*[(x1, y1, z1, x2, y2, z2) for x1, y1, z1, x2, y2, z2 in zip(*data_mu) if z1 > thresh_ucst]))
# plots UCST and LCST data separately
fig = binodal_line_3d(ucst_data, fig=fig, lw=lw_fix, c1=c1_fix, c2=c2_fix)
fig = binodal_line_3d(lcst_data, fig=fig, lw=lw_fix, c1=c1_fix, c2=c2_fix)
else:
# plots data for fixed saltwater reservoir concentration
fig = binodal_line_3d(data_mu, fig=fig, lw=lw_fix, c1=c1_fix, c2=c2_fix)
if fixed_comp:
# plots binodal at fixed overall salt, polymer concentration #
fig = binodal_line_3d(data_comp, fig=fig, lw=lw_fix, c1=c1_fix, c2=c2_fix)
# plots outlines of the surface for definition
if data_outlines is not None:
for data_outline in data_outlines:
data_outline_scaled = []
for coord in data_outline:
coord = outline_scale_factor*np.asarray(coord)
data_outline_scaled += [coord]
fig = binodal_line_3d(data_outline_scaled, c1=c_outline,
c2=c_outline, fig=fig)
if plot_axes:
# x-axis
fig = line_3d(x_range, [offset, offset], [z_range[0] + offset,
z_range[0] + offset], lw=12, c=c_outline, fig=fig)
# y-axis
fig = line_3d([offset, offset], y_range, [z_range[0] + offset,
z_range[0] + offset], lw=12, c=c_outline, fig=fig)
# z-axis
fig = line_3d([offset, offset], [offset, offset], z_range,
c=c_outline, lw=12, fig=fig)
### FORMATS FIGURE ###
fig.update_layout(
scene = dict(xaxis = dict(range=x_range,),
yaxis = dict(range=y_range,),
zaxis = dict(range=z_range,),
),
width = width,
height = height,
# changes initial view of figure
scene_camera = dict(
eye=dict(x=x, y=y, z=z),
# center=dict(x=0, y=0.3, z=0.3),
# up=dict(x=0, y=0, z=1)
),
font = dict(
family='Arial',
color='black',
size=fs)
)
### Cleanup
    # removes legend (too crowded to be of use)
fig.update_layout(showlegend=False)
    # removes tick labels and axis titles (so I can add them myself)
if not show_labels:
fig.update_layout(
scene = dict(xaxis = dict(showticklabels=False, title=''),
yaxis = dict(showticklabels=False, title=''),
zaxis = dict(showticklabels=False, title='',
tickmode = 'linear',
tick0 = 0,
dtick = 50),
),
)
return fig
def fig2a(rho_salt_M_list_list, data, mu_salt_folder,
color_list, T_range, sigma, z_name,
beads_2_M, lB_list, lB_color_list, pad,
kwargs, units_rho='mol/L', show_lgnd=False, y_lim_T=(0, 100),
rho_p_label=r'$\rho_p$', rho_s_label=r'$\rho_s$',
y_lim_s=[0, 2.25]):
"""Plots Figure 2a of binodal projections at different saltwater concentrations."""
for rho_salt_M_list in rho_salt_M_list_list:
# plots binodal projections
fig, ax_pT, ax_sT, \
ax_ps = binodal_proj_fixed_conc(data, mu_salt_folder, rho_salt_M_list,
color_list, T_range, sigma, z_name,
beads_2_M, lB_list, lB_color_list,
**kwargs)
# formats plots
ax_pT = format_binodal(ax_pT, rho_p_label, units_rho, T_range,
T_cels=kwargs['T_cels'], y_lim=y_lim_T,
show_lgnd=show_lgnd)
ax_sT = format_binodal(ax_sT, rho_s_label, units_rho, T_range,
T_cels=kwargs['T_cels'], y_lim=y_lim_T,
show_lgnd=show_lgnd)
ax_ps = format_binodal(ax_ps, rho_p_label, units_rho, T_range,
y_label=rho_s_label + ' [' + units_rho + ']',
show_lgnd=show_lgnd, y_lim=y_lim_s)
# pads plots with whitespace
fig.tight_layout(pad=pad)
return fig
def fig2b(data, rho_p_list, rho_s_list, beads_2_M, lB_list, color_list,
lB_color_list, kwargs, alpha_y_lim=(0.5,1.05),
alpha_yticks=(0.5,0.75,1), figsize=None, pad=3, mew=0.5,
show_lgnd=False):
"""Plots Figure 2b of binodal projections at different overall compositions."""
### Formats Figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
### polymer-temperature plane ###
ax1 = fig.add_subplot(221)
_ = binodal_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
x_var='polyelectrolyte', x_label=r'$\rho_p$',
marker='', colors=color_list,
plot_fixed_rho=True, ax=ax1, show_lgnd=show_lgnd,
**kwargs)
### salt-temperature plane ###
ax2 = fig.add_subplot(222)
_ = binodal_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
x_var='salt', x_label=r'$\rho_s$', marker='',
colors=color_list, plot_fixed_rho=True,
ax=ax2, show_lgnd=show_lgnd, **kwargs)
### polymer-salt plane ###
ax3 = fig.add_subplot(223)
_ = binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list,
beads_2_M, colors=lB_color_list, mew=mew, ax=ax3,
show_lgnd=show_lgnd, colors_symbols=color_list,
**kwargs)
### volume fraction of supernatant vs. temperature ###
ax4 = fig.add_subplot(224)
_ = alpha_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
y_lim=alpha_y_lim, marker='',
colors=color_list, ax=ax4, show_lgnd=show_lgnd,
**kwargs)
# customizes tick mark locations
ax4.set_yticks(alpha_yticks)
# pads subplots with whitespace
fig.tight_layout(pad=pad)
return fig
def fig3(data, lB_list, rho_p_fixed, rho_s_fixed, rho_p_varied, rho_s_varied,
beads_2_M, kwargs, figsize=None, pad=3, vertical=True):
"""Plots Figure 3 of tie lines in polymer-salt plane."""
# formats figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
# determines arrangement of subplots
if vertical:
h = 2 # 2 plots high
w = 1 # 1 plot wide
else:
h = 1 # 1 plot high
w = 2 # 2 plots wide
################ VARIES SALT CONCENTRATION ###############
# creates subplot
ax1 = fig.add_subplot(h, w, 1)
# plots binodal
rho_p_list = rho_p_fixed*np.ones([len(rho_s_varied)])
rho_s_list = rho_s_varied
_ = binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list,
beads_2_M, ax=ax1, show_lgnd=False, **kwargs)
############ VARIES POLYMER CONCENTRATION ####################
# creates subplot
ax2 = fig.add_subplot(h, w, 2)
# plots binodal
rho_p_list = rho_p_varied
rho_s_list = rho_s_fixed*np.ones([len(rho_p_varied)])
ax = binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list,
beads_2_M, ax=ax2, show_lgnd=False, **kwargs)
# pads subplots with whitespace
fig.tight_layout(pad=pad)
return fig
def figs1(T_range, sigma, T_room_C=20, T_cels=True, figsize=(5,5),
gridspec=10, lw=3, y_lim=[5.5,9.5], y_ticks=[6,7,8,9], d=0.5,
ax_fs=16, tk_fs=16):
"""Plots Figure S1 of the SI of Bjerrum length vs. T for fixed and
T-dependent dielectric constant."""
# computes Bjerrum lengths
T_arr, lB_A_arr, lB_0_A_arr = nacl.lB_comparison(T_range, sigma,
T_room_C=T_room_C)
# creates figure
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize,
gridspec_kw={'height_ratios': [gridspec,1]}, sharex=True)
# adjusts temperature based on requested unit
if T_cels:
T_arr -= K_2_C
unit_T = r'$^{\circ}C$'
else:
unit_T = 'K'
# plots Bjerrum lengths
ax1.plot(T_arr, lB_A_arr, lw=lw, label=r'$\epsilon(T)$')
ax1.plot(T_arr, lB_0_A_arr, lw=lw,
label=r'$\epsilon(T) = \epsilon($' + \
'{0:d}'.format(int(T_room_C)) + r'$^{\circ}C)$')
# formats plot
ax2.set_xlabel(r'$T$ [' + unit_T + ']', fontsize=ax_fs)
ax1.set_ylabel(r'$l_B$ $[\AA]$', fontsize=ax_fs)
ax1.tick_params(axis='both', labelsize=tk_fs)
ax2.tick_params(axis='both', labelsize=tk_fs)
### Creates broken axis
# see: https://matplotlib.org/stable/gallery/subplots_axes_and_figures/broken_axis.html
# set limits and ticks on upper axis
ax1.set_ylim(y_lim)
ax1.set_yticks(y_ticks)
# lower axis
ax2.set_ylim([0, 0.5])
ax2.set_yticks([0])
# hide the spines between ax and ax2
ax1.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax1.xaxis.tick_top()
ax1.tick_params(top=False, labeltop=False) # don't put ticks or labels at top
ax2.xaxis.tick_bottom()
# plots diagonal hatch marks on y-axis--"d" is ratio of height to length
kwargs = dict(marker=[(-1, -d), (1, d)], markersize=12,
linestyle="none", color='k', mec='k', mew=1, clip_on=False)
ax1.plot([0, 1], [0, 0], transform=ax1.transAxes, **kwargs)
ax2.plot([0, 1], [1, 1], transform=ax2.transAxes, **kwargs)
return fig
def format_binodal(ax, x_label, units_rho, T_range, y_label=None, title=None,
x_lim=None, y_lim=None, T_cels=False, lgnd_out=True,
square_box=True, show_lgnd=True):
"""
Formats axes of a plot of the binodal projected onto a plane with
temperature as the vertical axis.
"""
if x_lim is not None:
ax.set_xlim(x_lim)
ax.set_xlabel('{0:s} [{1:s}]'.format(x_label, units_rho), fontsize=18)
# assumes that the y axis is temperature if another label is not given
if y_label is None:
T_unit = 'K'
if T_cels:
T_unit = r'$^{\circ}$C'
T_range = [T - K_2_C for T in T_range]
if y_lim is None:
ax.set_ylim(T_range)
else:
ax.set_ylim(y_lim)
ax.set_ylabel(r'$T$' + ' [{0:s}]'.format(T_unit), fontsize=18)
else:
ax.set_ylabel(y_label, fontsize=18)
ax.set_ylim(y_lim)
ax.tick_params(axis='both', labelsize=16)
if title is not None:
ax.set_title(title, fontsize=16)
# makes box of plot square
if square_box:
ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim()))
# places legend outside of plot box
if show_lgnd:
if lgnd_out:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height])
legend_x = 1
legend_y = 0.5
ax.legend(loc='center left', bbox_to_anchor=(legend_x, legend_y),
fontsize=14, frameon=False)
else:
ax.legend(fontsize=12, frameon=False)
return ax
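# --- Example usage (sketch) ---
# fig, ax = plt.subplots()
# ax.plot(rho_p_sup, T_arr - K_2_C, label='supernatant')
# format_binodal(ax, r'$\rho_p$', 'mol/L', [273.15, 373.15], T_cels=True)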
def get_colors(cmap_name, n):
"""Returns list of colors using given colormap."""
cmap = plt.get_cmap(cmap_name)
return [cmap(val) for val in np.linspace(0, 1, n)]
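# Example (sketch): one color per Bjerrum length from the 'plasma' colormap.
# lB_color_list = get_colors('plasma', len(lB_list))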
def get_lgnd_labels(handles, labels, key):
"""Returns zipped handles and labels for which labels contains key."""
return [pair for pair in zip(handles, labels) if key in pair[1]]
def get_plot_dict_p_s(ch_var):
"""Returns a dictionary of key parameters for plotting based on varied component."""
d = {}
# polyelectrolyte density varied
if ch_var == 'p':
d = {'ch_var':'p', 'ch_fix':'s', 'order':[0,1], 'name_var':'Polymer'}
# salt density varied
elif ch_var == 's':
d = {'ch_var':'s', 'ch_fix':'p', 'order':[1,0], 'name_var':'Salt'}
else:
print('invalid ch_var character: choose s or p.')
return d
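# Example (sketch): with ch_var='s' the salt density is the varied one, so the
# returned 'order' reshuffles (varied, fixed) back into (polymer, salt):
# d = get_plot_dict_p_s('s')
# rho_p, rho_s = np.array([rho_var, rho_fix])[d['order']]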
def line_3d(x, y, z, mode='lines', ms=8, op=0.1,
c='black', lw=8, fig=None):
"""
Plots line in 3D plot (plotly).
"""
    if fig is None:
fig = go.Figure()
    # plots the line as a single 3D scatter trace
fig.add_trace(go.Scatter3d(
x=x, y=y, z=z,
mode=mode,
marker=dict(
size=ms,
opacity=op,
color=c
),
line=dict(
color=c,
width=lw,
),
))
return fig
def no_salt(df, n_plot, left='rhoPCI', right='rhoPCII', x_label='polycation density',
p=None, n_tie_lines=0, plot_T=False, title='', line=False, marker=True,
w=500, h=500, units_rho='[beads/sigma^3]', deg_C=False,
leg1='supernatant', c1='blue', leg2='coacervate', c2='red'):
"""
Plots the binodal for a polyelectrolyte in solution without
salt.
"""
if plot_T:
y = 'T'
if deg_C:
y_label = 'Temperature [' + r'$^{\circ}$' + 'C]'
else:
y_label = 'Temperature [K]'
else:
y = 'BJ'
y_label = 'Bjerrum length'
# samples a uniform subset of the data
n = len(df)
skip = int(n / n_plot)
sample = df.iloc[::skip]
# creates figure object if not provided
if p is None:
p = figure(plot_width=w, plot_height=h)
# loads source for plot data
source = ColumnDataSource(sample)
if marker:
# creates circle glyph of polycation concentration in dilute phase
p.circle(x=left, y=y, source=source, size=10, color=c1,
legend_label=leg1)
# creates circle glyph of polycation concentration in coacervate phase
p.circle(x=right, y=y, source=source, size=10, color=c2,
legend_label=leg2)
if line:
        # creates line glyph of polycation concentration in dilute phase
p.line(x=left, y=y, source=source, line_width=6, line_color=c1,
legend_label=leg1)
        # creates line glyph of polycation concentration in coacervate phase
p.line(x=right, y=y, source=source, line_width=6, line_color=c2,
legend_label=leg2)
# adds tie lines
if n_tie_lines > 0:
skip_tie_lines = int(n / n_tie_lines)
df_tie_lines = df.iloc[::skip_tie_lines]
for t in range(len(df_tie_lines)):
p.line([df_tie_lines[left].iloc[t], df_tie_lines[right].iloc[t]],
[df_tie_lines[y].iloc[t], df_tie_lines[y].iloc[t]],
color='black')
# adds plot labels
p.xaxis.axis_label = x_label + ' ' + units_rho
p.xaxis.axis_label_text_font_size = '18pt'
p.xaxis.major_label_text_font_size = '14pt'
p.yaxis.axis_label = y_label
p.yaxis.axis_label_text_font_size = '18pt'
p.yaxis.major_label_text_font_size = '14pt'
# adds title
p.title.text = title
p.title.text_font_size = '16pt'
# formats legend
p.legend.location = "bottom_right"
p.legend.label_text_font_size = '14pt'
p.legend.click_policy = 'hide'
# creates hover feature to read data
hover = HoverTool()
hover.tooltips=[
(y_label, '@' + y),
(x_label + ' (I)', '@' + left),
(x_label + ' (II)', '@' + right)
]
p.add_tools(hover)
return p
def pt_3d(x, y, z, mode='markers', ms=8, op=1,
c='black', fig=None):
"""
    Plots a point in a 3D plot (plotly).
"""
    if fig is None:
fig = go.Figure()
    # plots a single marker at (x, y, z)
fig.add_trace(go.Scatter3d(
x=[x], y=[y], z=[z],
mode=mode,
marker=dict(
size=ms,
opacity=op,
color=c
),
))
return fig
def salt(df, n_plot, p=None, n_tie_lines=0):
"""
Plots the binodal for a polyelectrolyte in solution with salt
at a fixed Bjerrum length on rho_p vs. rho_s axes.
"""
# samples a uniform subset of the data
n = len(df)
skip = int(n / n_plot)
sample = df.iloc[::skip]
# creates figure object if not provided
if p is None:
p = figure()
# loads source for plot data
source = ColumnDataSource(sample)
# creates circle glyph of polycation concentration in dilute phase
p.circle(x='rhoPAI', y='rhoAI', source=source, size=10, color='red', legend_label='dilute phase (I)')
# creates circle glyph of polycation concentration in coacervate phase
p.circle(x='rhoPAII', y='rhoAII', source=source, size=10, color='blue', legend_label='coacervate phase (II)')
# draws tie lines
if n_tie_lines > 0:
skip_tie_lines = int(n / n_tie_lines)
df_tie_lines = df.iloc[::skip_tie_lines]
for t in range(len(df_tie_lines)):
x = [df_tie_lines['rhoPAI'].iloc[t], df_tie_lines['rhoPAII'].iloc[t]]
y = [df_tie_lines['rhoAI'].iloc[t], df_tie_lines['rhoAII'].iloc[t]]
p.line(x, y, color='black')
# adds plot labels
p.xaxis.axis_label = 'polyanion number density'
p.xaxis.axis_label_text_font_size = '18pt'
p.xaxis.major_label_text_font_size = '14pt'
p.yaxis.axis_label = 'anion number density'
p.yaxis.axis_label_text_font_size = '18pt'
p.yaxis.major_label_text_font_size = '14pt'
# formats legend
p.legend.location = "top_right"
p.legend.label_text_font_size = '16pt'
p.legend.click_policy = 'hide'
# creates hover feature to read data
hover = HoverTool()
hover.tooltips=[
('Anion Density (I)', '@rhoAI'),
('Anion Density (II)', '@rhoAII'),
('Polyanion density (I)', '@rhoPAI'),
('Polyanion density (II)', '@rhoPAII')
]
p.add_tools(hover)
return p
def sort_lgnd_labels(ax, sorted_keys):
"""Sorts legend labels based on order of keywords."""
# gets handles and labels from legend
handles, labels = ax.get_legend_handles_labels()
# sorts by keywords
lgnd_sorted = []
for key in sorted_keys:
lgnd_sorted += get_lgnd_labels(handles, labels, key)
# removes redundant entries
lgnd_unique = [(0,0)] # primer entry
[lgnd_unique.append(pair) for pair in lgnd_sorted if pair[1] \
not in list(zip(*lgnd_unique))[1]]
# removes primer entry
lgnd_unique = lgnd_unique[1:]
# unzips
handles_sorted, labels_sorted = zip(*lgnd_unique)
# adds legend outside plot
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height])
legend_x = 1
legend_y = 0.5
ax.legend(handles_sorted, labels_sorted, loc='center left',
bbox_to_anchor=(legend_x, legend_y),
fontsize=14, frameon=False)
return ax
def validate_fit(data_pred, df_exp, ch_var, rho_var_list, rho_fix, colors,
beads_2_M_opt, T_range=[273.15, 323.15], lw=2, sigma=None,
conv_vals=False, x_var='polyelectrolyte'):
"""
Validates fit of sigma to experiments.
"""
if conv_vals:
rho_p = df_exp['rho_p [M]'].to_numpy(dtype=float)
rho_p_conv = df_exp['rho_p (conv) [M]'].to_numpy(dtype=float)
rho_s = df_exp['rho_s [M]'].to_numpy(dtype=float)
rho_s_conv = df_exp['rho_s (conv) [M]'].to_numpy(dtype=float)
# matches polymer and salt values with fixed and varied concentrations
rho_var_list_conv = []
if ch_var == 'p':
for rho_var in rho_var_list:
i = np.where(rho_var == rho_p)[0][0]
rho_var_list_conv += [rho_p_conv[i]]
rho_fix_conv = rho_s_conv[np.where(rho_fix == rho_s)[0][0]]
elif ch_var == 's':
for rho_var in rho_var_list:
i = np.where(rho_var == rho_s)[0][0]
rho_var_list_conv += [rho_s_conv[i]]
rho_fix_conv = rho_p_conv[np.where(rho_fix == rho_p)[0][0]]
# polymer-temperature plane
if conv_vals:
ax = binodal_vary_rho(data_pred, rho_var_list_conv, rho_fix_conv, ch_var,
beads_2_M_opt,
x_var=x_var, x_label=r'$\rho_p$',
sigma=sigma, T_range=T_range, marker='', lw=lw,
colors=colors, T_cels=True)
else:
ax = binodal_vary_rho(data_pred, rho_var_list, rho_fix, ch_var,
beads_2_M_opt,
x_var=x_var, x_label=r'$\rho_p$',
sigma=sigma, T_range=T_range, marker='', lw=lw,
colors=colors, T_cels=True)
# plots experimental results
for i in range(len(df_exp)):
rho_p, rho_s, T_exp, rho_p_sup, rho_p_co = nacl.read_df_exp(df_exp, i)
if ch_var == 'p':
rho_var_exp = rho_p
rho_fix_exp = rho_s
elif ch_var == 's':
rho_var_exp = rho_s
rho_fix_exp = rho_p
else:
print('Please select s or p as ch_var')
if (rho_var_exp in rho_var_list) and (rho_fix_exp == rho_fix):
# determines color
color = [colors[i] for i in range(len(colors)) if rho_var_list[i] == rho_var_exp][0]
# plots desired species concentration
if x_var == 'polyanion' or x_var == 'polycation':
# if just plotting polyanion, divides total polymer
# concentration in half (assumes symmetric solution)
rho_sup = rho_p_sup / 2
rho_co = rho_p_co / 2
elif x_var == 'polyelectrolyte':
rho_sup = rho_p_sup
rho_co = rho_p_co
# plots supernatant and coacervate compositions
ax.plot(rho_sup, T_exp, color=color, marker='o', label='supernatant')
ax.plot(rho_co, T_exp, color=color, marker='^', label='coacervate')
|
[
"bokeh.models.ColumnDataSource",
"pe.get_beads_2_M",
"numpy.abs",
"numpy.argmax",
"salt.extract_df_mu_data",
"numpy.argmin",
"matplotlib.pyplot.figure",
"salt.fixed_conc",
"numpy.unique",
"pandas.DataFrame",
"numpy.copy",
"salt.binodal_vary_f_data",
"pe.lB_2_T",
"numpy.max",
"bokeh.plotting.show",
"numpy.linspace",
"salt.lB_comparison",
"matplotlib.pyplot.subplots",
"salt.read_df_exp",
"salt.fixed_rho_total",
"salt.make_df_mu",
"matplotlib.pyplot.get_cmap",
"plotly.graph_objects.Figure",
"salt.get_mu_conc",
"matplotlib.pyplot.legend",
"bokeh.models.Range1d",
"numpy.asarray",
"pe.calc_rho_solv",
"numpy.min",
"bokeh.models.tools.HoverTool",
"pe.lB_2_T_arr",
"bokeh.plotting.figure",
"salt.binodal_vary_N_data",
"numpy.logical_and",
"salt.binodal_vary_sigma_data",
"salt.conv_ali_conc",
"numpy.diff",
"numpy.array",
"numpy.where",
"salt.load_data"
] |
[((5262, 5274), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5272, 5274), True, 'import matplotlib.pyplot as plt\n'), ((9297, 9315), 'numpy.copy', 'np.copy', (['left_list'], {}), '(left_list)\n', (9304, 9315), True, 'import numpy as np\n'), ((9332, 9351), 'numpy.copy', 'np.copy', (['right_list'], {}), '(right_list)\n', (9339, 9351), True, 'import numpy as np\n'), ((9652, 9712), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'fix_eps': 'fix_eps', 'sigma': 'sigma'}), '(lB_arr, T_range, fix_eps=fix_eps, sigma=sigma)\n', (9665, 9712), False, 'import pe\n'), ((9773, 9819), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['BJ', 'T', left, right]"}), "(columns=['BJ', 'T', left, right])\n", (9785, 9819), True, 'import pandas as pd\n'), ((9834, 9890), 'numpy.logical_and', 'np.logical_and', (['(T_arr >= T_range[0])', '(T_arr <= T_range[1])'], {}), '(T_arr >= T_range[0], T_arr <= T_range[1])\n', (9848, 9890), True, 'import numpy as np\n'), ((33758, 33776), 'numpy.unique', 'np.unique', (['z1_coll'], {}), '(z1_coll)\n', (33767, 33776), True, 'import numpy as np\n'), ((35080, 35092), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35090, 35092), True, 'import matplotlib.pyplot as plt\n'), ((45434, 45466), 'pe.get_beads_2_M', 'pe.get_beads_2_M', (['sigma'], {'SI': '(True)'}), '(sigma, SI=True)\n', (45450, 45466), False, 'import pe\n'), ((49207, 49317), 'salt.binodal_vary_N_data', 'nacl.binodal_vary_N_data', (['data_folder_N', 'mu_salt_folder_N', 'rho_s_M_N', 'N_list'], {'sigma': 'sigma_fixed', 'ext': 'ext_N'}), '(data_folder_N, mu_salt_folder_N, rho_s_M_N, N_list,\n sigma=sigma_fixed, ext=ext_N)\n', (49231, 49317), True, 'import salt as nacl\n'), ((49611, 49721), 'salt.binodal_vary_f_data', 'nacl.binodal_vary_f_data', (['data_folder_f', 'mu_salt_folder_f', 'rho_s_M_f', 'f_list'], {'sigma': 'sigma_fixed', 'ext': 'ext_f'}), '(data_folder_f, mu_salt_folder_f, rho_s_M_f, f_list,\n sigma=sigma_fixed, ext=ext_f)\n', (49635, 49721), True, 'import salt as nacl\n'), ((50050, 50170), 'salt.load_data', 'nacl.load_data', (['data_folder_sigma'], {'ext': 'ext_sigma', 'naming_structure': 'naming_structure_sigma', 'lB_lo': 'lB_lo', 'lB_hi': 'lB_hi'}), '(data_folder_sigma, ext=ext_sigma, naming_structure=\n naming_structure_sigma, lB_lo=lB_lo, lB_hi=lB_hi)\n', (50064, 50170), True, 'import salt as nacl\n'), ((50229, 50331), 'salt.binodal_vary_sigma_data', 'nacl.binodal_vary_sigma_data', (['data', 'mu_salt_folder_sigma', 'rho_s_M_sigma', 'sigma_list'], {'ext': 'ext_sigma'}), '(data, mu_salt_folder_sigma, rho_s_M_sigma,\n sigma_list, ext=ext_sigma)\n', (50257, 50331), True, 'import salt as nacl\n'), ((62350, 62403), 'salt.lB_comparison', 'nacl.lB_comparison', (['T_range', 'sigma'], {'T_room_C': 'T_room_C'}), '(T_range, sigma, T_room_C=T_room_C)\n', (62368, 62403), True, 'import salt as nacl\n'), ((62508, 62606), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': 'figsize', 'gridspec_kw': "{'height_ratios': [gridspec, 1]}", 'sharex': '(True)'}), "(2, 1, figsize=figsize, gridspec_kw={'height_ratios': [gridspec,\n 1]}, sharex=True)\n", (62520, 62606), True, 'import matplotlib.pyplot as plt\n'), ((65892, 65915), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap_name'], {}), '(cmap_name)\n', (65904, 65915), True, 'import matplotlib.pyplot as plt\n'), ((68060, 68084), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['sample'], {}), '(sample)\n', (68076, 68084), False, 'from bokeh.models import ColumnDataSource, Title, Range1d\n'), ((69778, 69789), 
'bokeh.models.tools.HoverTool', 'HoverTool', ([], {}), '()\n', (69787, 69789), False, 'from bokeh.models.tools import HoverTool\n'), ((70839, 70863), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['sample'], {}), '(sample)\n', (70855, 70863), False, 'from bokeh.models import ColumnDataSource, Title, Range1d\n'), ((72122, 72133), 'bokeh.models.tools.HoverTool', 'HoverTool', ([], {}), '()\n', (72131, 72133), False, 'from bokeh.models.tools import HoverTool\n'), ((1727, 1739), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1737, 1739), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2061), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (2030, 2061), True, 'import salt as nacl\n'), ((2310, 2353), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'sigma': 'sigma'}), '(lB_arr, T_range, sigma=sigma)\n', (2323, 2353), False, 'import pe\n'), ((3028, 3041), 'numpy.min', 'np.min', (['T_arr'], {}), '(T_arr)\n', (3034, 3041), True, 'import numpy as np\n'), ((5429, 5457), 'numpy.array', 'np.array', (['[rho_var, rho_fix]'], {}), '([rho_var, rho_fix])\n', (5437, 5457), True, 'import numpy as np\n'), ((5520, 5571), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (5540, 5571), True, 'import salt as nacl\n'), ((5820, 5863), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'sigma': 'sigma'}), '(lB_arr, T_range, sigma=sigma)\n', (5833, 5863), False, 'import pe\n'), ((10594, 10609), 'bokeh.models.Range1d', 'Range1d', (['*x_lim'], {}), '(*x_lim)\n', (10601, 10609), False, 'from bokeh.models import ColumnDataSource, Title, Range1d\n'), ((10656, 10671), 'bokeh.models.Range1d', 'Range1d', (['*y_lim'], {}), '(*y_lim)\n', (10663, 10671), False, 'from bokeh.models import ColumnDataSource, Title, Range1d\n'), ((11641, 11653), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11651, 11653), True, 'import matplotlib.pyplot as plt\n'), ((11860, 11911), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (11880, 11911), True, 'import salt as nacl\n'), ((13691, 13734), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'sigma': 'sigma'}), '(lB_arr, T_range, sigma=sigma)\n', (13704, 13734), False, 'import pe\n'), ((13753, 13809), 'numpy.logical_and', 'np.logical_and', (['(T_arr >= T_range[0])', '(T_arr <= T_range[1])'], {}), '(T_arr >= T_range[0], T_arr <= T_range[1])\n', (13767, 13809), True, 'import numpy as np\n'), ((17449, 17461), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17459, 17461), True, 'import matplotlib.pyplot as plt\n'), ((18141, 18167), 'pe.lB_2_T', 'pe.lB_2_T', (['lB'], {'sigma': 'sigma'}), '(lB, sigma=sigma)\n', (18150, 18167), False, 'import pe\n'), ((23245, 23257), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23255, 23257), True, 'import matplotlib.pyplot as plt\n'), ((23282, 23309), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (23292, 23309), True, 'import matplotlib.pyplot as plt\n'), ((24026, 24137), 'salt.make_df_mu', 'nacl.make_df_mu', (['data', 'mu_salt_folder', 'rho_salt', 'T_range', 'sigma'], {'naming_structure': 'naming_structure', 'ext': 'ext'}), '(data, mu_salt_folder, rho_salt, T_range, sigma,\n naming_structure=naming_structure, ext=ext)\n', (24041, 24137), True, 'import salt as nacl\n'), ((24223, 
24261), 'salt.extract_df_mu_data', 'nacl.extract_df_mu_data', (['df_mu', 'z_name'], {}), '(df_mu, z_name)\n', (24246, 24261), True, 'import salt as nacl\n'), ((24345, 24401), 'numpy.logical_and', 'np.logical_and', (['(T_arr >= T_range[0])', '(T_arr <= T_range[1])'], {}), '(T_arr >= T_range[0], T_arr <= T_range[1])\n', (24359, 24401), True, 'import numpy as np\n'), ((25751, 25777), 'pe.lB_2_T', 'pe.lB_2_T', (['lB'], {'sigma': 'sigma'}), '(lB, sigma=sigma)\n', (25760, 25777), False, 'import pe\n'), ((28207, 28219), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28217, 28219), True, 'import matplotlib.pyplot as plt\n'), ((28899, 28925), 'pe.lB_2_T', 'pe.lB_2_T', (['lB'], {'sigma': 'sigma'}), '(lB, sigma=sigma)\n', (28908, 28925), False, 'import pe\n'), ((32871, 32882), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (32880, 32882), True, 'import plotly.graph_objects as go\n'), ((35249, 35318), 'salt.get_mu_conc', 'nacl.get_mu_conc', (['mu_salt_folder', 'data', 'rho_salt'], {'beads_2_M': 'beads_2_M'}), '(mu_salt_folder, data, rho_salt, beads_2_M=beads_2_M)\n', (35265, 35318), True, 'import salt as nacl\n'), ((35530, 35552), 'numpy.array', 'np.array', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (35538, 35552), True, 'import numpy as np\n'), ((35573, 35596), 'numpy.array', 'np.array', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (35581, 35596), True, 'import numpy as np\n'), ((35678, 35721), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'sigma': 'sigma'}), '(lB_arr, T_range, sigma=sigma)\n', (35691, 35721), False, 'import pe\n'), ((35740, 35796), 'numpy.logical_and', 'np.logical_and', (['(T_arr >= T_range[0])', '(T_arr <= T_range[1])'], {}), '(T_arr >= T_range[0], T_arr <= T_range[1])\n', (35754, 35796), True, 'import numpy as np\n'), ((36953, 37032), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(legend_x, legend_y)', 'fontsize': '(12)'}), "(loc='center left', bbox_to_anchor=(legend_x, legend_y), fontsize=12)\n", (36963, 37032), True, 'import matplotlib.pyplot as plt\n'), ((37051, 37074), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (37061, 37074), True, 'import matplotlib.pyplot as plt\n'), ((37468, 37480), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37478, 37480), True, 'import matplotlib.pyplot as plt\n'), ((38525, 38537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (38535, 38537), True, 'import matplotlib.pyplot as plt\n'), ((39620, 39632), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (39630, 39632), True, 'import matplotlib.pyplot as plt\n'), ((41377, 41389), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (41387, 41389), True, 'import matplotlib.pyplot as plt\n'), ((41571, 41599), 'numpy.array', 'np.array', (['[rho_var, rho_fix]'], {}), '([rho_var, rho_fix])\n', (41579, 41599), True, 'import numpy as np\n'), ((41662, 41713), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (41682, 41713), True, 'import salt as nacl\n'), ((43493, 43536), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'sigma': 'sigma'}), '(lB_arr, T_range, sigma=sigma)\n', (43506, 43536), False, 'import pe\n'), ((43555, 43611), 'numpy.logical_and', 'np.logical_and', (['(T_arr >= T_range[0])', '(T_arr <= T_range[1])'], {}), '(T_arr >= T_range[0], T_arr <= T_range[1])\n', (43569, 43611), True, 'import numpy as np\n'), ((45527, 45539), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (45537, 45539), True, 'import matplotlib.pyplot as plt\n'), ((45564, 45591), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (45574, 45591), True, 'import matplotlib.pyplot as plt\n'), ((48997, 49009), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (49007, 49009), True, 'import matplotlib.pyplot as plt\n'), ((49034, 49061), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (49044, 49061), True, 'import matplotlib.pyplot as plt\n'), ((59037, 59049), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (59047, 59049), True, 'import matplotlib.pyplot as plt\n'), ((59074, 59101), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (59084, 59101), True, 'import matplotlib.pyplot as plt\n'), ((60928, 60940), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (60938, 60940), True, 'import matplotlib.pyplot as plt\n'), ((60965, 60992), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (60975, 60992), True, 'import matplotlib.pyplot as plt\n'), ((66822, 66833), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (66831, 66833), True, 'import plotly.graph_objects as go\n'), ((67977, 68012), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'w', 'plot_height': 'h'}), '(plot_width=w, plot_height=h)\n', (67983, 68012), False, 'from bokeh.plotting import figure, output_file, show\n'), ((70140, 70151), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (70149, 70151), True, 'import plotly.graph_objects as go\n'), ((70783, 70791), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (70789, 70791), False, 'from bokeh.plotting import figure, output_file, show\n'), ((75340, 75367), 'salt.read_df_exp', 'nacl.read_df_exp', (['df_exp', 'i'], {}), '(df_exp, i)\n', (75356, 75367), True, 'import salt as nacl\n'), ((2577, 2592), 'numpy.array', 'np.array', (['alpha'], {}), '(alpha)\n', (2585, 2592), True, 'import numpy as np\n'), ((12404, 12426), 'numpy.array', 'np.array', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (12412, 12426), True, 'import numpy as np\n'), ((12451, 12474), 'numpy.array', 'np.array', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (12459, 12474), True, 'import numpy as np\n'), ((18463, 18514), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (18483, 18514), True, 'import salt as nacl\n'), ((18720, 18745), 'numpy.asarray', 'np.asarray', (["results['lB']"], {}), "(results['lB'])\n", (18730, 18745), True, 'import numpy as np\n'), ((18922, 18945), 'numpy.asarray', 'np.asarray', (['rho_CI_list'], {}), '(rho_CI_list)\n', (18932, 18945), True, 'import numpy as np\n'), ((19020, 19044), 'numpy.asarray', 'np.asarray', (['rho_CII_list'], {}), '(rho_CII_list)\n', (19030, 19044), True, 'import numpy as np\n'), ((29169, 29197), 'numpy.array', 'np.array', (['[rho_var, rho_fix]'], {}), '([rho_var, rho_fix])\n', (29177, 29197), True, 'import numpy as np\n'), ((29270, 29321), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (29290, 29321), True, 'import salt as nacl\n'), ((29527, 29552), 'numpy.asarray', 'np.asarray', (["results['lB']"], {}), "(results['lB'])\n", (29537, 29552), True, 'import numpy as np\n'), ((29729, 29752), 'numpy.asarray', 'np.asarray', 
(['rho_CI_list'], {}), '(rho_CI_list)\n', (29739, 29752), True, 'import numpy as np\n'), ((29827, 29851), 'numpy.asarray', 'np.asarray', (['rho_CII_list'], {}), '(rho_CII_list)\n', (29837, 29851), True, 'import numpy as np\n'), ((35382, 35438), 'salt.fixed_conc', 'nacl.fixed_conc', (['mu_conc', 'data', 'qty'], {'beads_2_M': 'beads_2_M'}), '(mu_conc, data, qty, beads_2_M=beads_2_M)\n', (35397, 35438), True, 'import salt as nacl\n'), ((42206, 42228), 'numpy.array', 'np.array', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (42214, 42228), True, 'import numpy as np\n'), ((42253, 42276), 'numpy.array', 'np.array', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (42261, 42276), True, 'import numpy as np\n'), ((45937, 45985), 'salt.conv_ali_conc', 'nacl.conv_ali_conc', (['df_exp', 'rho_p_raw', 'rho_s_raw'], {}), '(df_exp, rho_p_raw, rho_s_raw)\n', (45955, 45985), True, 'import salt as nacl\n'), ((46805, 46880), 'salt.read_df_exp', 'nacl.read_df_exp', (['df_exp', 'i'], {'conv_vals': 'conv_vals', 'read_sigma': 'plot_errorbars'}), '(df_exp, i, conv_vals=conv_vals, read_sigma=plot_errorbars)\n', (46821, 46880), True, 'import salt as nacl\n'), ((51226, 51277), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (51246, 51277), True, 'import salt as nacl\n'), ((51869, 51876), 'bokeh.plotting.show', 'show', (['p'], {}), '(p)\n', (51873, 51876), False, 'from bokeh.plotting import figure, output_file, show\n'), ((53964, 53975), 'numpy.diff', 'np.diff', (['z1'], {}), '(z1)\n', (53971, 53975), True, 'import numpy as np\n'), ((53999, 54017), 'numpy.argmax', 'np.argmax', (['z1_diff'], {}), '(z1_diff)\n', (54008, 54017), True, 'import numpy as np\n'), ((65949, 65969), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (65960, 65969), True, 'import numpy as np\n'), ((2443, 2460), 'numpy.asarray', 'np.asarray', (['alpha'], {}), '(alpha)\n', (2453, 2460), True, 'import numpy as np\n'), ((3127, 3143), 'numpy.argmin', 'np.argmin', (['T_arr'], {}), '(T_arr)\n', (3136, 3143), True, 'import numpy as np\n'), ((3243, 3273), 'numpy.abs', 'np.abs', (['(alpha_single_phase - 1)'], {}), '(alpha_single_phase - 1)\n', (3249, 3273), True, 'import numpy as np\n'), ((3523, 3555), 'numpy.abs', 'np.abs', (['(alpha_single_phase - 0.5)'], {}), '(alpha_single_phase - 0.5)\n', (3529, 3555), True, 'import numpy as np\n'), ((5953, 5970), 'numpy.asarray', 'np.asarray', (['alpha'], {}), '(alpha)\n', (5963, 5970), True, 'import numpy as np\n'), ((6099, 6114), 'numpy.array', 'np.array', (['alpha'], {}), '(alpha)\n', (6107, 6114), True, 'import numpy as np\n'), ((12533, 12555), 'numpy.array', 'np.array', (['rho_PAI_list'], {}), '(rho_PAI_list)\n', (12541, 12555), True, 'import numpy as np\n'), ((12580, 12603), 'numpy.array', 'np.array', (['rho_PAII_list'], {}), '(rho_PAII_list)\n', (12588, 12603), True, 'import numpy as np\n'), ((18875, 18899), 'numpy.asarray', 'np.asarray', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (18885, 18899), True, 'import numpy as np\n'), ((18971, 18996), 'numpy.asarray', 'np.asarray', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (18981, 18996), True, 'import numpy as np\n'), ((29682, 29706), 'numpy.asarray', 'np.asarray', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (29692, 29706), True, 'import numpy as np\n'), ((29778, 29803), 'numpy.asarray', 'np.asarray', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (29788, 29803), True, 'import numpy as np\n'), ((42335, 42357), 'numpy.array', 'np.array', (['rho_PAI_list'], {}), 
'(rho_PAI_list)\n', (42343, 42357), True, 'import numpy as np\n'), ((42382, 42405), 'numpy.array', 'np.array', (['rho_PAII_list'], {}), '(rho_PAII_list)\n', (42390, 42405), True, 'import numpy as np\n'), ((12659, 12680), 'numpy.array', 'np.array', (['rho_CI_list'], {}), '(rho_CI_list)\n', (12667, 12680), True, 'import numpy as np\n'), ((12705, 12727), 'numpy.array', 'np.array', (['rho_CII_list'], {}), '(rho_CII_list)\n', (12713, 12727), True, 'import numpy as np\n'), ((15402, 15427), 'numpy.max', 'np.max', (['left_arr[liq_h2o]'], {}), '(left_arr[liq_h2o])\n', (15408, 15427), True, 'import numpy as np\n'), ((15450, 15476), 'numpy.min', 'np.min', (['right_arr[liq_h2o]'], {}), '(right_arr[liq_h2o])\n', (15456, 15476), True, 'import numpy as np\n'), ((42461, 42482), 'numpy.array', 'np.array', (['rho_CI_list'], {}), '(rho_CI_list)\n', (42469, 42482), True, 'import numpy as np\n'), ((42507, 42529), 'numpy.array', 'np.array', (['rho_CII_list'], {}), '(rho_CII_list)\n', (42515, 42529), True, 'import numpy as np\n'), ((55198, 55215), 'numpy.asarray', 'np.asarray', (['coord'], {}), '(coord)\n', (55208, 55215), True, 'import numpy as np\n'), ((12782, 12803), 'numpy.array', 'np.array', (['rho_AI_list'], {}), '(rho_AI_list)\n', (12790, 12803), True, 'import numpy as np\n'), ((12828, 12850), 'numpy.array', 'np.array', (['rho_AII_list'], {}), '(rho_AII_list)\n', (12836, 12850), True, 'import numpy as np\n'), ((42584, 42605), 'numpy.array', 'np.array', (['rho_AI_list'], {}), '(rho_AI_list)\n', (42592, 42605), True, 'import numpy as np\n'), ((42630, 42652), 'numpy.array', 'np.array', (['rho_AII_list'], {}), '(rho_AII_list)\n', (42638, 42652), True, 'import numpy as np\n'), ((74127, 74153), 'numpy.where', 'np.where', (['(rho_var == rho_p)'], {}), '(rho_var == rho_p)\n', (74135, 74153), True, 'import numpy as np\n'), ((74251, 74277), 'numpy.where', 'np.where', (['(rho_fix == rho_s)'], {}), '(rho_fix == rho_s)\n', (74259, 74277), True, 'import numpy as np\n'), ((12907, 12961), 'pe.calc_rho_solv', 'pe.calc_rho_solv', (['rho_PCI_list', 'rho_CI_list', 'beads_2_M'], {}), '(rho_PCI_list, rho_CI_list, beads_2_M)\n', (12923, 12961), False, 'import pe\n'), ((13070, 13126), 'pe.calc_rho_solv', 'pe.calc_rho_solv', (['rho_PCII_list', 'rho_CII_list', 'beads_2_M'], {}), '(rho_PCII_list, rho_CII_list, beads_2_M)\n', (13086, 13126), False, 'import pe\n'), ((42709, 42763), 'pe.calc_rho_solv', 'pe.calc_rho_solv', (['rho_PCI_list', 'rho_CI_list', 'beads_2_M'], {}), '(rho_PCI_list, rho_CI_list, beads_2_M)\n', (42725, 42763), False, 'import pe\n'), ((42872, 42928), 'pe.calc_rho_solv', 'pe.calc_rho_solv', (['rho_PCII_list', 'rho_CII_list', 'beads_2_M'], {}), '(rho_PCII_list, rho_CII_list, beads_2_M)\n', (42888, 42928), False, 'import pe\n'), ((74374, 74400), 'numpy.where', 'np.where', (['(rho_var == rho_s)'], {}), '(rho_var == rho_s)\n', (74382, 74400), True, 'import numpy as np\n'), ((74498, 74524), 'numpy.where', 'np.where', (['(rho_fix == rho_p)'], {}), '(rho_fix == rho_p)\n', (74506, 74524), True, 'import numpy as np\n'), ((19223, 19242), 'numpy.abs', 'np.abs', (['(lB_arr - lB)'], {}), '(lB_arr - lB)\n', (19229, 19242), True, 'import numpy as np\n'), ((30030, 30049), 'numpy.abs', 'np.abs', (['(lB_arr - lB)'], {}), '(lB_arr - lB)\n', (30036, 30049), True, 'import numpy as np\n'), ((13277, 13299), 'numpy.array', 'np.array', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (13285, 13299), True, 'import numpy as np\n'), ((13302, 13324), 'numpy.array', 'np.array', (['rho_PAI_list'], {}), '(rho_PAI_list)\n', (13310, 13324), True, 
'import numpy as np\n'), ((13349, 13372), 'numpy.array', 'np.array', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (13357, 13372), True, 'import numpy as np\n'), ((13375, 13398), 'numpy.array', 'np.array', (['rho_PAII_list'], {}), '(rho_PAII_list)\n', (13383, 13398), True, 'import numpy as np\n'), ((13452, 13473), 'numpy.array', 'np.array', (['rho_CI_list'], {}), '(rho_CI_list)\n', (13460, 13473), True, 'import numpy as np\n'), ((13498, 13520), 'numpy.array', 'np.array', (['rho_CII_list'], {}), '(rho_CII_list)\n', (13506, 13520), True, 'import numpy as np\n'), ((43079, 43101), 'numpy.array', 'np.array', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (43087, 43101), True, 'import numpy as np\n'), ((43104, 43126), 'numpy.array', 'np.array', (['rho_PAI_list'], {}), '(rho_PAI_list)\n', (43112, 43126), True, 'import numpy as np\n'), ((43151, 43174), 'numpy.array', 'np.array', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (43159, 43174), True, 'import numpy as np\n'), ((43177, 43200), 'numpy.array', 'np.array', (['rho_PAII_list'], {}), '(rho_PAII_list)\n', (43185, 43200), True, 'import numpy as np\n'), ((43254, 43275), 'numpy.array', 'np.array', (['rho_CI_list'], {}), '(rho_CI_list)\n', (43262, 43275), True, 'import numpy as np\n'), ((43300, 43322), 'numpy.array', 'np.array', (['rho_CII_list'], {}), '(rho_CII_list)\n', (43308, 43322), True, 'import numpy as np\n')]
|
import typing
import sys
import numpy as np
import numba as nb
@nb.njit
def csgraph_to_directed(g: np.ndarray) -> np.ndarray:
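# Duplicate every edge with its endpoints swapped so each undirected edge appears in both directions.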
m = len(g)
g = np.vstack((g, g))
g[m:, :2] = g[m:, 1::-1]
return g
@nb.njit
def sort_csgraph(
n: int,
g: np.ndarray,
) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
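# Sort edges by source vertex and build edge_idx so g[edge_idx[v]:edge_idx[v + 1]] are the edges leaving v; original_idx maps sorted rows back to input order.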
sort_idx = np.argsort(g[:, 0], kind='mergesort')
g = g[sort_idx]
edge_idx = np.searchsorted(g[:, 0], np.arange(n + 1))
original_idx = np.arange(len(g))[sort_idx]
return g, edge_idx, original_idx
@nb.njit
def euler_tour_edge(
g: np.ndarray,
edge_idx: np.ndarray,
root: int,
) -> typing.Tuple[(np.ndarray, ) * 3]:
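# Iterative DFS from root: builds the Euler tour of edges (negative entries mark subtree exits) plus parent and depth for every vertex.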
n = g[:, :2].max() + 1
parent = np.full(n, -1, np.int64)
depth = np.zeros(n, np.int64)
tour = np.empty(n * 2, np.int64)
st = [root]
for i in range(2 * n):
u = st.pop()
tour[i] = u
if u < 0: continue
st.append(-u - 1)
for v in g[edge_idx[u]:edge_idx[u + 1], 1][::-1]:
if v == parent[u]: continue
parent[v] = u
depth[v] = depth[u] + 1
st.append(v)
return tour, parent, depth
@nb.njit
def euler_tour_node(
g: np.ndarray,
edge_idx: np.ndarray,
root: int,
) -> typing.Tuple[(np.ndarray, ) * 4]:
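# Turn the edge tour into a vertex tour and record the first tour index of each vertex.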
tour, parent, depth = euler_tour_edge(g, edge_idx, root)
n = len(tour) >> 1
tour = tour[:-1]
first_idx = np.full(n, -1, np.int64)
for i in range(2 * n - 1):
u = tour[i]
if u < 0:
tour[i] = parent[~u]
continue
first_idx[u] = i
return tour, first_idx, parent, depth
@nb.njit
def uf_build(n: int) -> np.ndarray:
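# Union-find (disjoint set union); a negative entry marks a root and stores minus its component size.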
return np.full(n, -1, np.int64)
@nb.njit
def uf_find(uf: np.ndarray, u: int) -> int:
if uf[u] < 0: return u
uf[u] = uf_find(uf, uf[u])
return uf[u]
@nb.njit
def uf_unite(
uf: np.ndarray,
u: int,
v: int,
) -> typing.NoReturn:
u, v = uf_find(uf, u), uf_find(uf, v)
if u == v: return
if uf[u] > uf[v]: u, v = v, u
uf[u] += uf[v]
uf[v] = u
@nb.njit
def lca(
g: np.ndarray,
edge_idx: np.ndarray,
vu: np.ndarray,
) -> np.ndarray:
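# Offline (Tarjan) LCA: orient each query so the later-visited endpoint comes first, then sweep the Euler tour and answer queries with a union-find of already-finished subtrees.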
m = len(vu)
tour, parent, _ = euler_tour_edge(g, edge_idx, 0)
n = len(tour) >> 1
first_idx = np.full(n, -1, np.int64)
for i in range(len(tour)):
u = tour[i]
if u < 0: continue
first_idx[u] = i
for i in range(m):
v, u = vu[i]
if first_idx[v] < first_idx[u]: vu[i] = vu[i, ::-1]
vu, query_idx, original_idx = sort_csgraph(n, vu)
_lca = np.empty(m, np.int64)
uf = uf_build(n)
ancestor = np.arange(n)
for v in tour[:-1]:
if v >= 0: continue
v = ~v
for j in range(query_idx[v], query_idx[v + 1]):
u = vu[j, 1]
_lca[original_idx[j]] = ancestor[uf_find(uf, u)]
p = parent[v]
uf_unite(uf, v, p)
ancestor[uf_find(uf, p)] = p
return _lca
@nb.njit((nb.i8[:, :], nb.i8[:, :]), cache=True)
def solve(xy: np.ndarray, ab: np.ndarray) -> typing.NoReturn:
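# Answer per query: the number of vertices on the tree path between the pair, i.e. the edge distance through the LCA plus one.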
n = len(xy) + 1
g = csgraph_to_directed(xy)
g, edge_idx, _ = sort_csgraph(n, g)
_, _, depth = euler_tour_edge(g, edge_idx, 0)
_lca = lca(g, edge_idx, ab)
for i in range(len(ab)):
u, v = ab[i]
l = _lca[i]
d = depth[u] + depth[v] - 2 * depth[l] + 1
print(d)
def main() -> typing.NoReturn:
n = int(input())
I = np.array(
sys.stdin.read().split(),
dtype=np.int64,
)
xy = I[:2 * (n - 1)].reshape(n - 1, 2) - 1
ab = I[2 * n - 1:].reshape(-1, 2) - 1
solve(xy, ab)
main()
|
[
"numpy.full",
"sys.stdin.read",
"numpy.empty",
"numba.njit",
"numpy.zeros",
"numpy.argsort",
"numpy.arange",
"numpy.vstack"
] |
[((2746, 2793), 'numba.njit', 'nb.njit', (['(nb.i8[:, :], nb.i8[:, :])'], {'cache': '(True)'}), '((nb.i8[:, :], nb.i8[:, :]), cache=True)\n', (2753, 2793), True, 'import numba as nb\n'), ((154, 171), 'numpy.vstack', 'np.vstack', (['(g, g)'], {}), '((g, g))\n', (163, 171), True, 'import numpy as np\n'), ((337, 374), 'numpy.argsort', 'np.argsort', (['g[:, 0]'], {'kind': '"""mergesort"""'}), "(g[:, 0], kind='mergesort')\n", (347, 374), True, 'import numpy as np\n'), ((691, 715), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (698, 715), True, 'import numpy as np\n'), ((726, 747), 'numpy.zeros', 'np.zeros', (['n', 'np.int64'], {}), '(n, np.int64)\n', (734, 747), True, 'import numpy as np\n'), ((757, 782), 'numpy.empty', 'np.empty', (['(n * 2)', 'np.int64'], {}), '(n * 2, np.int64)\n', (765, 782), True, 'import numpy as np\n'), ((1326, 1350), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (1333, 1350), True, 'import numpy as np\n'), ((1571, 1595), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (1578, 1595), True, 'import numpy as np\n'), ((2130, 2154), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (2137, 2154), True, 'import numpy as np\n'), ((2402, 2423), 'numpy.empty', 'np.empty', (['m', 'np.int64'], {}), '(m, np.int64)\n', (2410, 2423), True, 'import numpy as np\n'), ((2456, 2468), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2465, 2468), True, 'import numpy as np\n'), ((431, 447), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (440, 447), True, 'import numpy as np\n'), ((3213, 3229), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (3227, 3229), False, 'import sys\n')]
|
"""
=============================================
Multiclass Classification with NumPy and TMVA
=============================================
"""
from array import array
import numpy as np
from numpy.random import RandomState
from root_numpy.tmva import add_classification_events, evaluate_reader
from root_numpy import ROOT_VERSION
import matplotlib.pyplot as plt
from ROOT import TMVA, TFile, TCut
plt.style.use('ggplot')
RNG = RandomState(42)
# Construct an example multiclass dataset
n_events = 1000
class_0 = RNG.multivariate_normal(
[-2, -2], np.diag([1, 1]), n_events)
class_1 = RNG.multivariate_normal(
[0, 2], np.diag([1, 1]), n_events)
class_2 = RNG.multivariate_normal(
[2, -2], np.diag([1, 1]), n_events)
X = np.concatenate([class_0, class_1, class_2])
y = np.ones(X.shape[0])
w = RNG.randint(1, 10, n_events * 3)
y[:class_0.shape[0]] *= 0
y[-class_2.shape[0]:] *= 2
permute = RNG.permutation(y.shape[0])
X = X[permute]
y = y[permute]
# Split into training and test datasets
X_train, y_train, w_train = X[:n_events], y[:n_events], w[:n_events]
X_test, y_test, w_test = X[n_events:], y[n_events:], w[n_events:]
output = TFile('tmva_output.root', 'recreate')
factory = TMVA.Factory('classifier', output,
'AnalysisType=Multiclass:'
'!V:Silent:!DrawProgressBar')
if ROOT_VERSION >= '6.07/04':
data = TMVA.DataLoader('.')
else:
data = factory
for n in range(2):
data.AddVariable('f{0}'.format(n), 'F')
# Call root_numpy's utility functions to add events from the arrays
add_classification_events(data, X_train, y_train, weights=w_train)
add_classification_events(data, X_test, y_test, weights=w_test, test=True)
# The following line is necessary if events have been added individually:
data.PrepareTrainingAndTestTree(TCut('1'), 'NormMode=EqualNumEvents')
# Train an MLP
if ROOT_VERSION >= '6.07/04':
BookMethod = factory.BookMethod
else:
BookMethod = TMVA.Factory.BookMethod
BookMethod(data, 'MLP', 'MLP',
'NeuronType=tanh:NCycles=200:HiddenLayers=N+2,2:'
'TestRate=5:EstimatorType=MSE')
factory.TrainAllMethods()
# Classify the test dataset with the trained MLP
reader = TMVA.Reader()
for n in range(2):
reader.AddVariable('f{0}'.format(n), array('f', [0.]))
reader.BookMVA('MLP', 'weights/classifier_MLP.weights.xml')
class_proba = evaluate_reader(reader, 'MLP', X_test)
# Plot the decision boundaries
plot_colors = "rgb"
plot_step = 0.02
class_names = "ABC"
cmap = plt.get_cmap('Paired')
fig = plt.figure(figsize=(5, 5))
fig.patch.set_alpha(0)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = evaluate_reader(reader, 'MLP', np.c_[xx.ravel(), yy.ravel()])
Z = np.argmax(Z, axis=1) - 1
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=cmap, alpha=0.5)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(3), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=cmap,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
plt.tight_layout()
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.argmax",
"numpy.ones",
"ROOT.TFile",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contourf",
"numpy.arange",
"ROOT.TMVA.DataLoader",
"numpy.diag",
"matplotlib.pyplot.tight_layout",
"root_numpy.tmva.add_classification_events",
"numpy.random.RandomState",
"array.array",
"ROOT.TMVA.Reader",
"matplotlib.pyplot.show",
"root_numpy.tmva.evaluate_reader",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"ROOT.TCut",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.scatter",
"ROOT.TMVA.Factory",
"matplotlib.pyplot.axis",
"numpy.where",
"matplotlib.pyplot.xlabel"
] |
[((401, 424), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (414, 424), True, 'import matplotlib.pyplot as plt\n'), ((431, 446), 'numpy.random.RandomState', 'RandomState', (['(42)'], {}), '(42)\n', (442, 446), False, 'from numpy.random import RandomState\n'), ((735, 778), 'numpy.concatenate', 'np.concatenate', (['[class_0, class_1, class_2]'], {}), '([class_0, class_1, class_2])\n', (749, 778), True, 'import numpy as np\n'), ((783, 802), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (790, 802), True, 'import numpy as np\n'), ((1147, 1184), 'ROOT.TFile', 'TFile', (['"""tmva_output.root"""', '"""recreate"""'], {}), "('tmva_output.root', 'recreate')\n", (1152, 1184), False, 'from ROOT import TMVA, TFile, TCut\n'), ((1195, 1287), 'ROOT.TMVA.Factory', 'TMVA.Factory', (['"""classifier"""', 'output', '"""AnalysisType=Multiclass:!V:Silent:!DrawProgressBar"""'], {}), "('classifier', output,\n 'AnalysisType=Multiclass:!V:Silent:!DrawProgressBar')\n", (1207, 1287), False, 'from ROOT import TMVA, TFile, TCut\n'), ((1553, 1619), 'root_numpy.tmva.add_classification_events', 'add_classification_events', (['data', 'X_train', 'y_train'], {'weights': 'w_train'}), '(data, X_train, y_train, weights=w_train)\n', (1578, 1619), False, 'from root_numpy.tmva import add_classification_events, evaluate_reader\n'), ((1620, 1694), 'root_numpy.tmva.add_classification_events', 'add_classification_events', (['data', 'X_test', 'y_test'], {'weights': 'w_test', 'test': '(True)'}), '(data, X_test, y_test, weights=w_test, test=True)\n', (1645, 1694), False, 'from root_numpy.tmva import add_classification_events, evaluate_reader\n'), ((2180, 2193), 'ROOT.TMVA.Reader', 'TMVA.Reader', ([], {}), '()\n', (2191, 2193), False, 'from ROOT import TMVA, TFile, TCut\n'), ((2346, 2384), 'root_numpy.tmva.evaluate_reader', 'evaluate_reader', (['reader', '"""MLP"""', 'X_test'], {}), "(reader, 'MLP', X_test)\n", (2361, 2384), False, 'from root_numpy.tmva import add_classification_events, evaluate_reader\n'), ((2481, 2503), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Paired"""'], {}), "('Paired')\n", (2493, 2503), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (2521, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2899, 2944), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'cmap': 'cmap', 'alpha': '(0.5)'}), '(xx, yy, Z, cmap=cmap, alpha=0.5)\n', (2911, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2945, 2962), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (2953, 2962), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3204), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x_min', 'x_max'], {}), '(x_min, x_max)\n', (3190, 3204), True, 'import matplotlib.pyplot as plt\n'), ((3205, 3227), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y_min', 'y_max'], {}), '(y_min, y_max)\n', (3213, 3227), True, 'import matplotlib.pyplot as plt\n'), ((3228, 3257), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3238, 3257), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3273), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (3268, 3273), True, 'import matplotlib.pyplot as plt\n'), ((3274, 3289), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (3284, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3290, 3320), 
'matplotlib.pyplot.title', 'plt.title', (['"""Decision Boundary"""'], {}), "('Decision Boundary')\n", (3299, 3320), True, 'import matplotlib.pyplot as plt\n'), ((3322, 3340), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3338, 3340), True, 'import matplotlib.pyplot as plt\n'), ((3341, 3351), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3349, 3351), True, 'import matplotlib.pyplot as plt\n'), ((555, 570), 'numpy.diag', 'np.diag', (['[1, 1]'], {}), '([1, 1])\n', (562, 570), True, 'import numpy as np\n'), ((629, 644), 'numpy.diag', 'np.diag', (['[1, 1]'], {}), '([1, 1])\n', (636, 644), True, 'import numpy as np\n'), ((704, 719), 'numpy.diag', 'np.diag', (['[1, 1]'], {}), '([1, 1])\n', (711, 719), True, 'import numpy as np\n'), ((1375, 1395), 'ROOT.TMVA.DataLoader', 'TMVA.DataLoader', (['"""."""'], {}), "('.')\n", (1390, 1395), False, 'from ROOT import TMVA, TFile, TCut\n'), ((1801, 1810), 'ROOT.TCut', 'TCut', (['"""1"""'], {}), "('1')\n", (1805, 1810), False, 'from ROOT import TMVA, TFile, TCut\n'), ((2686, 2720), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'plot_step'], {}), '(x_min, x_max, plot_step)\n', (2695, 2720), True, 'import numpy as np\n'), ((2743, 2777), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'plot_step'], {}), '(y_min, y_max, plot_step)\n', (2752, 2777), True, 'import numpy as np\n'), ((2850, 2870), 'numpy.argmax', 'np.argmax', (['Z'], {'axis': '(1)'}), '(Z, axis=1)\n', (2859, 2870), True, 'import numpy as np\n'), ((3057, 3073), 'numpy.where', 'np.where', (['(y == i)'], {}), '(y == i)\n', (3065, 3073), True, 'import numpy as np\n'), ((3078, 3149), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[idx, 0]', 'X[idx, 1]'], {'c': 'c', 'cmap': 'cmap', 'label': "('Class %s' % n)"}), "(X[idx, 0], X[idx, 1], c=c, cmap=cmap, label='Class %s' % n)\n", (3089, 3149), True, 'import matplotlib.pyplot as plt\n'), ((2254, 2271), 'array.array', 'array', (['"""f"""', '[0.0]'], {}), "('f', [0.0])\n", (2259, 2271), False, 'from array import array\n')]
|
from drl_negotiation.scenario import BaseScenario
from drl_negotiation.core import TrainWorld, MySCML2020Agent
from drl_negotiation.myagent import MyComponentsBasedAgent
from drl_negotiation.hyperparameters import *
from negmas.helpers import get_class
from scml.scml2020 import (
DecentralizingAgent,
BuyCheapSellExpensiveAgent,
SCML2020World,
is_system_agent,
)
from typing import Union
import numpy as np
class Scenario(BaseScenario):
def make_world(self, config=None) -> TrainWorld:
# configuration, for Scenario scml
if config is None:
agent_types = [get_class(agent_type, ) for agent_type in TRAINING_AGENT_TYPES]
n_steps = N_STEPS
world_configuration = SCML2020World.generate(
agent_types=agent_types,
n_steps=n_steps
)
else:
world_configuration = SCML2020World.generate(
agent_types=config['agent_types'],
agent_params=config['agent_params'][:-2],
n_steps=config['n_steps']
)
world = TrainWorld(configuration=world_configuration)
if config is None:
self.reset_world(world)
return world
def reset_world(self, world):
# callback, reset
# reset world, agents, factories
# fixed position
agent_types = world.configuration['agent_types']
agent_params = world.configuration['agent_params'][:-2]
n_steps = world.configuration['n_steps']
reset_configuration = SCML2020World.generate(
#TODO: [Future work Improvement] could be reset
agent_types=agent_types,
agent_params=agent_params,
n_steps=n_steps
)
world.__init__(configuration=reset_configuration)
def benchmark_data(self, agent, world, seller=True):
#TODO: data for benchmarking purposes, info_callback,
# will be rendered when display is true
# how to compare different companies, Ratio Analysis
# https://www.investopedia.com/ask/answers/032315/how-does-ratio-analysis-make-it-easier-compare-different-companies.asp
# price-to-earnings ratio and net profit margin
# Margin Ratios and Return Ratios
# https://corporatefinanceinstitute.com/resources/knowledge/finance/profitability-ratios/
profitability = []
initial_balances = []
factories = [_ for _ in world.factories if not is_system_agent(_.agent_id)]
for i, factory in enumerate(factories):
initial_balances.append(factory.initial_balance)
normalize = all(_ != 0 for _ in initial_balances)
for _ in world.agents:
if world.agents[_].action_callback == "system": continue
if world.agents[_] in world.heuristic_agents:
if normalize:
profitability.append(
(agent.state.f[2] - agent.state.f[0]) / agent.state.f[0] -
([f.current_balance for f in factories if f.agent_id == world.agents[_].id][0] -
[f.initial_balance for f in factories if f.agent_id == world.agents[_].id][0]) /
[f.initial_balance for f in factories if f.agent_id == world.agents[_].id][0]
)
else:
profitability.append(
(agent.state.f[2] - agent.state.f[0]) -
([f.current_balance for f in factories if f.agent_id == world.agents[_].id][0] -
[f.initial_balance for f in factories if f.agent_id == world.agents[_].id][0])
)
return {"profitability": profitability}
def good_agents(self, world):
return [agent for agent in world.agents if not agent.adversary]
def adversaries(self, world):
return [agent for agent in world.agents if agent.adversary]
def reward(self, agent, world, seller=True):
# callback, reward
# Delayed reward problem????
# Keep this in mind when writing reward functions: You get what you incentivize, not what you intend.
# idea 1: external rewards, e.g. balance - initial balance for agent, -(balance - initial balance) for adversary agent
# idea 2: Intrinsic motivation rewards.
# On Learning Intrinsic Rewards for Policy Gradient Methods, https://arxiv.org/abs/1804.06459
return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)
def agent_reward(self, agent, world):
# 1. Difference of balance with the end of last step, begin of current step
# 2. Difference of balance with the other agents
rew = 0
# the agent started a negotiation in this world step (not counting the initial state)
if agent.state.o_negotiation_step == agent.awi.current_step:
rew = (agent.state.f[2]- agent.state.f[1]) / (agent.state.f[0]) * REW_FACTOR
gap = []
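# Penalize by the mean relative balance change of every other active (non-system) agent.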
for entity in world.entities:
if entity is agent: continue
if entity.action_callback == "system": continue
if entity.action_callback is None: continue
initial_balance = [_.initial_balance for _ in world.factories if _.agent_id == entity.id][0]
current_balance = [_.current_balance for _ in world.factories if _.agent_id == entity.id][0]
gap.append((current_balance - initial_balance) / initial_balance)
rew -= np.mean(np.array(gap))
return rew
def adversary_reward(self, agent, world):
#TODO: keep the good agents near the initial funds
# neg reward
# pos reward
# agent.init_f - agent.f
rew = 0
return rew
def observation(self, agent: Union[MyComponentsBasedAgent, MySCML2020Agent], world: Union[TrainWorld], seller=True):
# get all observations,
# callback: observation
_obs = agent._get_obs(seller=seller)
#2. Economic gap with others, extra information
# economic_gaps = []
#
# for entity in world.entities:
# if entity is agent: continue
# economic_gaps.append(entity.state.f - agent.state.f)
#
# economic_gaps = np.array(economic_gaps)
#return np.concatenate(economic_gaps + o_m.flatten() + o_a + o_u_c + o_u_e + o_u_t + o_q_n.flatten() + o_t_c)
# return np.concatenate((economic_gaps.flatten(), _obs))
return _obs
def done(self, agent, world, seller=True):
# callback of done
# simulation is end
if world.world_done:
return True
# agent is bankrupt
return [_.is_bankrupt for _ in world.factories if _.agent_id == agent.id][0]
|
[
"scml.scml2020.SCML2020World.generate",
"scml.scml2020.is_system_agent",
"drl_negotiation.core.TrainWorld",
"numpy.array",
"negmas.helpers.get_class"
] |
[((1138, 1183), 'drl_negotiation.core.TrainWorld', 'TrainWorld', ([], {'configuration': 'world_configuration'}), '(configuration=world_configuration)\n', (1148, 1183), False, 'from drl_negotiation.core import TrainWorld, MySCML2020Agent\n'), ((1599, 1694), 'scml.scml2020.SCML2020World.generate', 'SCML2020World.generate', ([], {'agent_types': 'agent_types', 'agent_params': 'agent_params', 'n_steps': 'n_steps'}), '(agent_types=agent_types, agent_params=agent_params,\n n_steps=n_steps)\n', (1621, 1694), False, 'from scml.scml2020 import DecentralizingAgent, BuyCheapSellExpensiveAgent, SCML2020World, is_system_agent\n'), ((773, 837), 'scml.scml2020.SCML2020World.generate', 'SCML2020World.generate', ([], {'agent_types': 'agent_types', 'n_steps': 'n_steps'}), '(agent_types=agent_types, n_steps=n_steps)\n', (795, 837), False, 'from scml.scml2020 import DecentralizingAgent, BuyCheapSellExpensiveAgent, SCML2020World, is_system_agent\n'), ((932, 1063), 'scml.scml2020.SCML2020World.generate', 'SCML2020World.generate', ([], {'agent_types': "config['agent_types']", 'agent_params': "config['agent_params'][:-2]", 'n_steps': "config['n_steps']"}), "(agent_types=config['agent_types'], agent_params=\n config['agent_params'][:-2], n_steps=config['n_steps'])\n", (954, 1063), False, 'from scml.scml2020 import DecentralizingAgent, BuyCheapSellExpensiveAgent, SCML2020World, is_system_agent\n'), ((5546, 5559), 'numpy.array', 'np.array', (['gap'], {}), '(gap)\n', (5554, 5559), True, 'import numpy as np\n'), ((645, 666), 'negmas.helpers.get_class', 'get_class', (['agent_type'], {}), '(agent_type)\n', (654, 666), False, 'from negmas.helpers import get_class\n'), ((2523, 2550), 'scml.scml2020.is_system_agent', 'is_system_agent', (['_.agent_id'], {}), '(_.agent_id)\n', (2538, 2550), False, 'from scml.scml2020 import DecentralizingAgent, BuyCheapSellExpensiveAgent, SCML2020World, is_system_agent\n')]
|
# Copyright 2017 The TensorFlow Lattice Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CalibratedEtl tests."""
# Dependency imports
import numpy as np
from tensorflow_lattice.python.estimators import calibrated_etl
from tensorflow_lattice.python.estimators import hparams as tfl_hparams
from tensorflow_lattice.python.lib import keypoints_initialization
from tensorflow_lattice.python.lib import test_data
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib
from tensorflow.python.platform import test
_NUM_KEYPOINTS = 50
class CalibratedEtlHParamsTest(test.TestCase):
def testEmptyMonotonicLatticeRankExpectsError(self):
hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
hparams.set_param('monotonic_num_lattices', 2)
hparams.set_param('monotonic_lattice_size', 2)
with self.assertRaisesRegexp(
ValueError,
'Hyperparameter configuration cannot be used in the calibrated etl '
'estimator.'):
calibrated_etl.calibrated_etl_classifier(hparams=hparams)
def testEmptyMonotonicLatticeSizeExpectsError(self):
hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
hparams.set_param('monotonic_num_lattices', 2)
hparams.set_param('monotonic_lattice_rank', 2)
with self.assertRaisesRegexp(
ValueError,
'Hyperparameter configuration cannot be used in the calibrated etl '
'estimator.'):
calibrated_etl.calibrated_etl_classifier(hparams=hparams)
def testEmptyNonMonotonicLatticeRankExpectsError(self):
hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
hparams.set_param('non_monotonic_num_lattices', 2)
hparams.set_param('non_monotonic_lattice_size', 2)
with self.assertRaisesRegexp(
ValueError,
'Hyperparameter configuration cannot be used in the calibrated etl '
'estimator.'):
calibrated_etl.calibrated_etl_classifier(hparams=hparams)
def testEmptyNonMonotonicLatticeSizeExpectsError(self):
hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
hparams.set_param('non_monotonic_num_lattices', 2)
hparams.set_param('non_monotonic_lattice_rank', 2)
with self.assertRaisesRegexp(
ValueError,
'Hyperparameter configuration cannot be used in the calibrated etl '
'estimator.'):
calibrated_etl.calibrated_etl_classifier(hparams=hparams)
def testWrongLatticeRegularization(self):
hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
hparams.set_param('non_monotonic_num_lattices', 2)
hparams.set_param('non_monotonic_lattice_size', 2)
hparams.set_param('non_monotonic_lattice_rank', 2)
hparams.set_feature_param('x', 'lattice_l1_reg', 0.1)
hparams.set_feature_param('x', 'lattice_l2_reg', 0.1)
hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1)
hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1)
with self.assertRaisesRegexp(
ValueError,
'Hyperparameter configuration cannot be used in the calibrated etl '
'estimator.'):
calibrated_etl.calibrated_etl_classifier(hparams=hparams)
class CalibratedEtlTest(test.TestCase):
def setUp(self):
super(CalibratedEtlTest, self).setUp()
self._test_data = test_data.TestData()
def _CalibratedEtlRegressor(self, feature_names, feature_columns,
**hparams_args):
def init_fn():
return keypoints_initialization.uniform_keypoints_for_signal(
_NUM_KEYPOINTS, -1., 1., 0., 1.)
hparams = tfl_hparams.CalibratedEtlHParams(
feature_names,
num_keypoints=_NUM_KEYPOINTS,
monotonic_num_lattices=1,
monotonic_lattice_rank=1,
monotonic_lattice_size=2,
non_monotonic_num_lattices=1,
non_monotonic_lattice_rank=1,
non_monotonic_lattice_size=2,
**hparams_args)
# Turn off monotonic calibrator.
hparams.set_param('calibration_monotonic', None)
hparams.set_param('learning_rate', 0.1)
return calibrated_etl.calibrated_etl_regressor(
feature_columns=feature_columns,
hparams=hparams,
keypoints_initializers_fn=init_fn)
def _CalibratedEtlClassifier(self, feature_columns, **hparams_args):
def init_fn():
return keypoints_initialization.uniform_keypoints_for_signal(
_NUM_KEYPOINTS, -1., 1., 0., 1.)
hparams = tfl_hparams.CalibratedEtlHParams(
num_keypoints=_NUM_KEYPOINTS,
monotonic_num_lattices=1,
monotonic_lattice_rank=1,
monotonic_lattice_size=2,
non_monotonic_num_lattices=1,
non_monotonic_lattice_rank=1,
non_monotonic_lattice_size=2,
**hparams_args)
# Turn off monotonic calibrator.
hparams.set_param('calibration_monotonic', None)
hparams.set_param('learning_rate', 0.1)
return calibrated_etl.calibrated_etl_classifier(
feature_columns=feature_columns,
hparams=hparams,
keypoints_initializers_fn=init_fn)
def testCalibratedEtlRegressorTraining1D(self):
feature_columns = [
feature_column_lib.numeric_column('x'),
]
estimator = self._CalibratedEtlRegressor(
['x'], feature_columns, interpolation_type='simplex')
estimator.train(input_fn=self._test_data.oned_input_fn())
# Here we only check the successful evaluation.
# Checking the actual number, accuracy, etc, makes the test too flaky.
_ = estimator.evaluate(input_fn=self._test_data.oned_input_fn())
def testCalibratedEtlRegressorTraining2D(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlRegressor(
['x0', 'x1'], feature_columns, interpolation_type='hypercube')
estimator.train(input_fn=self._test_data.twod_input_fn())
# Here we only check the successful evaluation.
# Checking the actual number, accuracy, etc, makes the test too flaky.
_ = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
def testCalibratedEtlRegressorTraining2DWithCalbrationRegularization(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlRegressor(
['x0', 'x1'],
feature_columns,
interpolation_type='simplex',
calibration_l1_reg=1e-2,
calibration_l2_reg=1e-2,
calibration_l1_laplacian_reg=0.05,
calibration_l2_laplacian_reg=0.01)
estimator.train(input_fn=self._test_data.twod_input_fn())
# Here we only check the successful evaluation.
# Checking the actual number, accuracy, etc, makes the test too flaky.
_ = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
def testCalibratedEtlRegressorTraining2DWithLatticeRegularizer(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlRegressor(
['x0', 'x1'],
feature_columns,
interpolation_type='simplex',
lattice_l1_reg=1.0,
lattice_l2_reg=1.0,
lattice_l1_torsion_reg=1.0,
lattice_l2_torsion_reg=1.0,
lattice_l1_laplacian_reg=1.0,
lattice_l2_laplacian_reg=1.0)
estimator.train(input_fn=self._test_data.twod_input_fn())
results = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
# We expect a worse result due to the lattice regularization.
self.assertGreater(results['average_loss'], 3e-3)
self.assertLess(results['average_loss'], 4e-2)
def testCalibratedEtlRegressorTrainingMultiDimensionalFeature(self):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(2,)),
]
estimator = self._CalibratedEtlRegressor(['x'], feature_columns)
estimator.train(input_fn=self._test_data.multid_feature_input_fn())
results = estimator.evaluate(
input_fn=self._test_data.multid_feature_input_fn())
self.assertLess(results['average_loss'], 1e-2)
# Turn off calibration for feature 'x'; it should turn off for both
# dimensions, and the results should get much worse.
estimator = self._CalibratedEtlRegressor(
['x'], feature_columns, feature__x__num_keypoints=0)
estimator.train(input_fn=self._test_data.multid_feature_input_fn())
results = estimator.evaluate(
input_fn=self._test_data.multid_feature_input_fn())
self.assertGreater(results['average_loss'], 1e-2)
def testCalibratedEtlClassifierTraining(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlClassifier(feature_columns)
estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
results = estimator.evaluate(
input_fn=self._test_data.twod_classificer_input_fn())
self.assertGreater(results['accuracy'], 0.97)
def testCalibratedEtlClassifierTrainingWithCalibrationRegularizer(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlClassifier(
feature_columns,
calibration_l1_reg=1e-2,
calibration_l2_reg=1e-2,
calibration_l1_laplacian_reg=1e-1,
calibration_l2_laplacian_reg=1e-1,
interpolation_type='hypercube')
estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
# Here we only check the successful evaluation.
# Checking the actual number, accuracy, etc, makes the test too flaky.
_ = estimator.evaluate(
input_fn=self._test_data.twod_classificer_input_fn())
def testCalibratedEtlClassifierTrainingWithLatticeRegularizer(self):
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
estimator = self._CalibratedEtlClassifier(
feature_columns,
lattice_l1_reg=1.0,
lattice_l2_reg=1.0,
lattice_l1_torsion_reg=1.0,
lattice_l2_torsion_reg=1.0,
lattice_l1_laplacian_reg=1.0,
lattice_l2_laplacian_reg=1.0,
interpolation_type='hypercube')
estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
results = estimator.evaluate(
input_fn=self._test_data.twod_classificer_input_fn())
# Due to the regularizer, we expect worse performance.
self.assertLess(results['accuracy'], 0.97)
self.assertGreater(results['accuracy'], 0.8)
def testCalibratedEtlMonotonicClassifierTraining(self):
# Construct the following training pair.
#
# Training: (x, y)
# ([0., 0.], 0.0)
# ([0., 1.], 1.0)
# ([1., 0.], 1.0)
# ([1., 1.], 0.0)
#
# which is not a monotonic function. Then check whether forcing monotonicity
# yields the following monotonic relations:
# f(0, 0) <= f(0, 1), f(0, 0) <= f(1, 0), f(0, 1) <= f(1, 1),
# f(1, 0) <= f(1, 1).
x0 = np.array([0.0, 0.0, 1.0, 1.0])
x1 = np.array([0.0, 1.0, 0.0, 1.0])
x_samples = {'x0': x0, 'x1': x1}
training_y = np.array([[False], [True], [True], [False]])
train_input_fn = numpy_io.numpy_input_fn(
x=x_samples, y=training_y, batch_size=4, num_epochs=1000, shuffle=False)
test_input_fn = numpy_io.numpy_input_fn(x=x_samples, y=None, shuffle=False)
# Define monotonic lattice classifier.
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
def init_fn():
return keypoints_initialization.uniform_keypoints_for_signal(
2, 0., 1., 0., 1.)
hparams = tfl_hparams.CalibratedEtlHParams(
num_keypoints=2,
monotonic_num_lattices=2,
monotonic_lattice_rank=2,
monotonic_lattice_size=2)
hparams.set_param('calibration_monotonic', +1)
hparams.set_param('lattice_monotonic', True)
hparams.set_param('learning_rate', 0.1)
estimator = calibrated_etl.calibrated_etl_classifier(
feature_columns=feature_columns,
hparams=hparams,
keypoints_initializers_fn=init_fn)
estimator.train(input_fn=train_input_fn)
predictions = [
results['logits'][0]
for results in estimator.predict(input_fn=test_input_fn)
]
self.assertEqual(len(predictions), 4)
# Check monotonicity. Note that projection has its own precision, so we
# add a small number.
self.assertLess(predictions[0], predictions[1] + 1e-6)
self.assertLess(predictions[0], predictions[2] + 1e-6)
self.assertLess(predictions[1], predictions[3] + 1e-6)
self.assertLess(predictions[2], predictions[3] + 1e-6)
def testCalibratedEtlWithMissingTraining(self):
# x0 is missing with it's own vertex: so it can take very different values,
# while x1 is missing and calibrated, in this case to the middle of the
# lattice.
x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
x_samples = {'x0': x0, 'x1': x1}
train_input_fn = numpy_io.numpy_input_fn(
x=x_samples,
y=training_y,
batch_size=x0.shape[0],
num_epochs=2000,
shuffle=False)
test_input_fn = numpy_io.numpy_input_fn(
x=x_samples, y=training_y, shuffle=False)
feature_columns = [
feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1'),
]
def init_fn():
return keypoints_initialization.uniform_keypoints_for_signal(
2, 0., 1., 0., 1.)
hparams = tfl_hparams.CalibratedEtlHParams(
['x0', 'x1'],
num_keypoints=2,
non_monotonic_num_lattices=5,
non_monotonic_lattice_rank=2,
non_monotonic_lattice_size=2,
learning_rate=0.1,
missing_input_value=-1.)
estimator = calibrated_etl.calibrated_etl_regressor(
feature_columns=feature_columns,
hparams=hparams,
keypoints_initializers_fn=init_fn)
estimator.train(input_fn=train_input_fn)
results = estimator.evaluate(input_fn=test_input_fn)
self.assertLess(results['average_loss'], 0.1)
if __name__ == '__main__':
test.main()
|
[
"tensorflow.python.platform.test.main",
"tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams",
"tensorflow.python.feature_column.feature_column_lib.numeric_column",
"tensorflow_lattice.python.lib.test_data.TestData",
"tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier",
"tensorflow_lattice.python.lib.keypoints_initialization.uniform_keypoints_for_signal",
"numpy.array",
"tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn",
"tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_regressor"
] |
[((14983, 14994), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (14992, 14994), False, 'from tensorflow.python.platform import test\n'), ((1306, 1359), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'feature_names': "['x']"}), "(feature_names=['x'])\n", (1338, 1359), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((1750, 1803), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'feature_names': "['x']"}), "(feature_names=['x'])\n", (1782, 1803), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((2197, 2250), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'feature_names': "['x']"}), "(feature_names=['x'])\n", (2229, 2250), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((2652, 2705), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'feature_names': "['x']"}), "(feature_names=['x'])\n", (2684, 2705), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((3093, 3146), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'feature_names': "['x']"}), "(feature_names=['x'])\n", (3125, 3146), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((3906, 3926), 'tensorflow_lattice.python.lib.test_data.TestData', 'test_data.TestData', ([], {}), '()\n', (3924, 3926), False, 'from tensorflow_lattice.python.lib import test_data\n'), ((4189, 4463), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', (['feature_names'], {'num_keypoints': '_NUM_KEYPOINTS', 'monotonic_num_lattices': '(1)', 'monotonic_lattice_rank': '(1)', 'monotonic_lattice_size': '(2)', 'non_monotonic_num_lattices': '(1)', 'non_monotonic_lattice_rank': '(1)', 'non_monotonic_lattice_size': '(2)'}), '(feature_names, num_keypoints=\n _NUM_KEYPOINTS, monotonic_num_lattices=1, monotonic_lattice_rank=1,\n monotonic_lattice_size=2, non_monotonic_num_lattices=1,\n non_monotonic_lattice_rank=1, non_monotonic_lattice_size=2, **hparams_args)\n', (4221, 4463), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((4670, 4798), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_regressor', 'calibrated_etl.calibrated_etl_regressor', ([], {'feature_columns': 'feature_columns', 'hparams': 'hparams', 'keypoints_initializers_fn': 'init_fn'}), '(feature_columns=feature_columns,\n hparams=hparams, keypoints_initializers_fn=init_fn)\n', (4709, 4798), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((5038, 5296), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'num_keypoints': '_NUM_KEYPOINTS', 'monotonic_num_lattices': '(1)', 'monotonic_lattice_rank': '(1)', 'monotonic_lattice_size': '(2)', 'non_monotonic_num_lattices': '(1)', 'non_monotonic_lattice_rank': '(1)', 'non_monotonic_lattice_size': '(2)'}), '(num_keypoints=_NUM_KEYPOINTS,\n monotonic_num_lattices=1, monotonic_lattice_rank=1,\n monotonic_lattice_size=2, non_monotonic_num_lattices=1,\n non_monotonic_lattice_rank=1, non_monotonic_lattice_size=2, **hparams_args)\n', (5070, 5296), True, 'from 
tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((5496, 5625), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'feature_columns': 'feature_columns', 'hparams': 'hparams', 'keypoints_initializers_fn': 'init_fn'}), '(feature_columns=feature_columns,\n hparams=hparams, keypoints_initializers_fn=init_fn)\n', (5536, 5625), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((11721, 11751), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (11729, 11751), True, 'import numpy as np\n'), ((11761, 11791), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0]'], {}), '([0.0, 1.0, 0.0, 1.0])\n', (11769, 11791), True, 'import numpy as np\n'), ((11846, 11890), 'numpy.array', 'np.array', (['[[False], [True], [True], [False]]'], {}), '([[False], [True], [True], [False]])\n', (11854, 11890), True, 'import numpy as np\n'), ((11913, 12014), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': 'x_samples', 'y': 'training_y', 'batch_size': '(4)', 'num_epochs': '(1000)', 'shuffle': '(False)'}), '(x=x_samples, y=training_y, batch_size=4, num_epochs\n =1000, shuffle=False)\n', (11936, 12014), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((12039, 12098), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': 'x_samples', 'y': 'None', 'shuffle': '(False)'}), '(x=x_samples, y=None, shuffle=False)\n', (12062, 12098), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((12403, 12534), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'num_keypoints': '(2)', 'monotonic_num_lattices': '(2)', 'monotonic_lattice_rank': '(2)', 'monotonic_lattice_size': '(2)'}), '(num_keypoints=2, monotonic_num_lattices=2,\n monotonic_lattice_rank=2, monotonic_lattice_size=2)\n', (12435, 12534), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((12725, 12854), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'feature_columns': 'feature_columns', 'hparams': 'hparams', 'keypoints_initializers_fn': 'init_fn'}), '(feature_columns=feature_columns,\n hparams=hparams, keypoints_initializers_fn=init_fn)\n', (12765, 12854), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((13653, 13705), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0, -1.0, -1.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0, -1.0, -1.0, 0.0, 1.0])\n', (13661, 13705), True, 'import numpy as np\n'), ((13707, 13759), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, -1.0, -1.0]'], {}), '([0.0, 1.0, 0.0, 1.0, 0.0, 1.0, -1.0, -1.0])\n', (13715, 13759), True, 'import numpy as np\n'), ((13769, 13822), 'numpy.array', 'np.array', (['[1.0, 3.0, 7.0, 11.0, 23.0, 27.0, 2.0, 9.0]'], {}), '([1.0, 3.0, 7.0, 11.0, 23.0, 27.0, 2.0, 9.0])\n', (13777, 13822), True, 'import numpy as np\n'), ((13874, 13984), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': 'x_samples', 'y': 'training_y', 'batch_size': 'x0.shape[0]', 'num_epochs': '(2000)', 'shuffle': '(False)'}), '(x=x_samples, y=training_y, batch_size=x0.shape[0],\n num_epochs=2000, shuffle=False)\n', (13897, 13984), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((14042, 
14107), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': 'x_samples', 'y': 'training_y', 'shuffle': '(False)'}), '(x=x_samples, y=training_y, shuffle=False)\n', (14065, 14107), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((14377, 14583), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', (["['x0', 'x1']"], {'num_keypoints': '(2)', 'non_monotonic_num_lattices': '(5)', 'non_monotonic_lattice_rank': '(2)', 'non_monotonic_lattice_size': '(2)', 'learning_rate': '(0.1)', 'missing_input_value': '(-1.0)'}), "(['x0', 'x1'], num_keypoints=2,\n non_monotonic_num_lattices=5, non_monotonic_lattice_rank=2,\n non_monotonic_lattice_size=2, learning_rate=0.1, missing_input_value=-1.0)\n", (14409, 14583), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((14649, 14777), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_regressor', 'calibrated_etl.calibrated_etl_regressor', ([], {'feature_columns': 'feature_columns', 'hparams': 'hparams', 'keypoints_initializers_fn': 'init_fn'}), '(feature_columns=feature_columns,\n hparams=hparams, keypoints_initializers_fn=init_fn)\n', (14688, 14777), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((1622, 1679), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'hparams': 'hparams'}), '(hparams=hparams)\n', (1662, 1679), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((2066, 2123), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'hparams': 'hparams'}), '(hparams=hparams)\n', (2106, 2123), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((2521, 2578), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'hparams': 'hparams'}), '(hparams=hparams)\n', (2561, 2578), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((2976, 3033), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'hparams': 'hparams'}), '(hparams=hparams)\n', (3016, 3033), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((3721, 3778), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'hparams': 'hparams'}), '(hparams=hparams)\n', (3761, 3778), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((4076, 4170), 'tensorflow_lattice.python.lib.keypoints_initialization.uniform_keypoints_for_signal', 'keypoints_initialization.uniform_keypoints_for_signal', (['_NUM_KEYPOINTS', '(-1.0)', '(1.0)', '(0.0)', '(1.0)'], {}), '(_NUM_KEYPOINTS, -1.0,\n 1.0, 0.0, 1.0)\n', (4129, 4170), False, 'from tensorflow_lattice.python.lib import keypoints_initialization\n'), ((4925, 5019), 'tensorflow_lattice.python.lib.keypoints_initialization.uniform_keypoints_for_signal', 'keypoints_initialization.uniform_keypoints_for_signal', (['_NUM_KEYPOINTS', '(-1.0)', '(1.0)', '(0.0)', '(1.0)'], {}), '(_NUM_KEYPOINTS, -1.0,\n 1.0, 0.0, 1.0)\n', (4978, 5019), False, 'from tensorflow_lattice.python.lib import keypoints_initialization\n'), ((5730, 5768), 
'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x"""'], {}), "('x')\n", (5763, 5768), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((6225, 6264), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (6258, 6264), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((6274, 6313), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (6307, 6313), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((6807, 6846), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (6840, 6846), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((6856, 6895), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (6889, 6895), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((7549, 7588), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (7582, 7588), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((7598, 7637), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (7631, 7637), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((8398, 8448), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x"""'], {'shape': '(2,)'}), "('x', shape=(2,))\n", (8431, 8448), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((9281, 9320), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (9314, 9320), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((9330, 9369), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (9363, 9369), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((9768, 9807), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (9801, 9807), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((9817, 9856), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (9850, 9856), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((10524, 10563), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (10557, 10563), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((10573, 10612), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (10606, 10612), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((12175, 12214), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 
'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (12208, 12214), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((12224, 12263), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (12257, 12263), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((12304, 12380), 'tensorflow_lattice.python.lib.keypoints_initialization.uniform_keypoints_for_signal', 'keypoints_initialization.uniform_keypoints_for_signal', (['(2)', '(0.0)', '(1.0)', '(0.0)', '(1.0)'], {}), '(2, 0.0, 1.0, 0.0, 1.0)\n', (12357, 12380), False, 'from tensorflow_lattice.python.lib import keypoints_initialization\n'), ((14149, 14188), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (14182, 14188), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((14198, 14237), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (14231, 14237), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((14278, 14354), 'tensorflow_lattice.python.lib.keypoints_initialization.uniform_keypoints_for_signal', 'keypoints_initialization.uniform_keypoints_for_signal', (['(2)', '(0.0)', '(1.0)', '(0.0)', '(1.0)'], {}), '(2, 0.0, 1.0, 0.0, 1.0)\n', (14331, 14354), False, 'from tensorflow_lattice.python.lib import keypoints_initialization\n')]
|
from models import PatchCore
from save_utils import saveModelPath
import numpy
import torch
import warnings
from torch import tensor
from torchvision import transforms
import json
from PIL import Image, ImageFilter
import os
from torch.utils.data import DataLoader,TensorDataset
warnings.filterwarnings("ignore")
class train_patchcore():
def __init__(self,configPath,train_imgs_folder,
resize=None,center_crop=None,
f_coreset=.20,backbone_name="wide_resnet50_2",
TimeStamp=None):
self.configPath=configPath
self.train_imgs_folder=train_imgs_folder
self.resize=resize
self.center_crop=center_crop
self.f_coreset=f_coreset
self.backbone_name=backbone_name
self.TimeStamp=TimeStamp
with open(configPath) as json_file:
self.data = json.load(json_file)
self.model=PatchCore(
f_coreset=f_coreset,
backbone_name=backbone_name,
)
self.train_tar,self.train_path,self.model_path=saveModelPath(self.configPath,self.TimeStamp)
IMAGENET_MEAN = tensor([.485, .456, .406])
IMAGENET_STD = tensor([.229, .224, .225])
transfoms_paras = [
transforms.ToTensor(),
transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
]
if resize!=None:
transfoms_paras.append(transforms.Resize(self.resize, interpolation=transforms.InterpolationMode.BICUBIC))
if center_crop!=None:
transfoms_paras.append(transforms.CenterCrop(center_crop))
if self.data!=None:
self.scaling_factor=self.data['scaling_factor']
self.median_blur_size=self.data['smooth']
if self.scaling_factor!=1:
width = int(self.data['original_imgsz'][0]*self.scaling_factor)
height = int(self.data['original_imgsz'][1]*self.scaling_factor)
self.resize=[height,width]
transfoms_paras.append(transforms.Resize(self.resize, interpolation=transforms.InterpolationMode.BICUBIC))
self.loader=transforms.Compose(transfoms_paras)
def genTrainDS(self):
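# Build the training DataLoader: load each configured image, optionally median-blur it, apply the transform pipeline, and label every sample as class 0 (nominal).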
train_ims = []
train_labels = []
for img_id in self.data['train_ids']:
img_path = os.path.join(self.train_imgs_folder, img_id)
train_im = Image.open(img_path).convert('RGB')
if self.median_blur_size!=0:
train_im = train_im.filter(ImageFilter.MedianFilter(size=self.median_blur_size))
print ('Applying median filter on training image with degree of '+ str(self.median_blur_size))
train_im = self.loader(train_im)
train_label = tensor([0])
train_ims.append(train_im.numpy())
train_labels.append(train_label.numpy())
train_ims = numpy.array(train_ims)
train_labels = numpy.array(train_labels)
print('Training Tensor Shape is ' + str(train_ims.shape))
train_ims = torch.from_numpy(train_ims)
train_labels = torch.from_numpy(train_labels)
train_data = TensorDataset(train_ims,train_labels)
train_ds = DataLoader(train_data)
return train_ds
def saveTrainConfig(self):
self.data['configPath'] = self.configPath
self.data['imgsz'] = self.resize
self.data['center_crop'] = self.center_crop
self.data['scaling_factor'] = self.scaling_factor
self.data['train_imgs_folder'] = self.train_imgs_folder
self.data['backbone_name'] = self.backbone_name
self.data['TimeStamp'] = self.TimeStamp
json_string = json.dumps(self.data)
json_filePath = os.path.join(self.model_path,'training_config.json')
with open(json_filePath, 'w') as outfile:
outfile.write(json_string)
def run(self):
train_ds = self.genTrainDS()
tobesaved = self.model.fit(train_ds)
torch.save(tobesaved, self.train_tar)
torch.save(self.model.state_dict(), self.train_path)
self.saveTrainConfig()
|
[
"json.dumps",
"torch.utils.data.TensorDataset",
"torchvision.transforms.Normalize",
"os.path.join",
"torch.utils.data.DataLoader",
"torchvision.transforms.Compose",
"torchvision.transforms.CenterCrop",
"PIL.ImageFilter.MedianFilter",
"torch.from_numpy",
"torchvision.transforms.Resize",
"save_utils.saveModelPath",
"json.load",
"warnings.filterwarnings",
"PIL.Image.open",
"torch.save",
"numpy.array",
"models.PatchCore",
"torch.tensor",
"torchvision.transforms.ToTensor"
] |
[((295, 328), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (318, 328), False, 'import warnings\n'), ((923, 982), 'models.PatchCore', 'PatchCore', ([], {'f_coreset': 'f_coreset', 'backbone_name': 'backbone_name'}), '(f_coreset=f_coreset, backbone_name=backbone_name)\n', (932, 982), False, 'from models import PatchCore\n'), ((1099, 1145), 'save_utils.saveModelPath', 'saveModelPath', (['self.configPath', 'self.TimeStamp'], {}), '(self.configPath, self.TimeStamp)\n', (1112, 1145), False, 'from save_utils import saveModelPath\n'), ((1170, 1199), 'torch.tensor', 'tensor', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1176, 1199), False, 'from torch import tensor\n'), ((1220, 1249), 'torch.tensor', 'tensor', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1226, 1249), False, 'from torch import tensor\n'), ((2202, 2237), 'torchvision.transforms.Compose', 'transforms.Compose', (['transfoms_paras'], {}), '(transfoms_paras)\n', (2220, 2237), False, 'from torchvision import transforms\n'), ((2964, 2986), 'numpy.array', 'numpy.array', (['train_ims'], {}), '(train_ims)\n', (2975, 2986), False, 'import numpy\n'), ((3010, 3035), 'numpy.array', 'numpy.array', (['train_labels'], {}), '(train_labels)\n', (3021, 3035), False, 'import numpy\n'), ((3123, 3150), 'torch.from_numpy', 'torch.from_numpy', (['train_ims'], {}), '(train_ims)\n', (3139, 3150), False, 'import torch\n'), ((3174, 3204), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (3190, 3204), False, 'import torch\n'), ((3226, 3264), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train_ims', 'train_labels'], {}), '(train_ims, train_labels)\n', (3239, 3264), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3283, 3305), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {}), '(train_data)\n', (3293, 3305), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3755, 3776), 'json.dumps', 'json.dumps', (['self.data'], {}), '(self.data)\n', (3765, 3776), False, 'import json\n'), ((3801, 3854), 'os.path.join', 'os.path.join', (['self.model_path', '"""training_config.json"""'], {}), "(self.model_path, 'training_config.json')\n", (3813, 3854), False, 'import os\n'), ((4057, 4094), 'torch.save', 'torch.save', (['tobesaved', 'self.train_tar'], {}), '(tobesaved, self.train_tar)\n', (4067, 4094), False, 'import torch\n'), ((882, 902), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (891, 902), False, 'import json\n'), ((1299, 1320), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1318, 1320), False, 'from torchvision import transforms\n'), ((1346, 1395), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['IMAGENET_MEAN', 'IMAGENET_STD'], {}), '(IMAGENET_MEAN, IMAGENET_STD)\n', (1366, 1395), False, 'from torchvision import transforms\n'), ((2392, 2436), 'os.path.join', 'os.path.join', (['self.train_imgs_folder', 'img_id'], {}), '(self.train_imgs_folder, img_id)\n', (2404, 2436), False, 'import os\n'), ((2830, 2841), 'torch.tensor', 'tensor', (['[0]'], {}), '([0])\n', (2836, 2841), False, 'from torch import tensor\n'), ((1479, 1566), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.resize'], {'interpolation': 'transforms.InterpolationMode.BICUBIC'}), '(self.resize, interpolation=transforms.InterpolationMode.\n BICUBIC)\n', (1496, 1566), False, 'from torchvision import transforms\n'), ((1628, 1662), 
'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['center_crop'], {}), '(center_crop)\n', (1649, 1662), False, 'from torchvision import transforms\n'), ((2097, 2184), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.resize'], {'interpolation': 'transforms.InterpolationMode.BICUBIC'}), '(self.resize, interpolation=transforms.InterpolationMode.\n BICUBIC)\n', (2114, 2184), False, 'from torchvision import transforms\n'), ((2460, 2480), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2470, 2480), False, 'from PIL import Image, ImageFilter\n'), ((2593, 2645), 'PIL.ImageFilter.MedianFilter', 'ImageFilter.MedianFilter', ([], {'size': 'self.median_blur_size'}), '(size=self.median_blur_size)\n', (2617, 2645), False, 'from PIL import Image, ImageFilter\n')]
|
import os
import cv2
import numpy as np
from .fs_access import FSAccess
def read_image_file(fname_url):
with FSAccess(fname_url, True) as image_f:
img_buf = image_f.read()
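    # Decode the raw bytes via a NumPy buffer; flag 0 makes cv2.imdecode return a grayscale image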
np_arr = np.frombuffer(img_buf, np.uint8)
img = cv2.imdecode(np_arr, 0)
return img
def write_image_file(fname_url, img):
    # cv2.imencode picks the codec from the file extension and returns (success_flag, encoded byte array)
    _, img_buf = cv2.imencode(os.path.splitext(fname_url)[1], img)
    with FSAccess(fname_url, True, read=False) as image_f:
        image_f.write(img_buf.tobytes())
|
[
"numpy.frombuffer",
"cv2.imdecode",
"numpy.getbuffer"
] |
[((330, 347), 'numpy.getbuffer', 'np.getbuffer', (['img'], {}), '(img)\n', (342, 347), True, 'import numpy as np\n'), ((192, 224), 'numpy.frombuffer', 'np.frombuffer', (['img_buf', 'np.uint8'], {}), '(img_buf, np.uint8)\n', (205, 224), True, 'import numpy as np\n'), ((239, 262), 'cv2.imdecode', 'cv2.imdecode', (['np_arr', '(0)'], {}), '(np_arr, 0)\n', (251, 262), False, 'import cv2\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 11 14:03:30 2020
@author: acpotter
"""
#%% -- IMPORTS --
import sys
sys.path.append("..") # import one subdirectory up in files
# external packages
import numpy as np
import qiskit as qk
import networkx as nx
#import tenpy
# custom things
#import mps
#%%
class ParamCircuit(object):
"""
Parameterized circuit
Circuit + parameters
"""
def __init__(self,circ,param_names):
self.circ=circ
self.param_names = param_names
def bind_parameters(self,params):
        return self.circ.bind_parameters(params)
# def bind_from_array(self,param_vals):
# """
# input: param_vals, np.array of values, must be same length as self.param_names
# """
# params = dict(zip(self.param_names,param_vals))
# return self.bind_from_array(params)
class QKParamCircuit(ParamCircuit):
"""
ParamCircuit implemented with qiskit
"""
def __init__(self,circ,param_names):
self.circ=circ
self.param_names = param_names
self.circuit_format='qiskit'
def bind_parameters(self,params):
cres = self.circ.bind_parameters(params)
return cres
def unitary(self,params):
"""
input: params = dictionary of qiskit circuit parameters
output: returns unitary for circuit
"""
bound_circ = self.bind_parameters(params)
simulator = qk.Aer.get_backend('unitary_simulator')
result = qk.execute(bound_circ,simulator).result()
u = result.get_unitary(bound_circ)
return u
# def bind_from_array(self,params):
# """
# sets named parameters to particular values
# input:
# params: dictionary {parameter name: numerical value}
# output:
# circuit with parameters resolved
# """
# return self.circ.bind_parameters(params)
#%% -- ISOTENSOR CLASS --
class IsoTensor(object):
"""
node of an isometric tensor-network, generated by parameterized cirq unitary
works equally for tensor network state (TNS) or operator (TNO);
for TNS: physical register implicitly assumed to start from reference state: |00..0>
Intention: circuit object intended to be easily adaptable to work equally with cirq, qiskit, etc...
"""
def __init__(self,
name, # label for the tensor
qregs, # listof quantum registers
pcirc, # parameterized circuit object
#param_names, # list of circuit parameter names (str's)
                 meas_list=[], # list of tuples: (qreg, creg, measurement circuit, classical bits)
circuit_format:str='qiskit', # string specifying circuit type
thermal = False,
thermal_prob = 0 #the chance of flipping a physical site
):
self.name=name
self.qregs=qregs
self.regdims = [2**len(reg) for reg in qregs]
self.circ= pcirc.circ
self.param_names = pcirc.param_names
# self.param_names=param_names
self.circuit_format=circuit_format
self.meas_list=meas_list
self.p =thermal_prob
self.thermal = thermal
def __str__(self):
return self.name
    def __repr__(self):
return self.name
## Resolve Circuit Parameters ##
def resolve_circuit(self,params,include_measurements=True):
"""
resolves parameters in circuit
inputs:
params: dictionary of parameter names and values
include_measurements, bool, whether or not to include measurement and reset
outputs:
resolved circuit
"""
if self.circuit_format == 'qiskit':
cres = self.circ.bind_parameters(params)
if include_measurements:
for qreg,creg,mcirc,cbits in self.meas_list:
cres = cres.combine(mcirc)
cres.add_register(creg)
# add the measurement circuit
cres.measure(qreg,cbits)
cres.reset(qreg)
if self.thermal: #do a pre-measurement circuit to flip a site to |1> with prob. p
pre_cir = qk.QuantumCircuit()
for reg in self.qregs: pre_cir.add_register(reg)
if include_measurements:
for qreg,creg,mcirc,cbits in self.meas_list:
pre_cir.add_register(creg)
cdict = {}
for i in range(len(self.qregs[0])):#need to match register to combine
cdict['c_pre'+str(i)] = qk.ClassicalRegister(1,'c_pre'+str(i))
cres.add_register(cdict['c_pre'+str(i)])
pre_cir.add_register(cdict['c_pre'+str(i)])
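                    # An RX rotation by 2*arcsin(sqrt(p)) leaves this site in |1> with probability p when measured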
pre_cir.rx(2*np.arcsin(np.sqrt(abs(self.p[i]))),self.qregs[0][i])
pre_cir.measure(self.qregs[0][i],cdict['c_pre'+str(i)])
pre_cir.reset(self.qregs[0][i])
pre_cir.x(self.qregs[0][i]).c_if(cdict['c_pre'+str(i)], 1)
cres = pre_cir.combine(cres)
return cres
else:
raise NotImplementedError()
def bind_params(self,params):
"""
inputs:
- params: dictionary {'name':value} for parameters in circuit
outputs:
- circuit with symbolic parameters set to numerical values
"""
if self.circuit_format == 'qiskit':
return self.circ.bind_parameters(params)
else:
raise NotImplementedError()
## Compute unitaries ##
def unitary(self,params):
"""
inputs:
- params: dictionary {'name':value} for parameters in circuit
outputs:
- unitary for circuit, as numpy array with shape regdims (output legs),regdims (input legs)
"""
if self.circuit_format == 'qiskit':
return self.unitary_qiskit(params)
elif self.circuit_format == 'cirq':
return self.unitary_cirq(params)
else:
raise NotImplementedError('only qiskit implemented')
def unitary_qiskit(self,params):
"""
inputs:
- params, dictionary {parameter:value} for parameters in circuit
note: parameter key type depends on type of circuit
for qiskit: parameter keys are qiskit circuit parameters
for cirq: they are sympy symbols
"""
# setup unitary simulator and compute unitary
bound_circ = self.circ.bind_parameters(params)
simulator = qk.Aer.get_backend('unitary_simulator')
result = qk.execute(bound_circ,simulator).result()
u = result.get_unitary(bound_circ)
        # need to re-size and re-order to be compatible with expected indexing
# note: qiskit writes bases in opposite order of usual convention
# e.g. for 3-qubit register: [q0,q1,q2],
# the state 011 refers to: q0=1, q1=1, q2=0
u = u.reshape(self.regdims[::-1]+self.regdims[::-1]) # reshape as tensor
nreg = len(self.qregs)
old_order = list(range(2*nreg))
new_order = old_order.copy()
new_order[0:nreg] = old_order[0:nreg][::-1]
new_order[nreg::] = old_order[nreg::][::-1]
u = np.moveaxis(u,old_order,new_order)
return u
def unitary_cirq(self,params):
""" unitary constructor for cirq-based circuits """
qubit_order = [q for qreg in self.qregs for q in qreg] # order to return the qubit unitary
# resolve the symbolic circuit parameters to numerical values
resolver = cirq.ParamResolver(params)
resolved_circuit = cirq.resolve_parameters(self.circuit, resolver)
u = resolved_circuit.unitary(qubit_order = qubit_order)
        return u.reshape(self.regdims) # reshape as a multi-leg tensor
#%%
class IsoNetwork(object):
"""
NetworkX directed graph with:
nodes = IsoTensors
edges have list of qubits
To Do:
- add global measurement register names list
    - create to_qasm function that traverses the graph and assembles
together the qasm files for each node, adding the appropriate header
and defining qubits and measurement registers one time in the beginning
"""
def __init__(self,nodes=[],
edges=[],
qregs=[],
circuit_format='qiskit'
):
"""
nodes, list of IsoTensors
edges, list of tuples (output node, input node, list of qubits passed along edge)
qregs, list of qubit registers
(for cirq: each qubit register is list of qubits,
for qiskit, each qreg is a QuantumRegister object)
cregs, list of classical registers
# meas_dict, dictionary of classical registers to
# hold measurement values for each node that gets measured
# keys=MeasurementNode, values = list of tuples:
# (qreg to be measured, creg that stores outcome, circuit to transform qubits to measurement basis)
# note: keys of this define which nodes get measured
param_assignments,
dict with key = node, value = list of parameter objects for that node
for qiskit: parameters are inbuilt circuit parameter
for cirq: parameters are sympy symbols
measurement_nodes, list of IsoTensors that get measured
i.e. have at least one output leg that terminates in a measurement
actual basis for measurement only specified at qasm output/simulator step
"""
self.circuit_format=circuit_format
# construct graph and check that is a DAG
# check for repeated node names
self.graph = nx.DiGraph()
self.graph.add_nodes_from(nodes)
self.graph.add_edges_from(edges)
# check that graph is directed & acyclic (DAG)
if nx.algorithms.dag.is_directed_acyclic_graph(self.graph) != True:
raise RuntimeError('Graph must be directed and acyclic')
# store node information
self.nodes = nodes
self.qregs = qregs
# self.creg_dict = creg_dict
self.node_names = [node.name for node in nodes]
if len(self.node_names) != len(set(self.node_names)):
raise ValueError('Tensor nodes must have unique names')
# store variational parameter info
self.param_assignments = {}
for node in nodes:
self.param_assignments[node]=node.param_names
# self.param_assignments = param_assignments
# topologically sort nodes in order of execution
self.sorted_nodes = [node for node in nx.topological_sort(self.graph)]
## Circuit Construction Methods ##
def construct_circuit(self,param_dict,include_measurements=True):
"""
input:
param_dict, dict of {parameter:value}
output:
circuit
"""
if self.circuit_format=='qiskit':
            return self.construct_circuit_qiskit(param_dict,include_measurements)
else:
raise NotImplementedError
    def construct_circuit_qiskit(self,param_dict,include_measurements=True):
"""
construct circuit for network using qiskit
"""
self.circ = qk.QuantumCircuit()
# add quantum and classical registers
for reg in self.qregs: self.circ.add_register(reg)
#for reg in list(self.creg_dict.values()): self.circ.add_register(reg)
for node in self.sorted_nodes:
node_dict = {k:param_dict[k] for k in self.param_assignments[node]}
node_circ = node.resolve_circuit(node_dict,include_measurements)
self.circ = self.circ.combine(node_circ)
return self.circ
def to_qasm(self,param_dict):
if self.circuit_format=='qiskit':
return self.construct_circuit(param_dict).qasm()
else:
raise NotImplementedError()
#%%
|
[
"sys.path.append",
"numpy.moveaxis",
"qiskit.QuantumCircuit",
"qiskit.execute",
"networkx.topological_sort",
"networkx.algorithms.dag.is_directed_acyclic_graph",
"networkx.DiGraph",
"qiskit.Aer.get_backend"
] |
[((140, 161), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (155, 161), False, 'import sys\n'), ((1513, 1552), 'qiskit.Aer.get_backend', 'qk.Aer.get_backend', (['"""unitary_simulator"""'], {}), "('unitary_simulator')\n", (1531, 1552), True, 'import qiskit as qk\n'), ((6841, 6880), 'qiskit.Aer.get_backend', 'qk.Aer.get_backend', (['"""unitary_simulator"""'], {}), "('unitary_simulator')\n", (6859, 6880), True, 'import qiskit as qk\n'), ((7544, 7580), 'numpy.moveaxis', 'np.moveaxis', (['u', 'old_order', 'new_order'], {}), '(u, old_order, new_order)\n', (7555, 7580), True, 'import numpy as np\n'), ((10105, 10117), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (10115, 10117), True, 'import networkx as nx\n'), ((11725, 11744), 'qiskit.QuantumCircuit', 'qk.QuantumCircuit', ([], {}), '()\n', (11742, 11744), True, 'import qiskit as qk\n'), ((10275, 10330), 'networkx.algorithms.dag.is_directed_acyclic_graph', 'nx.algorithms.dag.is_directed_acyclic_graph', (['self.graph'], {}), '(self.graph)\n', (10318, 10330), True, 'import networkx as nx\n'), ((1570, 1603), 'qiskit.execute', 'qk.execute', (['bound_circ', 'simulator'], {}), '(bound_circ, simulator)\n', (1580, 1603), True, 'import qiskit as qk\n'), ((4412, 4431), 'qiskit.QuantumCircuit', 'qk.QuantumCircuit', ([], {}), '()\n', (4429, 4431), True, 'import qiskit as qk\n'), ((6898, 6931), 'qiskit.execute', 'qk.execute', (['bound_circ', 'simulator'], {}), '(bound_circ, simulator)\n', (6908, 6931), True, 'import qiskit as qk\n'), ((11065, 11096), 'networkx.topological_sort', 'nx.topological_sort', (['self.graph'], {}), '(self.graph)\n', (11084, 11096), True, 'import networkx as nx\n')]
|
"""
Script that trains Tensorflow singletask models on QM7 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem as dc
import numpy as np
from qm7_datasets import load_qm7b_from_mat
np.random.seed(123)
qm7_tasks, datasets, transformers = load_qm7b_from_mat(split='stratified')
train_dataset, valid_dataset, test_dataset = datasets
fit_transformers = [dc.trans.CoulombFitTransformer(train_dataset)]
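# fit_transformers are applied to the Coulomb-matrix features on the fly inside the fit-transform regressor below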
regression_metric = [dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")]
model = dc.models.TensorflowMultiTaskFitTransformRegressor(
    n_tasks=len(qm7_tasks), n_features=[23, 23], learning_rate=0.001, momentum=.8, batch_size=25,
weight_init_stddevs=[1/np.sqrt(400),1/np.sqrt(100),1/np.sqrt(100)],
bias_init_consts=[0.,0.,0.], layer_sizes=[400,100,100],
dropouts=[0.01,0.01,0.01], fit_transformers=fit_transformers, n_evals=10, seed=123)
# Fit trained model
model.fit(train_dataset, nb_epoch=50)
model.save()
train_scores = model.evaluate(train_dataset, regression_metric, transformers)
print("Train scores [kcal/mol]")
print(train_scores)
valid_scores = model.evaluate(valid_dataset, regression_metric, transformers)
print("Valid scores [kcal/mol]")
print(valid_scores)
test_scores = model.evaluate(test_dataset, regression_metric, transformers)
print("Test scores [kcal/mol]")
print(test_scores)
|
[
"numpy.random.seed",
"deepchem.trans.CoulombFitTransformer",
"qm7_datasets.load_qm7b_from_mat",
"deepchem.metrics.Metric",
"numpy.sqrt"
] |
[((279, 298), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (293, 298), True, 'import numpy as np\n'), ((335, 373), 'qm7_datasets.load_qm7b_from_mat', 'load_qm7b_from_mat', ([], {'split': '"""stratified"""'}), "(split='stratified')\n", (353, 373), False, 'from qm7_datasets import load_qm7b_from_mat\n'), ((448, 493), 'deepchem.trans.CoulombFitTransformer', 'dc.trans.CoulombFitTransformer', (['train_dataset'], {}), '(train_dataset)\n', (478, 493), True, 'import deepchem as dc\n'), ((516, 584), 'deepchem.metrics.Metric', 'dc.metrics.Metric', (['dc.metrics.mean_absolute_error'], {'mode': '"""regression"""'}), "(dc.metrics.mean_absolute_error, mode='regression')\n", (533, 584), True, 'import deepchem as dc\n'), ((601, 666), 'deepchem.metrics.Metric', 'dc.metrics.Metric', (['dc.metrics.pearson_r2_score'], {'mode': '"""regression"""'}), "(dc.metrics.pearson_r2_score, mode='regression')\n", (618, 666), True, 'import deepchem as dc\n'), ((854, 866), 'numpy.sqrt', 'np.sqrt', (['(400)'], {}), '(400)\n', (861, 866), True, 'import numpy as np\n'), ((869, 881), 'numpy.sqrt', 'np.sqrt', (['(100)'], {}), '(100)\n', (876, 881), True, 'import numpy as np\n'), ((884, 896), 'numpy.sqrt', 'np.sqrt', (['(100)'], {}), '(100)\n', (891, 896), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import sys
from PyQt5.QtWidgets import QVBoxLayout,QWidget
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
import random
import numpy as np
class HistogramWidget(QWidget):
def __init__(self, statNames=None, histograms=None, binWidth=None):
super().__init__()
self._statNames = statNames
self._histograms = histograms
self._binWidth = binWidth
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas, self)
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
self.plot()
def plot(self, statNames=None, histograms=None, binWidth=None):
self._statNames = statNames
self._histograms = histograms
self._binWidth = binWidth
self.figure.clear()
if self._statNames is None or self._histograms is None or len(self._histograms) == 0:
return
ax = self.figure.add_subplot(111)
ax.set_title("Histograms")
legend = []
for i in range(len(self._statNames)):
label = self._statNames[i]
values = self._histograms[i]
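            # Trim leading and trailing all-zero bins so only the populated range is plotted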
first_nonzero_index = next((i for i, x in enumerate(values) if x!=0), None)
last_nonzero_index = next((len(values) - idx for idx, item in enumerate(reversed(values), 1) if item), None)
bins = [j for j in range((first_nonzero_index-10000), last_nonzero_index-10000)]
values = values[first_nonzero_index:last_nonzero_index]
# this is for merging bins according to bin width
mergedValues = [sum(values[i:i + self._binWidth]) for i in range(0, len(values), self._binWidth)]
mergedBins = [bins[i]/10 for i in range(0, len(bins), self._binWidth)]
left, right = mergedBins[:-1], mergedBins[1:]
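            # Interleave the left/right bin edges and duplicate each value so every merged bin is drawn as a flat step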
X = np.array([left, right]).T.flatten()
X = np.append(X, mergedBins[len(mergedBins)-1])
Y = np.array([mergedValues, mergedValues]).T.flatten()[:-1]
legend.append(label)
ax.plot(X, Y)
ax.legend(legend)
self.canvas.draw()
|
[
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"PyQt5.QtWidgets.QVBoxLayout",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT"
] |
[((584, 596), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (594, 596), True, 'import matplotlib.pyplot as plt\n'), ((619, 644), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self.figure'], {}), '(self.figure)\n', (631, 644), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((668, 704), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.canvas', 'self'], {}), '(self.canvas, self)\n', (685, 704), True, 'from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n'), ((722, 735), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (733, 735), False, 'from PyQt5.QtWidgets import QVBoxLayout, QWidget\n'), ((2109, 2132), 'numpy.array', 'np.array', (['[left, right]'], {}), '([left, right])\n', (2117, 2132), True, 'import numpy as np\n'), ((2221, 2259), 'numpy.array', 'np.array', (['[mergedValues, mergedValues]'], {}), '([mergedValues, mergedValues])\n', (2229, 2259), True, 'import numpy as np\n')]
|
"""Script containing the abstract policy class."""
import numpy as np
import tensorflow as tf
from hbaselines.utils.tf_util import get_trainable_vars
from hbaselines.utils.tf_util import get_target_updates
class ActorCriticPolicy(object):
"""Base Actor Critic Policy.
Attributes
----------
sess : tf.compat.v1.Session
the current TensorFlow session
ob_space : gym.spaces.*
the observation space of the environment
ac_space : gym.spaces.*
the action space of the environment
co_space : gym.spaces.*
the context space of the environment
buffer_size : int
the max number of transitions to store
batch_size : int
SGD batch size
actor_lr : float
actor learning rate
critic_lr : float
critic learning rate
verbose : int
the verbosity level: 0 none, 1 training information, 2 tensorflow debug
tau : float
target update rate
gamma : float
discount factor
layer_norm : bool
enable layer normalisation
layers : list of int or None
the size of the Neural network for the policy
act_fun : tf.nn.*
the activation function to use in the neural network
use_huber : bool
specifies whether to use the huber distance function as the loss for
the critic. If set to False, the mean-squared error metric is used
instead
"""
def __init__(self,
sess,
ob_space,
ac_space,
co_space,
buffer_size,
batch_size,
actor_lr,
critic_lr,
verbose,
tau,
gamma,
layer_norm,
layers,
act_fun,
use_huber):
"""Instantiate the base policy object.
Parameters
----------
sess : tf.compat.v1.Session
the current TensorFlow session
ob_space : gym.spaces.*
the observation space of the environment
ac_space : gym.spaces.*
the action space of the environment
co_space : gym.spaces.*
the context space of the environment
buffer_size : int
the max number of transitions to store
batch_size : int
SGD batch size
actor_lr : float
actor learning rate
critic_lr : float
critic learning rate
verbose : int
the verbosity level: 0 none, 1 training information, 2 tensorflow
debug
tau : float
target update rate
gamma : float
discount factor
layer_norm : bool
enable layer normalisation
layers : list of int or None
the size of the Neural network for the policy
act_fun : tf.nn.*
the activation function to use in the neural network
use_huber : bool
specifies whether to use the huber distance function as the loss
for the critic. If set to False, the mean-squared error metric is
used instead
"""
self.sess = sess
self.ob_space = ob_space
self.ac_space = ac_space
self.co_space = co_space
self.buffer_size = buffer_size
self.batch_size = batch_size
self.actor_lr = actor_lr
self.critic_lr = critic_lr
self.verbose = verbose
self.layers = layers
self.tau = tau
self.gamma = gamma
self.layer_norm = layer_norm
self.act_fun = act_fun
self.use_huber = use_huber
print(locals())
def initialize(self):
"""Initialize the policy.
This is used at the beginning of training by the algorithm, after the
model parameters have been initialized.
"""
raise NotImplementedError
def update(self, update_actor=True, **kwargs):
"""Perform a gradient update step.
Parameters
----------
update_actor : bool
specifies whether to update the actor policy. The critic policy is
still updated if this value is set to False.
Returns
-------
float
critic loss
float
actor loss
"""
raise NotImplementedError
def get_action(self, obs, context, apply_noise, random_actions, env_num=0):
"""Call the actor methods to compute policy actions.
Parameters
----------
obs : array_like
the observation
context : array_like or None
the contextual term. Set to None if no context is provided by the
environment.
apply_noise : bool
whether to add Gaussian noise to the output of the actor. Defaults
to False
random_actions : bool
if set to True, actions are sampled randomly from the action space
instead of being computed by the policy. This is used for
exploration purposes.
env_num : int
the environment number. Used to handle situations when multiple
parallel environments are being used.
Returns
-------
array_like
computed action by the policy
"""
raise NotImplementedError
def store_transition(self, obs0, context0, action, reward, obs1, context1,
done, is_final_step, env_num=0, evaluate=False):
"""Store a transition in the replay buffer.
Parameters
----------
obs0 : array_like
the last observation
context0 : array_like or None
the last contextual term. Set to None if no context is provided by
the environment.
action : array_like
the action
reward : float
the reward
obs1 : array_like
the current observation
context1 : array_like or None
the current contextual term. Set to None if no context is provided
by the environment.
done : float
is the episode done
is_final_step : bool
whether the time horizon was met in the step corresponding to the
current sample. This is used by the TD3 algorithm to augment the
done mask.
env_num : int
the environment number. Used to handle situations when multiple
parallel environments are being used.
evaluate : bool
whether the sample is being provided by the evaluation environment.
If so, the data is not stored in the replay buffer.
"""
raise NotImplementedError
def get_td_map(self):
"""Return dict map for the summary (to be run in the algorithm)."""
raise NotImplementedError
@staticmethod
def _get_obs(obs, context, axis=0):
"""Return the processed observation.
If the contextual term is not None, this will look as follows:
-----------------
processed_obs = | obs | context |
-----------------
Otherwise, this method simply returns the observation.
Parameters
----------
obs : array_like
the original observation
context : array_like or None
the contextual term. Set to None if no context is provided by the
environment.
axis : int
the axis to concatenate the observations and contextual terms by
Returns
-------
array_like
the processed observation
"""
if context is not None and context[0] is not None:
context = context.flatten() if axis == 0 else context
obs = np.concatenate((obs, context), axis=axis)
return obs
@staticmethod
def _get_ob_dim(ob_space, co_space):
"""Return the processed observation dimension.
If the context space is not None, it is included in the computation of
this term.
Parameters
----------
ob_space : gym.spaces.*
the observation space of the environment
co_space : gym.spaces.*
the context space of the environment
Returns
-------
tuple
the true observation dimension
"""
ob_dim = ob_space.shape
if co_space is not None:
ob_dim = tuple(map(sum, zip(ob_dim, co_space.shape)))
return ob_dim
@staticmethod
def _setup_target_updates(model_scope, target_scope, scope, tau, verbose):
"""Create the soft and initial target updates.
The initial model parameters are assumed to be stored under the scope
name "model", while the target policy parameters are assumed to be
under the scope name "target".
If an additional outer scope was provided when creating the policies,
they can be passed under the `scope` parameter.
Parameters
----------
model_scope : str
the scope of the model parameters
target_scope : str
the scope of the target parameters
scope : str or None
the outer scope, set to None if not available
tau : float
target update rate
verbose : int
the verbosity level: 0 none, 1 training information, 2 tensorflow
debug
Returns
-------
tf.Operation
initial target updates, to match the target with the model
tf.Operation
soft target update operations
"""
if scope is not None:
model_scope = scope + '/' + model_scope
target_scope = scope + '/' + target_scope
return get_target_updates(
get_trainable_vars(model_scope),
get_trainable_vars(target_scope),
tau, verbose)
@staticmethod
def _remove_fingerprint(val, ob_dim, fingerprint_dim, additional_dim):
"""Remove the fingerprint from the input.
This is a hacky procedure to remove the fingerprint elements from the
computation. The fingerprint elements are the last few elements of the
observation dimension, before any additional concatenated observations
(e.g. contexts or actions).
Parameters
----------
val : tf.Variable
the original input
ob_dim : int
number of environmental observation elements
fingerprint_dim : int
number of fingerprint elements
additional_dim : int
number of additional elements that were added to the input variable
Returns
-------
tf.Variable
the input with the fingerprints zeroed out
"""
return val * tf.constant([1.0] * (ob_dim - fingerprint_dim) +
[0.0] * fingerprint_dim +
[1.0] * additional_dim)
|
[
"hbaselines.utils.tf_util.get_trainable_vars",
"tensorflow.constant",
"numpy.concatenate"
] |
[((7880, 7921), 'numpy.concatenate', 'np.concatenate', (['(obs, context)'], {'axis': 'axis'}), '((obs, context), axis=axis)\n', (7894, 7921), True, 'import numpy as np\n'), ((9920, 9951), 'hbaselines.utils.tf_util.get_trainable_vars', 'get_trainable_vars', (['model_scope'], {}), '(model_scope)\n', (9938, 9951), False, 'from hbaselines.utils.tf_util import get_trainable_vars\n'), ((9965, 9997), 'hbaselines.utils.tf_util.get_trainable_vars', 'get_trainable_vars', (['target_scope'], {}), '(target_scope)\n', (9983, 9997), False, 'from hbaselines.utils.tf_util import get_trainable_vars\n'), ((10939, 11042), 'tensorflow.constant', 'tf.constant', (['([1.0] * (ob_dim - fingerprint_dim) + [0.0] * fingerprint_dim + [1.0] *\n additional_dim)'], {}), '([1.0] * (ob_dim - fingerprint_dim) + [0.0] * fingerprint_dim + \n [1.0] * additional_dim)\n', (10950, 11042), True, 'import tensorflow as tf\n')]
|
import numpy as np
import opt_prob
import scipy.optimize
# -- problem setup
name = '2.4 GOLDPR'
problem = opt_prob.Cons(name)
def cns(x):
g = -1.0*np.array(problem.cns(x))
return g.tolist()
# -- start optimization
x0 = ((np.array(problem.lb) + np.array(problem.ub)) / 2.0).tolist()
bounds = []
for lb_i, ub_i in zip(problem.lb, problem.ub):
bounds.append((lb_i, ub_i))
ineq_cons = {'type':'ineq', 'fun': cns}
method = 'SLSQP'
options = {'disp': True}
res = scipy.optimize.minimize(problem.obj, x0, method=method, bounds=bounds,
constraints=ineq_cons, options=options)
print(res)
|
[
"numpy.array",
"opt_prob.Cons"
] |
[((108, 127), 'opt_prob.Cons', 'opt_prob.Cons', (['name'], {}), '(name)\n', (121, 127), False, 'import opt_prob\n'), ((234, 254), 'numpy.array', 'np.array', (['problem.lb'], {}), '(problem.lb)\n', (242, 254), True, 'import numpy as np\n'), ((257, 277), 'numpy.array', 'np.array', (['problem.ub'], {}), '(problem.ub)\n', (265, 277), True, 'import numpy as np\n')]
|
"""Initialisation procedures."""
# pylint: disable=import-outside-toplevel
import numpy as np
import scipy.integrate as sci
import probnum.filtsmooth as pnfs
import probnum.statespace as pnss
from probnum import randvars
# In the initialisation-via-RK function below, this value is added to the marginal stds of the initial derivatives that are known.
# If we put in zero, there are linalg errors (because a zero-cov RV is conditioned on a dirac likelihood).
# This value is chosen such that its square root is a really small damping factor.
SMALL_VALUE = 1e-28
def initialize_odefilter_with_rk(
f, y0, t0, prior, initrv, df=None, h0=1e-2, method="DOP853"
):
r"""Initialize an ODE filter by fitting the prior process to a few steps of an approximate ODE solution computed with Scipy's RK.
It goes as follows:
1. The ODE integration problem is set up on the interval ``[t0, t0 + (2*order+1)*h0]``
    and solved with a call to ``scipy.integrate.solve_ivp``. The solver uses adaptive steps with ``atol=rtol=1e-12``,
but is forced to pass through the
events ``(t0, t0+h0, t0 + 2*h0, ..., t0 + (2*order+1)*h0)``.
    The result is a vector of time points and states with at least ``(2*order+1)`` entries.
    The adaptive solver may have selected many more steps, but because of the prescribed events it cannot have taken fewer.
2. A prescribed prior is fitted to the first ``(2*order+1)`` (t, y) pairs of the solution. ``order`` is the order of the prior.
3. The value of the resulting posterior at time ``t=t0`` is an estimate of the state and all its derivatives.
The resulting marginal standard deviations estimate the error. This random variable is returned.
Parameters
----------
f
ODE vector field.
y0
Initial value.
t0
Initial time point.
prior
Prior distribution used for the ODE solver. For instance an integrated Brownian motion prior (``IBM``).
initrv
Initial random variable.
df
Jacobian of the ODE vector field. Optional. If specified, more components of the result will be exact.
h0
Maximum step-size to use for computing the approximate ODE solution. The smaller, the more accurate, but also, the smaller, the less stable.
The best value here depends on the ODE problem, and probably the chosen method. Optional. Default is ``1e-2``.
method
Which solver to use. This is communicated as a string that is compatible with ``scipy.integrate.solve_ivp(..., method=method)``.
Optional. Default is `DOP853`.
Returns
-------
Normal
Estimated (improved) initial random variable. Compatible with the specified prior.
Examples
--------
>>> from dataclasses import astuple
>>> from probnum.randvars import Normal
>>> from probnum.statespace import IBM
>>> from probnum.problems.zoo.diffeq import vanderpol
Compute the initial values of the van-der-Pol problem as follows
>>> f, t0, tmax, y0, df, *_ = astuple(vanderpol())
>>> print(y0)
[2. 0.]
>>> prior = IBM(ordint=3, spatialdim=2)
>>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
>>> improved_initrv = initialize_odefilter_with_rk(f, y0, t0, prior=prior, initrv=initrv, df=df)
>>> print(prior.proj2coord(0) @ improved_initrv.mean)
[2. 0.]
>>> print(np.round(improved_initrv.mean, 1))
[ 2. 0. -2. 58.2 0. -2. 60. -1745.7]
>>> print(np.round(np.log10(improved_initrv.std), 1))
[-13.8 -11.3 -9. -1.5 -13.8 -11.3 -9. -1.5]
"""
y0 = np.asarray(y0)
ode_dim = y0.shape[0] if y0.ndim > 0 else 1
order = prior.ordint
proj_to_y = prior.proj2coord(0)
zeros_shift = np.zeros(ode_dim)
zeros_cov = np.zeros((ode_dim, ode_dim))
measmod = pnss.DiscreteLTIGaussian(
proj_to_y,
zeros_shift,
zeros_cov,
proc_noise_cov_cholesky=zeros_cov,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
# order + 1 would suffice in theory, 2*order + 1 is for good measure
# (the "+1" is a safety factor for order=1)
num_steps = 2 * order + 1
t_eval = np.arange(t0, t0 + (num_steps + 1) * h0, h0)
sol = sci.solve_ivp(
f,
(t0, t0 + (num_steps + 1) * h0),
y0=y0,
atol=1e-12,
rtol=1e-12,
t_eval=t_eval,
method=method,
)
ts = sol.t[:num_steps]
ys = sol.y[:, :num_steps].T
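    # Condition the prior at t0: the initial value and f(t0, y0) are known, so they get (near-)zero variance below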
initmean = initrv.mean.copy()
initmean[0 :: (order + 1)] = y0
initmean[1 :: (order + 1)] = f(t0, y0)
initcov_diag = np.diag(initrv.cov).copy()
initcov_diag[0 :: (order + 1)] = SMALL_VALUE
initcov_diag[1 :: (order + 1)] = SMALL_VALUE
if df is not None:
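        # With the Jacobian available, the second derivative (df @ f for an autonomous field) is also pinned down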
if order > 1:
initmean[2 :: (order + 1)] = df(t0, y0) @ f(t0, y0)
initcov_diag[2 :: (order + 1)] = SMALL_VALUE
initcov = np.diag(initcov_diag)
initcov_cholesky = np.diag(np.sqrt(initcov_diag))
initrv = randvars.Normal(initmean, initcov, cov_cholesky=initcov_cholesky)
kalman = pnfs.Kalman(prior, measmod, initrv)
out = kalman.filtsmooth(ys, ts)
estimated_initrv = out.state_rvs[0]
return estimated_initrv
def initialize_odefilter_with_taylormode(f, y0, t0, prior, initrv):
"""Initialize an ODE filter with Taylor-mode automatic differentiation.
This requires JAX. For an explanation of what happens ``under the hood``, see [1]_.
References
----------
.. [1] <NAME>. and <NAME>., Stable implementation of probabilistic ODE solvers,
*arXiv:2012.10106*, 2020.
The implementation is inspired by the implementation in
https://github.com/jacobjinkelly/easy-neural-ode/blob/master/latent_ode.py
Parameters
----------
f
ODE vector field.
y0
Initial value.
t0
Initial time point.
prior
Prior distribution used for the ODE solver. For instance an integrated Brownian motion prior (``IBM``).
initrv
Initial random variable.
Returns
-------
Normal
Estimated initial random variable. Compatible with the specified prior.
Examples
--------
>>> import sys, pytest
>>> if sys.platform.startswith('win'):
... pytest.skip('this doctest does not work on Windows')
>>> from dataclasses import astuple
>>> from probnum.randvars import Normal
>>> from probnum.problems.zoo.diffeq import threebody_jax, vanderpol_jax
>>> from probnum.statespace import IBM
Compute the initial values of the restricted three-body problem as follows
>>> f, t0, tmax, y0, df, *_ = astuple(threebody_jax())
>>> print(y0)
[ 0.994 0. 0. -2.00158511]
>>> prior = IBM(ordint=3, spatialdim=4)
>>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
>>> improved_initrv = initialize_odefilter_with_taylormode(f, y0, t0, prior, initrv)
>>> print(prior.proj2coord(0) @ improved_initrv.mean)
[ 0.994 0. 0. -2.00158511]
>>> print(improved_initrv.mean)
[ 9.94000000e-01 0.00000000e+00 -3.15543023e+02 0.00000000e+00
0.00000000e+00 -2.00158511e+00 0.00000000e+00 9.99720945e+04
0.00000000e+00 -3.15543023e+02 0.00000000e+00 6.39028111e+07
-2.00158511e+00 0.00000000e+00 9.99720945e+04 0.00000000e+00]
Compute the initial values of the van-der-Pol oscillator as follows
>>> f, t0, tmax, y0, df, *_ = astuple(vanderpol_jax())
>>> print(y0)
[2. 0.]
>>> prior = IBM(ordint=3, spatialdim=2)
>>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
>>> improved_initrv = initialize_odefilter_with_taylormode(f, y0, t0, prior, initrv)
>>> print(prior.proj2coord(0) @ improved_initrv.mean)
[2. 0.]
>>> print(improved_initrv.mean)
[ 2. 0. -2. 60. 0. -2. 60. -1798.]
>>> print(improved_initrv.std)
[0. 0. 0. 0. 0. 0. 0. 0.]
"""
try:
import jax.numpy as jnp
from jax.config import config
from jax.experimental.jet import jet
config.update("jax_enable_x64", True)
except ImportError as err:
raise ImportError(
"Cannot perform Taylor-mode initialisation without optional "
"dependencies jax and jaxlib. Try installing them via `pip install jax jaxlib`."
) from err
order = prior.ordint
def total_derivative(z_t):
"""Total derivative."""
z, t = jnp.reshape(z_t[:-1], z_shape), z_t[-1]
dz = jnp.ravel(f(t, z))
dt = jnp.array([1.0])
dz_t = jnp.concatenate((dz, dt))
return dz_t
z_shape = y0.shape
z_t = jnp.concatenate((jnp.ravel(y0), jnp.array([t0])))
derivs = []
derivs.extend(y0)
if order == 0:
all_derivs = pnss.Integrator._convert_derivwise_to_coordwise(
np.asarray(jnp.array(derivs)), ordint=0, spatialdim=len(y0)
)
return randvars.Normal(
np.asarray(all_derivs),
cov=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
cov_cholesky=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
)
(dy0, [*yns]) = jet(total_derivative, (z_t,), ((jnp.ones_like(z_t),),))
derivs.extend(dy0[:-1])
if order == 1:
all_derivs = pnss.Integrator._convert_derivwise_to_coordwise(
np.asarray(jnp.array(derivs)), ordint=1, spatialdim=len(y0)
)
return randvars.Normal(
np.asarray(all_derivs),
cov=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
cov_cholesky=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
)
for _ in range(1, order):
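        # Each jet call extends the Taylor expansion by one order; the newest coefficients are appended below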
(dy0, [*yns]) = jet(total_derivative, (z_t,), ((dy0, *yns),))
derivs.extend(yns[-2][:-1])
all_derivs = pnss.Integrator._convert_derivwise_to_coordwise(
jnp.array(derivs), ordint=order, spatialdim=len(y0)
)
return randvars.Normal(
np.asarray(all_derivs),
cov=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
cov_cholesky=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
)
|
[
"jax.config.config.update",
"jax.numpy.array",
"jax.numpy.ones_like",
"jax.numpy.reshape",
"jax.experimental.jet.jet",
"probnum.randvars.Normal",
"jax.numpy.concatenate",
"numpy.asarray",
"scipy.integrate.solve_ivp",
"numpy.zeros",
"numpy.arange",
"jax.numpy.ravel",
"numpy.diag",
"probnum.statespace.DiscreteLTIGaussian",
"probnum.filtsmooth.Kalman",
"numpy.sqrt"
] |
[((3620, 3634), 'numpy.asarray', 'np.asarray', (['y0'], {}), '(y0)\n', (3630, 3634), True, 'import numpy as np\n'), ((3762, 3779), 'numpy.zeros', 'np.zeros', (['ode_dim'], {}), '(ode_dim)\n', (3770, 3779), True, 'import numpy as np\n'), ((3796, 3824), 'numpy.zeros', 'np.zeros', (['(ode_dim, ode_dim)'], {}), '((ode_dim, ode_dim))\n', (3804, 3824), True, 'import numpy as np\n'), ((3839, 4004), 'probnum.statespace.DiscreteLTIGaussian', 'pnss.DiscreteLTIGaussian', (['proj_to_y', 'zeros_shift', 'zeros_cov'], {'proc_noise_cov_cholesky': 'zeros_cov', 'forward_implementation': '"""sqrt"""', 'backward_implementation': '"""sqrt"""'}), "(proj_to_y, zeros_shift, zeros_cov,\n proc_noise_cov_cholesky=zeros_cov, forward_implementation='sqrt',\n backward_implementation='sqrt')\n", (3863, 4004), True, 'import probnum.statespace as pnss\n'), ((4217, 4261), 'numpy.arange', 'np.arange', (['t0', '(t0 + (num_steps + 1) * h0)', 'h0'], {}), '(t0, t0 + (num_steps + 1) * h0, h0)\n', (4226, 4261), True, 'import numpy as np\n'), ((4272, 4387), 'scipy.integrate.solve_ivp', 'sci.solve_ivp', (['f', '(t0, t0 + (num_steps + 1) * h0)'], {'y0': 'y0', 'atol': '(1e-12)', 'rtol': '(1e-12)', 't_eval': 't_eval', 'method': 'method'}), '(f, (t0, t0 + (num_steps + 1) * h0), y0=y0, atol=1e-12, rtol=\n 1e-12, t_eval=t_eval, method=method)\n', (4285, 4387), True, 'import scipy.integrate as sci\n'), ((4947, 4968), 'numpy.diag', 'np.diag', (['initcov_diag'], {}), '(initcov_diag)\n', (4954, 4968), True, 'import numpy as np\n'), ((5036, 5101), 'probnum.randvars.Normal', 'randvars.Normal', (['initmean', 'initcov'], {'cov_cholesky': 'initcov_cholesky'}), '(initmean, initcov, cov_cholesky=initcov_cholesky)\n', (5051, 5101), False, 'from probnum import randvars\n'), ((5115, 5150), 'probnum.filtsmooth.Kalman', 'pnfs.Kalman', (['prior', 'measmod', 'initrv'], {}), '(prior, measmod, initrv)\n', (5126, 5150), True, 'import probnum.filtsmooth as pnfs\n'), ((5000, 5021), 'numpy.sqrt', 'np.sqrt', (['initcov_diag'], {}), '(initcov_diag)\n', (5007, 5021), True, 'import numpy as np\n'), ((8173, 8210), 'jax.config.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (8186, 8210), False, 'from jax.config import config\n'), ((8645, 8661), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (8654, 8661), True, 'import jax.numpy as jnp\n'), ((8677, 8702), 'jax.numpy.concatenate', 'jnp.concatenate', (['(dz, dt)'], {}), '((dz, dt))\n', (8692, 8702), True, 'import jax.numpy as jnp\n'), ((9773, 9818), 'jax.experimental.jet.jet', 'jet', (['total_derivative', '(z_t,)', '((dy0, *yns),)'], {}), '(total_derivative, (z_t,), ((dy0, *yns),))\n', (9776, 9818), False, 'from jax.experimental.jet import jet\n'), ((9930, 9947), 'jax.numpy.array', 'jnp.array', (['derivs'], {}), '(derivs)\n', (9939, 9947), True, 'import jax.numpy as jnp\n'), ((10025, 10047), 'numpy.asarray', 'np.asarray', (['all_derivs'], {}), '(all_derivs)\n', (10035, 10047), True, 'import numpy as np\n'), ((4640, 4659), 'numpy.diag', 'np.diag', (['initrv.cov'], {}), '(initrv.cov)\n', (4647, 4659), True, 'import numpy as np\n'), ((8560, 8590), 'jax.numpy.reshape', 'jnp.reshape', (['z_t[:-1]', 'z_shape'], {}), '(z_t[:-1], z_shape)\n', (8571, 8590), True, 'import jax.numpy as jnp\n'), ((8774, 8787), 'jax.numpy.ravel', 'jnp.ravel', (['y0'], {}), '(y0)\n', (8783, 8787), True, 'import jax.numpy as jnp\n'), ((8789, 8804), 'jax.numpy.array', 'jnp.array', (['[t0]'], {}), '([t0])\n', (8798, 8804), True, 'import jax.numpy as jnp\n'), ((9063, 9085), 
'numpy.asarray', 'np.asarray', (['all_derivs'], {}), '(all_derivs)\n', (9073, 9085), True, 'import numpy as np\n'), ((9551, 9573), 'numpy.asarray', 'np.asarray', (['all_derivs'], {}), '(all_derivs)\n', (9561, 9573), True, 'import numpy as np\n'), ((8959, 8976), 'jax.numpy.array', 'jnp.array', (['derivs'], {}), '(derivs)\n', (8968, 8976), True, 'import jax.numpy as jnp\n'), ((9283, 9301), 'jax.numpy.ones_like', 'jnp.ones_like', (['z_t'], {}), '(z_t)\n', (9296, 9301), True, 'import jax.numpy as jnp\n'), ((9447, 9464), 'jax.numpy.array', 'jnp.array', (['derivs'], {}), '(derivs)\n', (9456, 9464), True, 'import jax.numpy as jnp\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 12:05:08 2018
@author: Alexandre
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import pendulum
from pyro.control import nonlinear
from pyro.control import robotcontrollers
from pyro.planning import plan
from pyro.analysis import simulation
###############################################################################
sys = pendulum.SinglePendulum()
###############################################################################
# Planning
traj = plan.load_trajectory('rrt.npy')
q_goal = np.array([-3.14])
###############################################################################
# P
kp = 5
kd = 0
ki = 0
p_ctl = robotcontrollers.JointPID(1, kp, ki, kd)
p_ctl.rbar = q_goal
# PD
kp = 5
kd = 2
ki = 0
pd_ctl = robotcontrollers.JointPID(1, kp, ki, kd)
pd_ctl.rbar = q_goal
# PID
kp = 5
kd = 2
ki = 1
pid_ctl = robotcontrollers.JointPID(1, kp, ki, kd)
pid_ctl.rbar = q_goal
# Computed Torque
ctc_ctl = nonlinear.ComputedTorqueController(sys)
ctc_ctl.rbar = q_goal
ctc_ctl.w0 = 2.0
ctc_ctl.zeta = 0.8
# Sliding Mode
sld_ctl = nonlinear.SlidingModeController(sys)
sld_ctl.lam = 1
sld_ctl.gain = 5
sld_ctl.rbar = q_goal
# OpenLoop with traj
traj_ctl = plan.OpenLoopController(traj)
# Computed Torque with traj
traj_ctc_ctl = nonlinear.ComputedTorqueController(sys, traj)
traj_ctc_ctl.rbar = q_goal
traj_ctc_ctl.w0 = 2.0
traj_ctc_ctl.zeta = 0.8
# Sliding Mode with traj
traj_sld_ctl = nonlinear.SlidingModeController(sys, traj)
traj_sld_ctl.lam = 1
traj_sld_ctl.gain = 5
traj_sld_ctl.rbar = q_goal
###############################################################################
# Controller selection
#ctl = p_ctl
#ctl = pd_ctl
#ctl = pid_ctl
#ctl = ctc_ctl
#ctl = sld_ctl
#ctl = traj_ctl
#ctl = traj_ctc_ctl
ctl = traj_sld_ctl
###############################################################################
# New cl-dynamic
cl_sys = ctl + sys
# Simultation
q0 = 0
tf = 10
cl_sys.sim = simulation.CLosedLoopSimulation(cl_sys, tf, tf * 1000 + 1, 'euler')
cl_sys.sim.x0 = np.array([q0,0])
cl_sys.sim.compute()
cl_sys.sim.plot('xu')
cl_sys.animate_simulation()
cl_sys.sim.phase_plane_trajectory(0,1)
|
[
"pyro.control.nonlinear.ComputedTorqueController",
"pyro.control.nonlinear.SlidingModeController",
"pyro.analysis.simulation.CLosedLoopSimulation",
"pyro.control.robotcontrollers.JointPID",
"pyro.dynamic.pendulum.SinglePendulum",
"pyro.planning.plan.load_trajectory",
"numpy.array",
"pyro.planning.plan.OpenLoopController"
] |
[((537, 562), 'pyro.dynamic.pendulum.SinglePendulum', 'pendulum.SinglePendulum', ([], {}), '()\n', (560, 562), False, 'from pyro.dynamic import pendulum\n'), ((666, 697), 'pyro.planning.plan.load_trajectory', 'plan.load_trajectory', (['"""rrt.npy"""'], {}), "('rrt.npy')\n", (686, 697), False, 'from pyro.planning import plan\n'), ((707, 724), 'numpy.array', 'np.array', (['[-3.14]'], {}), '([-3.14])\n', (715, 724), True, 'import numpy as np\n'), ((845, 885), 'pyro.control.robotcontrollers.JointPID', 'robotcontrollers.JointPID', (['(1)', 'kp', 'ki', 'kd'], {}), '(1, kp, ki, kd)\n', (870, 885), False, 'from pyro.control import robotcontrollers\n'), ((950, 990), 'pyro.control.robotcontrollers.JointPID', 'robotcontrollers.JointPID', (['(1)', 'kp', 'ki', 'kd'], {}), '(1, kp, ki, kd)\n', (975, 990), False, 'from pyro.control import robotcontrollers\n'), ((1058, 1098), 'pyro.control.robotcontrollers.JointPID', 'robotcontrollers.JointPID', (['(1)', 'kp', 'ki', 'kd'], {}), '(1, kp, ki, kd)\n', (1083, 1098), False, 'from pyro.control import robotcontrollers\n'), ((1158, 1197), 'pyro.control.nonlinear.ComputedTorqueController', 'nonlinear.ComputedTorqueController', (['sys'], {}), '(sys)\n', (1192, 1197), False, 'from pyro.control import nonlinear\n'), ((1293, 1329), 'pyro.control.nonlinear.SlidingModeController', 'nonlinear.SlidingModeController', (['sys'], {}), '(sys)\n', (1324, 1329), False, 'from pyro.control import nonlinear\n'), ((1421, 1450), 'pyro.planning.plan.OpenLoopController', 'plan.OpenLoopController', (['traj'], {}), '(traj)\n', (1444, 1450), False, 'from pyro.planning import plan\n'), ((1502, 1547), 'pyro.control.nonlinear.ComputedTorqueController', 'nonlinear.ComputedTorqueController', (['sys', 'traj'], {}), '(sys, traj)\n', (1536, 1547), False, 'from pyro.control import nonlinear\n'), ((1673, 1715), 'pyro.control.nonlinear.SlidingModeController', 'nonlinear.SlidingModeController', (['sys', 'traj'], {}), '(sys, traj)\n', (1704, 1715), False, 'from pyro.control import nonlinear\n'), ((2186, 2253), 'pyro.analysis.simulation.CLosedLoopSimulation', 'simulation.CLosedLoopSimulation', (['cl_sys', 'tf', '(tf * 1000 + 1)', '"""euler"""'], {}), "(cl_sys, tf, tf * 1000 + 1, 'euler')\n", (2217, 2253), False, 'from pyro.analysis import simulation\n'), ((2275, 2292), 'numpy.array', 'np.array', (['[q0, 0]'], {}), '([q0, 0])\n', (2283, 2292), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import paddle.v2 as paddle
import gzip
import sys
import data_provider
import numpy as np
def param():
return paddle.attr.Param(
initial_std=0.01,
initial_mean=0
)
def encoder(x_):
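    # Compress the 784-d input through fully connected layers of size 512 -> 256 -> 128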
x_ = paddle.layer.fc(
input=x_,
size=512,
act=paddle.activation.Sigmoid(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=256,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=128,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
return x_
def decoder(x_):
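    # Expand the 128-d code back through fully connected layers of size 128 -> 256 -> 512; output() maps it to 784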
x_ = paddle.layer.fc(
input=x_,
size=128,
act=paddle.activation.Sigmoid(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=256,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=512,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
return x_
def output(x_):
return paddle.layer.fc(
input=x_,
size=784,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
paddle.init(use_gpu=False, trainer_count=1)
x = paddle.layer.data(
name='x',
type=paddle.data_type.dense_vector(784)
)
y = encoder(x)
y = decoder(y)
y = output(y)
def train():
optimizer = paddle.optimizer.RMSProp(
learning_rate=1e-3,
regularization=paddle.optimizer.L2Regularization(rate=8e-4)
)
loss = paddle.layer.mse_cost(label=x, input=y)
parameters = paddle.parameters.create(loss)
trainer = paddle.trainer.SGD(
cost=loss,
parameters=parameters,
update_equation=optimizer
)
feeding = {'x': 0}
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 50 == 0:
print ("\n pass %d, Batch: %d cost: %f"
% (event.pass_id, event.batch_id, event.cost))
else:
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, paddle.event.EndPass):
with gzip.open('output/params_pass_%d.tar.gz' % event.pass_id, 'w') as f:
parameters.to_tar(f)
reader = data_provider.create_reader('train', 60000)
trainer.train(
paddle.batch(
reader=reader,
batch_size=128
),
feeding=feeding,
num_passes=20,
event_handler=event_handler
)
def test(model_path):
with gzip.open(model_path, 'r') as openFile:
parameters = paddle.parameters.Parameters.from_tar(openFile)
testset = [[x] for x in data_provider.fetch_testingset()['images'][:10]]
    # Run prediction with paddle.infer
result = paddle.infer(
input=testset,
parameters=parameters,
output_layer=y,
feeding={'x': 0}
)
return result, np.array(testset)
if __name__ == '__main__':
origin, result = test('output/params_pass_19.tar.gz')
np.save('origin.dat', origin)
np.save('result.dat', result)
|
[
"sys.stdout.write",
"paddle.v2.layer.mse_cost",
"sys.stdout.flush",
"data_provider.create_reader",
"paddle.v2.activation.Sigmoid",
"data_provider.fetch_testingset",
"paddle.v2.activation.Relu",
"paddle.v2.parameters.create",
"paddle.v2.init",
"numpy.save",
"paddle.v2.attr.Param",
"paddle.v2.data_type.dense_vector",
"paddle.v2.infer",
"paddle.v2.batch",
"paddle.v2.optimizer.L2Regularization",
"gzip.open",
"paddle.v2.trainer.SGD",
"numpy.array",
"paddle.v2.parameters.Parameters.from_tar"
] |
[((2494, 2537), 'paddle.v2.init', 'paddle.init', ([], {'use_gpu': '(False)', 'trainer_count': '(1)'}), '(use_gpu=False, trainer_count=1)\n', (2505, 2537), True, 'import paddle.v2 as paddle\n'), ((1207, 1258), 'paddle.v2.attr.Param', 'paddle.attr.Param', ([], {'initial_std': '(0.01)', 'initial_mean': '(0)'}), '(initial_std=0.01, initial_mean=0)\n', (1224, 1258), True, 'import paddle.v2 as paddle\n'), ((2838, 2877), 'paddle.v2.layer.mse_cost', 'paddle.layer.mse_cost', ([], {'label': 'x', 'input': 'y'}), '(label=x, input=y)\n', (2859, 2877), True, 'import paddle.v2 as paddle\n'), ((2896, 2926), 'paddle.v2.parameters.create', 'paddle.parameters.create', (['loss'], {}), '(loss)\n', (2920, 2926), True, 'import paddle.v2 as paddle\n'), ((2942, 3021), 'paddle.v2.trainer.SGD', 'paddle.trainer.SGD', ([], {'cost': 'loss', 'parameters': 'parameters', 'update_equation': 'optimizer'}), '(cost=loss, parameters=parameters, update_equation=optimizer)\n', (2960, 3021), True, 'import paddle.v2 as paddle\n'), ((3611, 3654), 'data_provider.create_reader', 'data_provider.create_reader', (['"""train"""', '(60000)'], {}), "('train', 60000)\n", (3638, 3654), False, 'import data_provider\n'), ((4101, 4190), 'paddle.v2.infer', 'paddle.infer', ([], {'input': 'testset', 'parameters': 'parameters', 'output_layer': 'y', 'feeding': "{'x': 0}"}), "(input=testset, parameters=parameters, output_layer=y, feeding=\n {'x': 0})\n", (4113, 4190), True, 'import paddle.v2 as paddle\n'), ((4352, 4381), 'numpy.save', 'np.save', (['"""origin.dat"""', 'origin'], {}), "('origin.dat', origin)\n", (4359, 4381), True, 'import numpy as np\n'), ((4386, 4415), 'numpy.save', 'np.save', (['"""result.dat"""', 'result'], {}), "('result.dat', result)\n", (4393, 4415), True, 'import numpy as np\n'), ((2584, 2618), 'paddle.v2.data_type.dense_vector', 'paddle.data_type.dense_vector', (['(784)'], {}), '(784)\n', (2613, 2618), True, 'import paddle.v2 as paddle\n'), ((3682, 3725), 'paddle.v2.batch', 'paddle.batch', ([], {'reader': 'reader', 'batch_size': '(128)'}), '(reader=reader, batch_size=128)\n', (3694, 3725), True, 'import paddle.v2 as paddle\n'), ((3884, 3910), 'gzip.open', 'gzip.open', (['model_path', '"""r"""'], {}), "(model_path, 'r')\n", (3893, 3910), False, 'import gzip\n'), ((3945, 3992), 'paddle.v2.parameters.Parameters.from_tar', 'paddle.parameters.Parameters.from_tar', (['openFile'], {}), '(openFile)\n', (3982, 3992), True, 'import paddle.v2 as paddle\n'), ((4243, 4260), 'numpy.array', 'np.array', (['testset'], {}), '(testset)\n', (4251, 4260), True, 'import numpy as np\n'), ((1374, 1401), 'paddle.v2.activation.Sigmoid', 'paddle.activation.Sigmoid', ([], {}), '()\n', (1399, 1401), True, 'import paddle.v2 as paddle\n'), ((1537, 1561), 'paddle.v2.activation.Relu', 'paddle.activation.Relu', ([], {}), '()\n', (1559, 1561), True, 'import paddle.v2 as paddle\n'), ((1697, 1721), 'paddle.v2.activation.Relu', 'paddle.activation.Relu', ([], {}), '()\n', (1719, 1721), True, 'import paddle.v2 as paddle\n'), ((1890, 1917), 'paddle.v2.activation.Sigmoid', 'paddle.activation.Sigmoid', ([], {}), '()\n', (1915, 1917), True, 'import paddle.v2 as paddle\n'), ((2053, 2077), 'paddle.v2.activation.Relu', 'paddle.activation.Relu', ([], {}), '()\n', (2075, 2077), True, 'import paddle.v2 as paddle\n'), ((2213, 2237), 'paddle.v2.activation.Relu', 'paddle.activation.Relu', ([], {}), '()\n', (2235, 2237), True, 'import paddle.v2 as paddle\n'), ((2407, 2431), 'paddle.v2.activation.Relu', 'paddle.activation.Relu', ([], {}), '()\n', (2429, 2431), True, 'import 
paddle.v2 as paddle\n'), ((2775, 2821), 'paddle.v2.optimizer.L2Regularization', 'paddle.optimizer.L2Regularization', ([], {'rate': '(0.0008)'}), '(rate=0.0008)\n', (2808, 2821), True, 'import paddle.v2 as paddle\n'), ((3365, 3386), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (3381, 3386), False, 'import sys\n'), ((3403, 3421), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3419, 3421), False, 'import sys\n'), ((3491, 3553), 'gzip.open', 'gzip.open', (["('output/params_pass_%d.tar.gz' % event.pass_id)", '"""w"""'], {}), "('output/params_pass_%d.tar.gz' % event.pass_id, 'w')\n", (3500, 3553), False, 'import gzip\n'), ((4021, 4053), 'data_provider.fetch_testingset', 'data_provider.fetch_testingset', ([], {}), '()\n', (4051, 4053), False, 'import data_provider\n')]
|
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
import cv2
IMAGE_SIZE = 64
# Resize an image to the specified square size, padding first so the aspect ratio is preserved
def resize_image(image, height = IMAGE_SIZE, width = IMAGE_SIZE):
top, bottom, left, right = (0, 0, 0, 0)
    # Get the image dimensions
h, w, _ = image.shape
    # For non-square images, find the longest edge
longest_edge = max(h, w)
    # Work out how many pixels must be added to the short edge to make it as long as the long edge
if h < longest_edge:
dh = longest_edge - h
top = dh // 2
bottom = dh - top
elif w < longest_edge:
dw = longest_edge - w
left = dw // 2
right = dw - left
else:
pass
BLACK = [0, 0, 0]
    # Pad the image so that height and width are equal; cv2.BORDER_CONSTANT fills the border with the colour given by value
constant = cv2.copyMakeBorder(image, top , bottom, left, right, cv2.BORDER_CONSTANT, value = BLACK)
    # Resize the image and return it
return cv2.resize(constant, (height, width))
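# Example: with IMAGE_SIZE = 64, a 100x80 input is first padded with black borders to
# 100x100 and then scaled down to 64x64, so the image is not distorted by the resize.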
# Read the training data
images = []
labels = []
def read_images(path_name):
for dir_item in os.listdir(path_name):
full_path = os.path.abspath(os.path.join(path_name, dir_item))
if os.path.isdir(full_path):
read_images(full_path)
else:
if dir_item.endswith('.jpg'):
print(full_path)
image = cv2.imread(full_path)
image = resize_image(image, IMAGE_SIZE, IMAGE_SIZE)
images.append(image)
labels.append(path_name)
return images,labels
# Load the training data from the given path
def load_dataset(path_name):
images,labels = read_images(path_name)
    # Convert all input images into a 4-D array of shape (number of images, IMAGE_SIZE, IMAGE_SIZE, 3)
    # Each image is 64 x 64 pixels with 3 colour values (RGB) per pixel
images = np.array(images)
labels = np.array([0 if label.endswith('yangwk') else 1 for label in labels])
return images, labels
if __name__ == '__main__':
path_name = './data/'
images, labels = load_dataset(path_name)
print(images.shape)
print(labels.shape)
|
[
"os.path.isdir",
"numpy.array",
"os.listdir",
"os.path.join"
] |
[((798, 819), 'os.listdir', 'os.listdir', (['path_name'], {}), '(path_name)\n', (808, 819), False, 'import os\n'), ((1350, 1366), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1358, 1366), True, 'import numpy as np\n'), ((891, 915), 'os.path.isdir', 'os.path.isdir', (['full_path'], {}), '(full_path)\n', (904, 915), False, 'import os\n'), ((851, 884), 'os.path.join', 'os.path.join', (['path_name', 'dir_item'], {}), '(path_name, dir_item)\n', (863, 884), False, 'import os\n')]
|
"""
Regularizer class for that also supports GPU code
<NAME> <EMAIL>
<NAME> <EMAIL>
March 04, 2018
"""
import arrayfire as af
import numpy as np
from opticaltomography import settings
np_complex_datatype = settings.np_complex_datatype
np_float_datatype = settings.np_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class Regularizer:
"""
Highest-level Regularizer class that is responsible for parsing user arguments to create proximal operators
All proximal operators operate on complex variables (real & imaginary part separately)
Pure Real:
pure_real: boolean, whether or not to enforce object to be purely real
Pure imaginary:
pure_imag: boolean, whether or not to enforce object to be purely imaginary
Positivity:
positivity_real(positivity_imag): boolean, whether or not to enforce positivity for real(imaginary) part
Negativity:
negativity_real(negativity_imag): boolean, whether or not to enforce negativity for real(imaginary) part
LASSO (L1 regularizer):
lasso: boolean, whether or not to use LASSO proximal operator
lasso_parameter: threshold for LASSO
Total variation (3D only):
total_variation: boolean, whether or not to use total variation regularization
total_variation_gpu: boolean, whether or not to use GPU implementation
total_variation_parameter: scalar, regularization parameter (lambda)
total_variation_maxitr: integer, number of each iteration for total variation
"""
def __init__(self, configs = None, verbose = True, **kwargs):
#Given all parameters, construct all proximal operators
self.prox_list = []
reg_params = kwargs
if configs != None:
reg_params = self._parseConfigs(configs)
#Purely real
if reg_params.get("pure_real", False):
self.prox_list.append(PureReal())
#Purely imaginary
if reg_params.get("pure_imag", False):
self.prox_list.append(Pureimag())
#Total Variation
if reg_params.get("total_variation", False):
if reg_params.get("total_variation_gpu", False):
self.prox_list.append(TotalVariationGPU(**reg_params))
else:
self.prox_list.append(TotalVariationCPU(**reg_params))
#L1 Regularizer (LASSO)
elif reg_params.get("lasso", False):
self.prox_list.append(Lasso(reg_params.get("lasso_parameter", 1.0)))
#Others
else:
#Positivity
positivity_real = reg_params.get("positivity_real", False)
positivity_imag = reg_params.get("positivity_imag", False)
if positivity_real or positivity_imag:
self.prox_list.append(Positivity(positivity_real, positivity_imag))
#Negativity
negativity_real = reg_params.get("negativity_real", False)
negativity_imag = reg_params.get("negativity_imag", False)
if negativity_real or negativity_imag:
self.prox_list.append(Negativity(negativity_real, negativity_imag))
if verbose:
for prox_op in self.prox_list:
print("Regularizer -", prox_op.proximal_name)
def _parseConfigs(self, configs):
params = {}
params["pure_real"] = configs.pure_real
params["pure_imag"] = configs.pure_imag
#Total variation
params["total_variation"] = configs.total_variation
params["total_variation_gpu"] = configs.total_variation_gpu
params["total_variation_maxitr"] = configs.max_iter_tv
params["total_variation_order"] = configs.order_tv
params["total_variation_parameter"] = configs.reg_tv
#LASSO
params["lasso"] = configs.lasso
params["lasso_parameter"] = configs.reg_lasso
#Positivity/Negativity
if configs.positivity_real[0]:
if configs.positivity_real[1] == "larger":
params["positivity_real"] = True
else:
params["negativity_real"] = True
if configs.positivity_imag[0]:
if configs.positivity_imag[1] == "larger":
params["positivity_imag"] = True
else:
params["negativity_imag"] = True
return params
def computeCost(self, x):
cost = 0.0
for prox_op in self.prox_list:
cost_temp = prox_op.computeCost(x)
if cost_temp != None:
cost += cost_temp
return cost
def applyRegularizer(self, x):
for prox_op in self.prox_list:
x = prox_op.computeProx(x)
return x
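# A minimal usage sketch (illustrative only; `x_obj` stands for a complex 3-D object
# estimate stored as an ArrayFire or NumPy array, and the parameter values are examples):
#
#   reg = Regularizer(positivity_real=True,
#                     total_variation=True,
#                     total_variation_gpu=False,
#                     total_variation_parameter=1e-3,
#                     total_variation_maxitr=15)
#   x_obj = reg.applyRegularizer(x_obj)  # applies each constructed proximal operator in turn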
class ProximalOperator():
def __init__(self, proximal_name):
self.proximal_name = proximal_name
def computeCost(self):
pass
def computeProx(self):
pass
def setParameter(self):
pass
def _boundRealValue(self, x, value = 0, flag_project = True):
"""If flag is true, only values that are greater than 'value' are preserved"""
if flag_project:
x[x < value] = 0
return x
class TotalVariationGPU(ProximalOperator):
def __init__(self, **kwargs):
proximal_name = "Total Variation"
parameter = kwargs.get("total_variation_parameter", 1.0)
maxitr = kwargs.get("total_variation_maxitr", 15)
self.order = kwargs.get("total_variation_order", 1)
self.pure_real = kwargs.get("pure_real", False)
self.pure_imag = kwargs.get("pure_imag", False)
#real part
if kwargs.get("positivity_real", False):
self.realProjector = lambda x: self._boundRealValue(x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "positivity_real")
elif kwargs.get("negativity_real", False):
self.realProjector = lambda x: -1.0 * self._boundRealValue(-1.0 * x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "negativity_real")
else:
self.realProjector = lambda x: x
#imaginary part
if kwargs.get("positivity_imag", False):
self.imagProjector = lambda x: self._boundRealValue(x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "positivity_imag")
elif kwargs.get("negativity_imag", False):
self.imagProjector = lambda x: -1.0 * self._boundRealValue(-1.0 * x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "negativity_imag")
else:
self.imagProjector = lambda x: x
self.setParameter(parameter, maxitr)
super().__init__(proximal_name)
def setParameter(self, parameter, maxitr):
self.parameter = parameter
self.maxitr = maxitr
def computeCost(self, x):
return None
def _computeTVNorm(self, x):
x_norm = x**2
x_norm = af.sum(x_norm, dim = 3)**0.5
x_norm[x_norm<1.0] = 1.0
return x_norm
def computeProx(self, x):
if self.pure_real:
x = self._computeProxReal(af.real(x), self.realProjector) + 1.0j * 0.0
elif self.pure_imag:
x = 1.0j *self._computeProxReal(af.imag(x), self.imagProjector)
else:
x = self._computeProxReal(af.real(x), self.realProjector) \
+ 1.0j * self._computeProxReal(af.imag(x), self.imagProjector)
return x
def _filterD(self, x, axis):
		assert axis < 3, "This function only supports arrays of up to 3 dimensions!"
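		# Each branch below forms a backward finite difference of the requested order along `axis`
		# using circular shifts: order 1 gives x[i] - x[i-1], order 2 gives x[i] - 2*x[i-1] + x[i-2],
		# and order 3 gives x[i] - 3*x[i-1] + 3*x[i-2] - x[i-3] (af.shift wraps at the boundary).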
if self.order == 1:
if axis == 0:
Dx = x - af.shift(x, 1, 0, 0)
elif axis == 1:
Dx = x - af.shift(x, 0, 1, 0)
else:
Dx = x - af.shift(x, 0, 0, 1)
elif self.order == 2:
if axis == 0:
Dx = x - 2*af.shift(x, 1, 0, 0) + af.shift(x, 2, 0, 0)
elif axis == 1:
Dx = x - 2*af.shift(x, 0, 1, 0) + af.shift(x, 0, 2, 0)
else:
Dx = x - 2*af.shift(x, 0, 0, 1) + af.shift(x, 0, 0, 2)
elif self.order == 3:
if axis == 0:
Dx = x - 3*af.shift(x, 1, 0, 0) + 3*af.shift(x, 2, 0, 0) - af.shift(x, 3, 0, 0)
elif axis == 1:
Dx = x - 3*af.shift(x, 0, 1, 0) + 3*af.shift(x, 0, 2, 0) - af.shift(x, 0, 3, 0)
else:
Dx = x - 3*af.shift(x, 0, 0, 1) + 3*af.shift(x, 0, 0, 2) - af.shift(x, 0, 0, 3)
else:
			raise NotImplementedError("filter orders larger than 3 are not implemented!")
return Dx
def _filterDT(self, x):
if self.order == 1:
DTx = x[:, :, :, 0] - af.shift(x[ :, :, :, 0], -1, 0, 0) + \
x[:, :, :, 1] - af.shift(x[ :, :, :, 1], 0, -1, 0) + \
x[:, :, :, 2] - af.shift(x[ :, :, :, 2], 0, 0, -1)
elif self.order == 2:
DTx = x[:, :, :, 0] - 2*af.shift(x[ :, :, :, 0], -1, 0, 0) + af.shift(x[ :, :, :, 0], -2, 0, 0) + \
x[:, :, :, 1] - 2*af.shift(x[ :, :, :, 1], 0, -1, 0) + af.shift(x[ :, :, :, 1], 0, -2, 0) + \
x[:, :, :, 2] - 2*af.shift(x[ :, :, :, 2], 0, 0, -1) + af.shift(x[ :, :, :, 2], 0, 0, -2)
elif self.order == 3:
DTx = x[:, :, :, 0] - 3*af.shift(x[ :, :, :, 0], -1, 0, 0) + 3*af.shift(x[ :, :, :, 0], -2, 0, 0) - af.shift(x[ :, :, :, 0], -3, 0, 0) + \
x[:, :, :, 1] - 3*af.shift(x[ :, :, :, 1], 0, -1, 0) + 3*af.shift(x[ :, :, :, 1], 0, -2, 0) - af.shift(x[ :, :, :, 1], 0, -3, 0) + \
x[:, :, :, 2] - 3*af.shift(x[ :, :, :, 2], 0, 0, -1) + 3*af.shift(x[ :, :, :, 2], 0, 0, -2) - af.shift(x[ :, :, :, 2], 0, 0, -3)
else:
			raise NotImplementedError("filter orders larger than 3 are not implemented!")
return DTx
def _computeProxReal(self, x, projector):
t_k = 1.0
u_k = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
u_k1 = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
u_hat = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
grad_u_hat = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], dtype = af_float_datatype)
def _gradUpdate():
grad_u_hat = x - self.parameter * self._filterDT(u_hat)
return grad_u_hat
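		# Accelerated projected-gradient loop on the dual TV problem: the t_k / beta updates follow
		# the usual FISTA momentum recurrence, and each dual variable u_k1 is renormalised by the
		# clamped norm from _computeTVNorm, i.e. projected back onto the per-voxel unit ball.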
for iteration in range(self.maxitr):
if iteration > 0:
grad_u_hat = _gradUpdate()
else:
grad_u_hat[:, :, :] = x
grad_u_hat = projector(grad_u_hat)
u_k1[ :, :, :, 0] = u_hat[ :, :, :, 0] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=0)
u_k1[ :, :, :, 1] = u_hat[ :, :, :, 1] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=1)
u_k1[ :, :, :, 2] = u_hat[ :, :, :, 2] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=2)
u_k1_norm = self._computeTVNorm(u_k1)
u_k1[ :, :, :, 0] /= u_k1_norm
u_k1[ :, :, :, 1] /= u_k1_norm
u_k1[ :, :, :, 2] /= u_k1_norm
t_k1 = 0.5 * (1.0 + (1.0 + 4.0*t_k**2)**0.5)
beta = (t_k - 1.0)/t_k1
u_hat = (1.0 + beta)*u_k1 - beta*u_k
if iteration < self.maxitr - 1:
u_k = u_k1.copy()
return projector(_gradUpdate())
class TotalVariationCPU(TotalVariationGPU):
def _computeTVNorm(self, x):
u_k1_norm = af.to_array(x)
u_k1_norm[:, :, :, :] *= u_k1_norm
u_k1_norm = af.sum(u_k1_norm, dim = 3)**0.5
u_k1_norm[u_k1_norm<1.0] = 1.0
return np.array(u_k1_norm)
def computeProx(self, x):
if self.pure_real:
x = self._computeProxReal(np.real(x), self.realProjector) + 1.0j * 0.0
elif self.pure_imag:
x = 1.0j *self._computeProxReal(np.imag(x), self.imagProjector)
else:
x = self._computeProxReal(np.real(x), self.realProjector) \
+ 1.0j * self._computeProxReal(np.imag(x), self.imagProjector)
return af.to_array(x)
def _computeProxReal(self, x, projector):
t_k = 1.0
u_k = np.zeros(x.shape + (3,), dtype = np_float_datatype);
u_k1 = u_k.copy()
u_hat = u_k.copy()
def _gradUpdate():
u_hat_af = af.to_array(u_hat)
DTu_hat = u_hat_af[:, :, :, 0] - af.shift(u_hat_af[ :, :, :, 0], -1, 0, 0) + \
u_hat_af[:, :, :, 1] - af.shift(u_hat_af[ :, :, :, 1], 0, -1, 0) + \
u_hat_af[:, :, :, 2] - af.shift(u_hat_af[ :, :, :, 2], 0, 0, -1)
grad_u_hat = x - np.array(self.parameter * DTu_hat)
return grad_u_hat
for iteration in range(self.maxitr):
if iteration > 0:
grad_u_hat = _gradUpdate()
else:
grad_u_hat = x.copy()
grad_u_hat = projector(grad_u_hat)
u_k1[ :, :, :, 0] = u_hat[ :, :, :, 0] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 0))
u_k1[ :, :, :, 1] = u_hat[ :, :, :, 1] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 1))
u_k1[ :, :, :, 2] = u_hat[ :, :, :, 2] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 2))
u_k1_norm = self._computeTVNorm(u_k1)
u_k1[ :, :, :] /= u_k1_norm[:, :, :, np.newaxis]
t_k1 = 0.5 * (1.0 + (1.0 + 4.0*t_k**2)**0.5)
beta = (t_k - 1.0)/t_k1
u_hat = (1.0 + beta)*u_k1 - beta*u_k
if iteration < self.maxitr - 1:
u_k = u_k1.copy()
return projector(_gradUpdate())
class Positivity(ProximalOperator):
"""Enforce positivity constraint on a complex variable's real & imaginary part."""
def __init__(self, positivity_real, positivity_imag, proximal_name = "Positivity"):
super().__init__(proximal_name)
self.real = positivity_real
self.imag = positivity_imag
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = self._boundRealValue(af.real(x), 0, self.real) +\
1.0j * self._boundRealValue(af.imag(x), 0, self.imag)
else:
x = self._boundRealValue(np.real(x), 0, self.real) +\
1.0j * self._boundRealValue(np.imag(x), 0, self.imag)
return x
class Negativity(Positivity):
"""Enforce positivity constraint on a complex variable's real & imaginary part."""
def __init__(self, negativity_real, negativity_imag):
super().__init__(negativity_real, negativity_imag, "Negativity")
def computeProx(self, x):
return (-1.) * super().computeProx((-1.) * x)
class PureReal(ProximalOperator):
"""Enforce real constraint on a complex, imaginary part will be cleared"""
def __init__(self):
super().__init__("Pure real")
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = af.real(x) + 1j*0.0
else:
x = np.real(x) + 1j*0.0
return x
class Pureimag(ProximalOperator):
"""Enforce imaginary constraint on a complex, real part will be cleared"""
def __init__(self):
super().__init__("Pure imaginary")
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = 1j*af.imag(x)
else:
x = 1j*x.imag
return x
class Lasso(ProximalOperator):
"""||x||_1 regularizer, soft thresholding with certain parameter"""
def __init__(self, parameter):
super().__init__("LASSO")
self.setParameter(parameter)
def _softThreshold(self, x):
if type(x).__module__ == "arrayfire.array":
#POTENTIAL BUG: af.sign implementation does not agree with documentation
x = (af.sign(x)-0.5)*(-2.0) * (af.abs(x) - self.parameter) * (af.abs(x) > self.parameter)
else:
x = np.sign(x) * (np.abs(x) - self.parameter) * (np.abs(x) > self.parameter)
return x
def setParameter(self, parameter):
self.parameter = parameter
def computeCost(self, x):
return af.norm(af.moddims(x, np.prod(x.shape)), norm_type = af.NORM.VECTOR_1)
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = self._softThreshold(af.real(x)) + 1.0j * self._softThreshold(af.imag(x))
else:
x = self._softThreshold(np.real(x)) + 1.0j * self._softThreshold(np.imag(x))
return x
#TODO: implement Tikhonov
class Tikhonov(ProximalOperator):
def __init__(self):
pass
def setParameter(self, parameter):
self.parameter = parameter
def computeCost(self, x):
pass
def computeProx(self, x):
return x
#TODO: implement pure amplitude constraint
class PureAmplitude(ProximalOperator):
def computeCost(self, x):
return None
def computeProx(self, x):
return x
#TODO: implement pure phase constraint
class PurePhase(ProximalOperator):
def computeCost(self, x):
return None
def computeProx(self, x):
return x
|
[
"arrayfire.to_array",
"arrayfire.abs",
"numpy.abs",
"arrayfire.sum",
"arrayfire.shift",
"arrayfire.imag",
"numpy.roll",
"numpy.zeros",
"numpy.prod",
"numpy.imag",
"numpy.array",
"numpy.real",
"numpy.sign",
"arrayfire.sign",
"arrayfire.real",
"arrayfire.constant"
] |
[((8794, 8879), 'arrayfire.constant', 'af.constant', (['(0.0)', 'x.shape[0]', 'x.shape[1]', 'x.shape[2]', '(3)'], {'dtype': 'af_float_datatype'}), '(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype=af_float_datatype\n )\n', (8805, 8879), True, 'import arrayfire as af\n'), ((8892, 8977), 'arrayfire.constant', 'af.constant', (['(0.0)', 'x.shape[0]', 'x.shape[1]', 'x.shape[2]', '(3)'], {'dtype': 'af_float_datatype'}), '(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype=af_float_datatype\n )\n', (8903, 8977), True, 'import arrayfire as af\n'), ((8990, 9075), 'arrayfire.constant', 'af.constant', (['(0.0)', 'x.shape[0]', 'x.shape[1]', 'x.shape[2]', '(3)'], {'dtype': 'af_float_datatype'}), '(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype=af_float_datatype\n )\n', (9001, 9075), True, 'import arrayfire as af\n'), ((9088, 9165), 'arrayfire.constant', 'af.constant', (['(0.0)', 'x.shape[0]', 'x.shape[1]', 'x.shape[2]'], {'dtype': 'af_float_datatype'}), '(0.0, x.shape[0], x.shape[1], x.shape[2], dtype=af_float_datatype)\n', (9099, 9165), True, 'import arrayfire as af\n'), ((10341, 10355), 'arrayfire.to_array', 'af.to_array', (['x'], {}), '(x)\n', (10352, 10355), True, 'import arrayfire as af\n'), ((10498, 10517), 'numpy.array', 'np.array', (['u_k1_norm'], {}), '(u_k1_norm)\n', (10506, 10517), True, 'import numpy as np\n'), ((10884, 10898), 'arrayfire.to_array', 'af.to_array', (['x'], {}), '(x)\n', (10895, 10898), True, 'import arrayfire as af\n'), ((10977, 11026), 'numpy.zeros', 'np.zeros', (['(x.shape + (3,))'], {'dtype': 'np_float_datatype'}), '(x.shape + (3,), dtype=np_float_datatype)\n', (10985, 11026), True, 'import numpy as np\n'), ((6166, 6187), 'arrayfire.sum', 'af.sum', (['x_norm'], {'dim': '(3)'}), '(x_norm, dim=3)\n', (6172, 6187), True, 'import arrayfire as af\n'), ((10424, 10448), 'arrayfire.sum', 'af.sum', (['u_k1_norm'], {'dim': '(3)'}), '(u_k1_norm, dim=3)\n', (10430, 10448), True, 'import arrayfire as af\n'), ((11120, 11138), 'arrayfire.to_array', 'af.to_array', (['u_hat'], {}), '(u_hat)\n', (11131, 11138), True, 'import arrayfire as af\n'), ((7810, 7843), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-1)'], {}), '(x[:, :, :, 2], 0, 0, -1)\n', (7818, 7843), True, 'import arrayfire as af\n'), ((11348, 11388), 'arrayfire.shift', 'af.shift', (['u_hat_af[:, :, :, 2]', '(0)', '(0)', '(-1)'], {}), '(u_hat_af[:, :, :, 2], 0, 0, -1)\n', (11356, 11388), True, 'import arrayfire as af\n'), ((11410, 11444), 'numpy.array', 'np.array', (['(self.parameter * DTu_hat)'], {}), '(self.parameter * DTu_hat)\n', (11418, 11444), True, 'import numpy as np\n'), ((13669, 13679), 'arrayfire.real', 'af.real', (['x'], {}), '(x)\n', (13676, 13679), True, 'import arrayfire as af\n'), ((13707, 13717), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (13714, 13717), True, 'import numpy as np\n'), ((14037, 14047), 'arrayfire.imag', 'af.imag', (['x'], {}), '(x)\n', (14044, 14047), True, 'import arrayfire as af\n'), ((14752, 14768), 'numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (14759, 14768), True, 'import numpy as np\n'), ((6321, 6331), 'arrayfire.real', 'af.real', (['x'], {}), '(x)\n', (6328, 6331), True, 'import arrayfire as af\n'), ((6768, 6788), 'arrayfire.shift', 'af.shift', (['x', '(1)', '(0)', '(0)'], {}), '(x, 1, 0, 0)\n', (6776, 6788), True, 'import arrayfire as af\n'), ((8136, 8169), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-2)'], {}), '(x[:, :, :, 2], 0, 0, -2)\n', (8144, 8169), True, 'import arrayfire as af\n'), ((10599, 10609), 'numpy.real', 
'np.real', (['x'], {}), '(x)\n', (10606, 10609), True, 'import numpy as np\n'), ((12808, 12818), 'arrayfire.real', 'af.real', (['x'], {}), '(x)\n', (12815, 12818), True, 'import arrayfire as af\n'), ((12949, 12959), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (12956, 12959), True, 'import numpy as np\n'), ((14498, 14507), 'arrayfire.abs', 'af.abs', (['x'], {}), '(x)\n', (14504, 14507), True, 'import arrayfire as af\n'), ((14541, 14551), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (14548, 14551), True, 'import numpy as np\n'), ((14586, 14595), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (14592, 14595), True, 'import numpy as np\n'), ((14903, 14913), 'arrayfire.real', 'af.real', (['x'], {}), '(x)\n', (14910, 14913), True, 'import arrayfire as af\n'), ((14994, 15004), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (15001, 15004), True, 'import numpy as np\n'), ((6424, 6434), 'arrayfire.imag', 'af.imag', (['x'], {}), '(x)\n', (6431, 6434), True, 'import arrayfire as af\n'), ((6493, 6503), 'arrayfire.real', 'af.real', (['x'], {}), '(x)\n', (6500, 6503), True, 'import arrayfire as af\n'), ((6826, 6846), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(1)', '(0)'], {}), '(x, 0, 1, 0)\n', (6834, 6846), True, 'import arrayfire as af\n'), ((6874, 6894), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(1)'], {}), '(x, 0, 0, 1)\n', (6882, 6894), True, 'import arrayfire as af\n'), ((6980, 7000), 'arrayfire.shift', 'af.shift', (['x', '(2)', '(0)', '(0)'], {}), '(x, 2, 0, 0)\n', (6988, 7000), True, 'import arrayfire as af\n'), ((7740, 7773), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-1)', '(0)'], {}), '(x[:, :, :, 1], 0, -1, 0)\n', (7748, 7773), True, 'import arrayfire as af\n'), ((8579, 8612), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-3)'], {}), '(x[:, :, :, 2], 0, 0, -3)\n', (8587, 8612), True, 'import arrayfire as af\n'), ((10702, 10712), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (10709, 10712), True, 'import numpy as np\n'), ((10771, 10781), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (10778, 10781), True, 'import numpy as np\n'), ((11263, 11303), 'arrayfire.shift', 'af.shift', (['u_hat_af[:, :, :, 1]', '(0)', '(-1)', '(0)'], {}), '(u_hat_af[:, :, :, 1], 0, -1, 0)\n', (11271, 11303), True, 'import arrayfire as af\n'), ((11727, 11757), 'numpy.roll', 'np.roll', (['grad_u_hat', '(1)'], {'axis': '(0)'}), '(grad_u_hat, 1, axis=0)\n', (11734, 11757), True, 'import numpy as np\n'), ((11846, 11876), 'numpy.roll', 'np.roll', (['grad_u_hat', '(1)'], {'axis': '(1)'}), '(grad_u_hat, 1, axis=1)\n', (11853, 11876), True, 'import numpy as np\n'), ((11965, 11995), 'numpy.roll', 'np.roll', (['grad_u_hat', '(1)'], {'axis': '(2)'}), '(grad_u_hat, 1, axis=2)\n', (11972, 11995), True, 'import numpy as np\n'), ((12887, 12897), 'arrayfire.imag', 'af.imag', (['x'], {}), '(x)\n', (12894, 12897), True, 'import arrayfire as af\n'), ((13028, 13038), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (13035, 13038), True, 'import numpy as np\n'), ((14467, 14476), 'arrayfire.abs', 'af.abs', (['x'], {}), '(x)\n', (14473, 14476), True, 'import arrayfire as af\n'), ((14555, 14564), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (14561, 14564), True, 'import numpy as np\n'), ((14944, 14954), 'arrayfire.imag', 'af.imag', (['x'], {}), '(x)\n', (14951, 14954), True, 'import arrayfire as af\n'), ((15035, 15045), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (15042, 15045), True, 'import numpy as np\n'), ((6565, 6575), 'arrayfire.imag', 'af.imag', (['x'], {}), '(x)\n', (6572, 6575), 
True, 'import arrayfire as af\n'), ((7063, 7083), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(2)', '(0)'], {}), '(x, 0, 2, 0)\n', (7071, 7083), True, 'import arrayfire as af\n'), ((7136, 7156), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(2)'], {}), '(x, 0, 0, 2)\n', (7144, 7156), True, 'import arrayfire as af\n'), ((7266, 7286), 'arrayfire.shift', 'af.shift', (['x', '(3)', '(0)', '(0)'], {}), '(x, 3, 0, 0)\n', (7274, 7286), True, 'import arrayfire as af\n'), ((8099, 8132), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-1)'], {}), '(x[:, :, :, 2], 0, 0, -1)\n', (8107, 8132), True, 'import arrayfire as af\n'), ((10843, 10853), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (10850, 10853), True, 'import numpy as np\n'), ((14441, 14451), 'arrayfire.sign', 'af.sign', (['x'], {}), '(x)\n', (14448, 14451), True, 'import arrayfire as af\n'), ((6956, 6976), 'arrayfire.shift', 'af.shift', (['x', '(1)', '(0)', '(0)'], {}), '(x, 1, 0, 0)\n', (6964, 6976), True, 'import arrayfire as af\n'), ((7374, 7394), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(3)', '(0)'], {}), '(x, 0, 3, 0)\n', (7382, 7394), True, 'import arrayfire as af\n'), ((7472, 7492), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(3)'], {}), '(x, 0, 0, 3)\n', (7480, 7492), True, 'import arrayfire as af\n'), ((7670, 7703), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-1)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -1, 0, 0)\n', (7678, 7703), True, 'import arrayfire as af\n'), ((8036, 8069), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-2)', '(0)'], {}), '(x[:, :, :, 1], 0, -2, 0)\n', (8044, 8069), True, 'import arrayfire as af\n'), ((8542, 8575), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-2)'], {}), '(x[:, :, :, 2], 0, 0, -2)\n', (8550, 8575), True, 'import arrayfire as af\n'), ((11178, 11218), 'arrayfire.shift', 'af.shift', (['u_hat_af[:, :, :, 0]', '(-1)', '(0)', '(0)'], {}), '(u_hat_af[:, :, :, 0], -1, 0, 0)\n', (11186, 11218), True, 'import arrayfire as af\n'), ((7039, 7059), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(1)', '(0)'], {}), '(x, 0, 1, 0)\n', (7047, 7059), True, 'import arrayfire as af\n'), ((7112, 7132), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(1)'], {}), '(x, 0, 0, 1)\n', (7120, 7132), True, 'import arrayfire as af\n'), ((7243, 7263), 'arrayfire.shift', 'af.shift', (['x', '(2)', '(0)', '(0)'], {}), '(x, 2, 0, 0)\n', (7251, 7263), True, 'import arrayfire as af\n'), ((8503, 8536), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-1)'], {}), '(x[:, :, :, 2], 0, 0, -1)\n', (8511, 8536), True, 'import arrayfire as af\n'), ((7217, 7237), 'arrayfire.shift', 'af.shift', (['x', '(1)', '(0)', '(0)'], {}), '(x, 1, 0, 0)\n', (7225, 7237), True, 'import arrayfire as af\n'), ((7351, 7371), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(2)', '(0)'], {}), '(x, 0, 2, 0)\n', (7359, 7371), True, 'import arrayfire as af\n'), ((7449, 7469), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(2)'], {}), '(x, 0, 0, 2)\n', (7457, 7469), True, 'import arrayfire as af\n'), ((7999, 8032), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-1)', '(0)'], {}), '(x[:, :, :, 1], 0, -1, 0)\n', (8007, 8032), True, 'import arrayfire as af\n'), ((8440, 8473), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-3)', '(0)'], {}), '(x[:, :, :, 1], 0, -3, 0)\n', (8448, 8473), True, 'import arrayfire as af\n'), ((7325, 7345), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(1)', '(0)'], {}), '(x, 0, 1, 0)\n', (7333, 7345), True, 
'import arrayfire as af\n'), ((7423, 7443), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(1)'], {}), '(x, 0, 0, 1)\n', (7431, 7443), True, 'import arrayfire as af\n'), ((7936, 7969), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-2)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -2, 0, 0)\n', (7944, 7969), True, 'import arrayfire as af\n'), ((8403, 8436), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-2)', '(0)'], {}), '(x[:, :, :, 1], 0, -2, 0)\n', (8411, 8436), True, 'import arrayfire as af\n'), ((7899, 7932), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-1)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -1, 0, 0)\n', (7907, 7932), True, 'import arrayfire as af\n'), ((8364, 8397), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-1)', '(0)'], {}), '(x[:, :, :, 1], 0, -1, 0)\n', (8372, 8397), True, 'import arrayfire as af\n'), ((8301, 8334), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-3)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -3, 0, 0)\n', (8309, 8334), True, 'import arrayfire as af\n'), ((8264, 8297), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-2)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -2, 0, 0)\n', (8272, 8297), True, 'import arrayfire as af\n'), ((8225, 8258), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-1)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -1, 0, 0)\n', (8233, 8258), True, 'import arrayfire as af\n')]
|
# <NAME>
# PandS project 2020
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Import data as pandas dataframe
iris_data = pd.read_csv('iris.data', header=None)
# assign column headers
iris_data.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
# A. Output a summary of each variable to a single txt file.
# Isolate columns according to data type
float_values = iris_data.iloc[:,0:4]
str_values = iris_data.iloc[:,4]
# Use describe function to summarise data
float_summary = float_values.describe()
str_summary = str_values.describe()
# Establish the 3 unique species values in str_values;
# unique() returns an array of the distinct species names.
str_summary = str_values.unique()
# Reshape the array into a single column and convert it to a dataframe
str_summary = str_summary[:, None]
str_summary = pd.DataFrame({"Species": str_summary[:, 0]})
# Format string variable summary
# Add column containing quantity of unique values
quantity = ['50', '50', '50']
str_summary['Count'] = quantity
# Rename rows in str_summary
str_summary.index = ['Species_A', 'Species_B', 'Species_C']
# Format summary output and write to text file
with open("iris_summary.txt", "w") as f:
heading = "SUMMARY OF VARIABLES IN IRIS DATASET"
f.write(heading + "\n")
f.write("=" * len(heading) + "\n\n\n\n")
heading2 = "NUMERIC VARIABLE SUMMARY"
f.write(heading2 + "\n")
f.write("=" * len(heading2) + "\n")
f.write(float_summary.to_string() + "\n\n\n\n")
heading3 = "DEPENDENT VARIABLE SUMMARY"
f.write(heading3 + "\n")
f.write("=" * len(heading3) + "\n")
f.write(str_summary.to_string() + "\n\n\n\n\n\n\n")
# B. Save a histogram of each variable to png files
# Assign each column to a variable for easier manipulation
sep_len = iris_data['sepal_length']
sep_width = iris_data['sepal_width']
pet_len = iris_data['petal_length']
pet_width = iris_data['petal_width']
species = iris_data['species']
# Write a function which outputs a histogram for each dataset variable and saves
# it as a png file.
# First for numeric variables
def var_hist(var_data, fig_num, x_label, y_label, title, filepath):
plt.figure(fig_num)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.hist(var_data, rwidth=0.9,)
plt.savefig(filepath)
plt.close() # Close figure so plot won't be displayed later
# Then for string variable
def var_hist2(var_data, fig_num, x_label, y_label, title, filepath):
plt.figure(fig_num)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.hist(var_data, bins=3, rwidth=0.9)
plt.xticks(np.arange(0,3))
plt.savefig(filepath)
plt.close()
# Call function for each variable
var_hist(sep_len, 1, 'sepal_length_cm', 'Frequency', 'Sepal Length', 'sepal_length.png')
var_hist(sep_width, 2, 'sepal_width_cm', 'Frequency', 'Sepal Width', 'sepal_width.png')
var_hist(pet_len, 3, 'petal_length_cm', 'Frequency', 'Petal Length', 'petal_length.png')
var_hist(pet_width, 4, 'petal_width_cm', 'Frequency', 'Petal Width', 'petal_width.png')
var_hist2(species, 5, 'species', 'Frequency', 'Iris Species', 'species.png')
# 4 axes on one figure for better visual comparison
fig, axs = plt.subplots(2, 2)
axs1 = axs[0, 0]
axs1.hist(sep_len, rwidth=0.9)
axs1.set_title('Sepal_Length_Cm')
axs1.set(ylabel='frequency')
axs2 = axs[0, 1]
axs2.hist(sep_width, rwidth=0.9)
axs2.set_title('Sepal_Width_Cm',)
axs2.set(ylabel='frequency')
axs3 = axs[1, 0]
axs3.hist(pet_len, rwidth=0.9)
axs3.set_title('Petal_Length_Cm')
axs3.set(ylabel='frequency')
axs4 = axs[1, 1]
axs4.hist(pet_width, rwidth=0.9)
axs4.set_title('Petal_Width_Cm')
axs4.set(ylabel='frequency')
#plt.show()
plt.close()
# C. Output a scatter plot of each pair of variables
# Scatter plot with matplotlib (no colour separation)
plt.scatter(sep_len, sep_width)
plt.xlabel('sepal_length')
plt.ylabel('sepal_width')
#plt.show()
plt.close()
# Write a function which outputs a scatter plot of each pair of variables.
# Each categorical variable (species of iris flower) is categorized by colour
def scatter(x, y):
sns.set(style="darkgrid", font_scale=1.25)
sns.lmplot(x, y, iris_data, fit_reg=False, hue='species')
plt.show()
plt.close()
# Call function for each pair of variables
scatter('sepal_length', 'sepal_width')
scatter('sepal_length', 'petal_length')
scatter('sepal_length', 'petal_width')
scatter('sepal_width', 'petal_length')
scatter('sepal_width', 'petal_width')
scatter('petal_length', 'petal_width')
# Output pairplot using kde to represent marginal distribution
sns.set(style='ticks', font_scale=1.25, color_codes=True)
sns.pairplot(iris_data, hue='species', diag_kind='kde')
plt.show()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.title",
"seaborn.set",
"seaborn.lmplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"pandas.read_csv",
"matplotlib.pyplot.close",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"numpy.arange",
"seaborn.pairplot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((171, 208), 'pandas.read_csv', 'pd.read_csv', (['"""iris.data"""'], {'header': 'None'}), "('iris.data', header=None)\n", (182, 208), True, 'import pandas as pd\n'), ((849, 893), 'pandas.DataFrame', 'pd.DataFrame', (["{'Species': str_summary[:, 0]}"], {}), "({'Species': str_summary[:, 0]})\n", (861, 893), True, 'import pandas as pd\n'), ((3231, 3249), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (3243, 3249), True, 'import matplotlib.pyplot as plt\n'), ((3715, 3726), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3724, 3726), True, 'import matplotlib.pyplot as plt\n'), ((3839, 3870), 'matplotlib.pyplot.scatter', 'plt.scatter', (['sep_len', 'sep_width'], {}), '(sep_len, sep_width)\n', (3850, 3870), True, 'import matplotlib.pyplot as plt\n'), ((3871, 3897), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sepal_length"""'], {}), "('sepal_length')\n", (3881, 3897), True, 'import matplotlib.pyplot as plt\n'), ((3898, 3923), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""sepal_width"""'], {}), "('sepal_width')\n", (3908, 3923), True, 'import matplotlib.pyplot as plt\n'), ((3936, 3947), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3945, 3947), True, 'import matplotlib.pyplot as plt\n'), ((4610, 4667), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'font_scale': '(1.25)', 'color_codes': '(True)'}), "(style='ticks', font_scale=1.25, color_codes=True)\n", (4617, 4667), True, 'import seaborn as sns\n'), ((4668, 4723), 'seaborn.pairplot', 'sns.pairplot', (['iris_data'], {'hue': '"""species"""', 'diag_kind': '"""kde"""'}), "(iris_data, hue='species', diag_kind='kde')\n", (4680, 4723), True, 'import seaborn as sns\n'), ((4724, 4734), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4732, 4734), True, 'import matplotlib.pyplot as plt\n'), ((2178, 2197), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (2188, 2197), True, 'import matplotlib.pyplot as plt\n'), ((2202, 2221), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (2212, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2245), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (2236, 2245), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2266), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2259, 2266), True, 'import matplotlib.pyplot as plt\n'), ((2271, 2301), 'matplotlib.pyplot.hist', 'plt.hist', (['var_data'], {'rwidth': '(0.9)'}), '(var_data, rwidth=0.9)\n', (2279, 2301), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2328), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {}), '(filepath)\n', (2318, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2344), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2342, 2344), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2514), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (2505, 2514), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2538), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (2529, 2538), True, 'import matplotlib.pyplot as plt\n'), ((2543, 2562), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (2553, 2562), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2583), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2576, 2583), True, 'import matplotlib.pyplot as plt\n'), ((2588, 2626), 'matplotlib.pyplot.hist', 'plt.hist', (['var_data'], {'bins': 
'(3)', 'rwidth': '(0.9)'}), '(var_data, bins=3, rwidth=0.9)\n', (2596, 2626), True, 'import matplotlib.pyplot as plt\n'), ((2662, 2683), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {}), '(filepath)\n', (2673, 2683), True, 'import matplotlib.pyplot as plt\n'), ((2688, 2699), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2697, 2699), True, 'import matplotlib.pyplot as plt\n'), ((4126, 4168), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""', 'font_scale': '(1.25)'}), "(style='darkgrid', font_scale=1.25)\n", (4133, 4168), True, 'import seaborn as sns\n'), ((4173, 4230), 'seaborn.lmplot', 'sns.lmplot', (['x', 'y', 'iris_data'], {'fit_reg': '(False)', 'hue': '"""species"""'}), "(x, y, iris_data, fit_reg=False, hue='species')\n", (4183, 4230), True, 'import seaborn as sns\n'), ((4235, 4245), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4243, 4245), True, 'import matplotlib.pyplot as plt\n'), ((4250, 4261), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4259, 4261), True, 'import matplotlib.pyplot as plt\n'), ((2642, 2657), 'numpy.arange', 'np.arange', (['(0)', '(3)'], {}), '(0, 3)\n', (2651, 2657), True, 'import numpy as np\n')]
|
# <NAME> - github.com/2b-t (2022)
# @file utilities_test.py
# @brief Different testing routines for utility functions for accuracy calculation and file import and export
import numpy as np
from parameterized import parameterized
from typing import Tuple
import unittest
from src.utilities import AccX, IO
class TestAccX(unittest.TestCase):
_shape = (10,20)
_disparities = [ ["disparity = 1", 1],
["disparity = 2", 2],
["disparity = 3", 3]
]
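    # Each parameterised test below builds synthetic groundtruth/prediction/mask arrays and
    # checks AccX.compute against the expected score: 1.0 when every masked pixel lies within
    # the disparity threshold, 0.0 when none do or when the mask is all zeros.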
@parameterized.expand(_disparities)
def test_same_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two identical images result in an accuracy measure of unity
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = mag*np.ones(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 1.0, places=7)
return
@parameterized.expand(_disparities)
def test_slightly_shifted_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if an image and its slightly shifted counterpart result in an accuracy measure of unity
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = (mag+threshold_disparity-1)*np.ones(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 1.0, places=7)
return
@parameterized.expand(_disparities)
def test_no_mask(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two identical images with no given mask result in an accuracy measure of unity
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = mag*np.ones(groundtruth_image.shape)
mask_image = None
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 1.0, places=7)
return
@parameterized.expand(_disparities)
def test_inverse_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two inverse images result in an accuracy measure of zero
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = np.zeros(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 0.0, places=7)
return
@parameterized.expand(_disparities)
def test_significantly_shifted_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if an image and its significantly shifted counterpart result in an accuracy measure of zero
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = (mag+threshold_disparity+1)*np.ones(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 0.0, places=7)
return
@parameterized.expand(_disparities)
def test_zero_mask(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two equal images with a mask of zero results in an accuracy measure of zero
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = groundtruth_image
mask_image = np.zeros(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 0.0, places=7)
return
class TestIO(unittest.TestCase):
_resolutions = [ ["resolution = (10, 20)", (10, 20)],
["resolution = (30, 4)", (30, 4)],
["resolution = (65, 24)", (65, 24)]
]
def test_import_image(self) -> None:
# TODO(tobit): Implement
pass
def test_export_image(self) -> None:
# TODO(tobit): Implement
pass
def test_str_comma(self) -> None:
# Function for testing conversion of numbers to comma-separated numbers
self.assertEqual(IO._str_comma(10, 2), "10")
self.assertEqual(IO._str_comma(9.3, 2), "9,3")
self.assertEqual(IO._str_comma(1.234, 2), "1,23")
return
@parameterized.expand(_resolutions)
def test_normalise_positive_image_no_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
# Function for testing normalising a positive image with a no ground-truth should result in a positive image
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
image = mag*np.ones(shape)
groundtruth_image = None
result = IO.normalise_image(image, groundtruth_image)
self.assertGreaterEqual(np.min(result), 0.0)
self.assertLessEqual(np.max(result), 1.0)
return
@parameterized.expand(_resolutions)
def test_normalise_positive_image_positive_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
# Function for testing normalising a regular image with a regular ground-truth should result in a positive image
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
image = mag*np.ones(shape)
groundtruth_image = 2*image
result = IO.normalise_image(image, groundtruth_image)
self.assertGreaterEqual(np.min(result), 0.0)
self.assertLessEqual(np.max(result), 1.0)
return
@parameterized.expand(_resolutions)
def test_normalise_negative_image_positive_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
# Function for testing normalising a negative image which should result in a ValueError
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
groundtruth_image = mag*np.ones(shape)
image = -2*groundtruth_image
self.assertRaises(ValueError, IO.normalise_image, image, groundtruth_image)
return
@parameterized.expand(_resolutions)
def test_normalise_positive_image_negative_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
# Function for testing normalising a negative ground-truth which should result in a ValueError
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
image = mag*np.ones(shape)
groundtruth_image = -2*image
self.assertRaises(ValueError, IO.normalise_image, image, groundtruth_image)
return
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"src.utilities.IO._str_comma",
"src.utilities.AccX.compute",
"numpy.zeros",
"numpy.ones",
"src.utilities.IO.normalise_image",
"parameterized.parameterized.expand",
"numpy.min",
"numpy.max"
] |
[((509, 543), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (529, 543), False, 'from parameterized import parameterized\n'), ((1222, 1256), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (1242, 1256), False, 'from parameterized import parameterized\n'), ((1998, 2032), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (2018, 2032), False, 'from parameterized import parameterized\n'), ((2699, 2733), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (2719, 2733), False, 'from parameterized import parameterized\n'), ((3409, 3443), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (3429, 3443), False, 'from parameterized import parameterized\n'), ((4192, 4226), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (4212, 4226), False, 'from parameterized import parameterized\n'), ((5563, 5597), 'parameterized.parameterized.expand', 'parameterized.expand', (['_resolutions'], {}), '(_resolutions)\n', (5583, 5597), False, 'from parameterized import parameterized\n'), ((6195, 6229), 'parameterized.parameterized.expand', 'parameterized.expand', (['_resolutions'], {}), '(_resolutions)\n', (6215, 6229), False, 'from parameterized import parameterized\n'), ((6836, 6870), 'parameterized.parameterized.expand', 'parameterized.expand', (['_resolutions'], {}), '(_resolutions)\n', (6856, 6870), False, 'from parameterized import parameterized\n'), ((7394, 7428), 'parameterized.parameterized.expand', 'parameterized.expand', (['_resolutions'], {}), '(_resolutions)\n', (7414, 7428), False, 'from parameterized import parameterized\n'), ((7972, 7987), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7985, 7987), False, 'import unittest\n'), ((1032, 1064), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (1039, 1064), True, 'import numpy as np\n'), ((1076, 1162), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (1088, 1162), False, 'from src.utilities import AccX, IO\n'), ((1806, 1838), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (1813, 1838), True, 'import numpy as np\n'), ((1850, 1936), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (1862, 1936), False, 'from src.utilities import AccX, IO\n'), ((2553, 2639), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (2565, 2639), False, 'from src.utilities import AccX, IO\n'), ((3168, 3201), 'numpy.zeros', 'np.zeros', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (3176, 3201), True, 'import numpy as np\n'), ((3219, 3251), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (3226, 3251), True, 'import numpy as np\n'), ((3263, 3349), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 
'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (3275, 3349), False, 'from src.utilities import AccX, IO\n'), ((4002, 4034), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (4009, 4034), True, 'import numpy as np\n'), ((4046, 4132), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (4058, 4132), False, 'from src.utilities import AccX, IO\n'), ((4711, 4744), 'numpy.zeros', 'np.zeros', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (4719, 4744), True, 'import numpy as np\n'), ((4756, 4842), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (4768, 4842), False, 'from src.utilities import AccX, IO\n'), ((6040, 6084), 'src.utilities.IO.normalise_image', 'IO.normalise_image', (['image', 'groundtruth_image'], {}), '(image, groundtruth_image)\n', (6058, 6084), False, 'from src.utilities import AccX, IO\n'), ((6681, 6725), 'src.utilities.IO.normalise_image', 'IO.normalise_image', (['image', 'groundtruth_image'], {}), '(image, groundtruth_image)\n', (6699, 6725), False, 'from src.utilities import AccX, IO\n'), ((934, 954), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (941, 954), True, 'import numpy as np\n'), ((982, 1014), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (989, 1014), True, 'import numpy as np\n'), ((1684, 1704), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (1691, 1704), True, 'import numpy as np\n'), ((1756, 1788), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (1763, 1788), True, 'import numpy as np\n'), ((2439, 2459), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (2446, 2459), True, 'import numpy as np\n'), ((2487, 2519), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (2494, 2519), True, 'import numpy as np\n'), ((3124, 3144), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (3131, 3144), True, 'import numpy as np\n'), ((3880, 3900), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (3887, 3900), True, 'import numpy as np\n'), ((3952, 3984), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (3959, 3984), True, 'import numpy as np\n'), ((4632, 4652), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (4639, 4652), True, 'import numpy as np\n'), ((5413, 5433), 'src.utilities.IO._str_comma', 'IO._str_comma', (['(10)', '(2)'], {}), '(10, 2)\n', (5426, 5433), False, 'from src.utilities import AccX, IO\n'), ((5462, 5483), 'src.utilities.IO._str_comma', 'IO._str_comma', (['(9.3)', '(2)'], {}), '(9.3, 2)\n', (5475, 5483), False, 'from src.utilities import AccX, IO\n'), ((5513, 5536), 'src.utilities.IO._str_comma', 'IO._str_comma', (['(1.234)', '(2)'], {}), '(1.234, 2)\n', (5526, 5536), False, 'from src.utilities import AccX, IO\n'), ((5983, 5997), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (5990, 5997), True, 'import numpy as np\n'), ((6113, 6127), 'numpy.min', 'np.min', (['result'], {}), '(result)\n', (6119, 6127), True, 
'import numpy as np\n'), ((6159, 6173), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (6165, 6173), True, 'import numpy as np\n'), ((6621, 6635), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (6628, 6635), True, 'import numpy as np\n'), ((6754, 6768), 'numpy.min', 'np.min', (['result'], {}), '(result)\n', (6760, 6768), True, 'import numpy as np\n'), ((6800, 6814), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (6806, 6814), True, 'import numpy as np\n'), ((7249, 7263), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (7256, 7263), True, 'import numpy as np\n'), ((7802, 7816), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (7809, 7816), True, 'import numpy as np\n')]
|
import Bio.SeqUtils.ProtParam
import os
import ASAP.FeatureExtraction as extract
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Chothia numbering definition for CDR regions
CHOTHIA_CDR = {'L': {'1': [24, 34], '2': [50, 56], '3': [89, 97]}, 'H':{'1': [26, 32], '2': [52, 56], '3': [95, 102]}}
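# Reading the table: CHOTHIA_CDR['H']['3'] = [95, 102] means heavy-chain CDR3 spans Chothia
# positions 95-102 inclusive; sequence_region() below treats residues outside these CDR
# windows as framework (FW) regions.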
canonical_direct = '../data/pigs_canonical.txt'
SET_NAME = 'IGHV'
IF_ONLY_HEAVY = True
CNT_DB = 1
CNT_TARGET = 1
REFERENCE_PATH_TESTCASE = '../testCase/IGHV/reference-IGHV/'
TARGETING_PATH_TESTCASE = '../testCase/IGHV/targeting-MMP-IGHV/'
TARGET_DESIRE_SIZE = 134 #44 #IGHV
targeting_direct = TARGETING_PATH_TESTCASE
reference_direct = REFERENCE_PATH_TESTCASE
Amino, Num, Germ, DatasetName, DatasetSize = extract.ReadAminoNumGerm(targeting_direct, reference_direct)
seq_id = []
for i, name in enumerate(DatasetName):
# if i<2:
# continue
tmp= [[] for j in range(int(DatasetSize[i]))]
# for every seq in that dataset
for j in range(int(DatasetSize[i])):
seq_name = name + '_' + str(j)
seq_id.append(seq_name)
# raw sequence
def sequence_raw():
def getSequenceHL(sname):
SH = ''.join(Amino['H'][sname])
SL = ''
if not IF_ONLY_HEAVY:
SL = ''.join(Amino['L'][sname])
return SL, SH
else:
return [SH]
with open('../results/'+SET_NAME +'_Sequence.csv','w') as fi:
fi.write('sequence name, ')
if not IF_ONLY_HEAVY:
fi.write('light chain, ')
fi.write('heavy chain\n')
for sname in seq_id:
fi.write(sname + ',' + ','.join(getSequenceHL(sname))+ '\n')
# sequence with numbering
def sequence_num():
def getSequenceHL_num(sname):
NH = ','.join(Num['H'][sname])
SH = ','.join(Amino['H'][sname])
NL = ','.join(Num['L'][sname])
SL = ','.join(Amino['L'][sname])
return NH, SH, NL, SL
with open('./Sequence_numbered.csv','w') as fi:
for sname in seq_id:
NH, SH, NL, SL = getSequenceHL_num(sname)
fi.write(sname + ' light num,' + NL + '\n')
fi.write(sname + ' light seq,' + SL + '\n')
fi.write(sname + ' heavy num,' + NH + '\n')
fi.write(sname + ' heavy seq,' + SH + '\n')
# sequence with region
def sequence_region():
def getSequenceHL_region(sname):
NH = Num['H'][sname]
HFW1, HCDR1, HFW2, HCDR2, HFW3, HCDR3, HFW4 = '', '', '', '', '', '', ''
for i, number in enumerate(NH):
if number[-1] >= 'A' and number[-1] <= 'Z':
num_i = int(number[:-1])
else:
num_i = int(number)
if num_i < CHOTHIA_CDR['H']['1'][0]:
HFW1 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['1'][1]:
HCDR1+= Amino['H'][sname][i]
elif num_i < CHOTHIA_CDR['H']['2'][0]:
HFW2 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['2'][1]:
HCDR2 += Amino['H'][sname][i]
elif num_i < CHOTHIA_CDR['H']['3'][0]:
HFW3 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['3'][1]:
HCDR3 += Amino['H'][sname][i]
else:
HFW4 += Amino['H'][sname][i]
if IF_ONLY_HEAVY:
return ''.join(HFW1), ''.join(HCDR1), ''.join(HFW2), ''.join(HCDR2), ''.join(HFW3), ''.join(HCDR3), ''.join(
HFW4)
else:
NL = Num['L'][sname]
LFW1, LCDR1, LFW2, LCDR2, LFW3, LCDR3, LFW4 = '', '', '', '', '', '', ''
for i, number in enumerate(NL):
if number[-1] >= 'A' and number[-1] <= 'Z':
num_i = int(number[:-1])
else:
num_i = int(number)
if num_i < CHOTHIA_CDR['L']['1'][0]:
LFW1 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['1'][1]:
LCDR1 += Amino['L'][sname][i]
elif num_i < CHOTHIA_CDR['L']['2'][0]:
LFW2 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['2'][1]:
LCDR2 += Amino['L'][sname][i]
elif num_i < CHOTHIA_CDR['L']['3'][0]:
LFW3 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['3'][1]:
LCDR3 += Amino['L'][sname][i]
else:
LFW4 += Amino['L'][sname][i]
return ''.join(LFW1), ''.join(LCDR1), ''.join(LFW2), ''.join(LCDR2), ''.join(LFW3), ''.join(LCDR3), ''.join(LFW4),\
''.join(HFW1), ''.join(HCDR1), ''.join(HFW2), ''.join(HCDR2), ''.join(HFW3), ''.join(HCDR3), ''.join(HFW4)
with open('../results/'+SET_NAME +'_Sequence_region.csv','w') as fi:
if IF_ONLY_HEAVY:
fi.write(
'sequence id, heavy chain FW1, heavy chain CDR1, heavy chain FW2, heavy chain CDR2, heavy chain FW3, heavy chain CDR3, heavy chain FW4\n')
else:
fi.write('sequence id, light chain FW1, light chain CDR1, light chain FW2, light chain CDR2, light chain FW3, light chain CDR3, light chain FW4, '+
'heavy chain FW1, heavy chain CDR1, heavy chain FW2, heavy chain CDR2, heavy chain FW3, heavy chain CDR3, heavy chain FW4\n')
for sname in seq_id:
fi.write(sname + ',' + ','.join(getSequenceHL_region(sname)) + '\n')
def feature_distribution():
from collections import Counter
    # build the same per-sequence rows as in feature(): the sequence id followed by
    # the name of every active feature value, so item[1:] are the feature-value names
    write_out = [[] for i in range(len(seq_id))]
    for i in range(len(seq_id)):
        write_out[i].append(seq_id[i])
        for idx, f in enumerate(AllFeatureVectors[i]):
            if f == 1:
                write_out[i].append(AllFeatureNames[idx])
for fi in range(1,12):
feat = []
for item in write_out:
feat.append(item[fi])
feat_count = Counter(feat)
sorted_count = sorted(feat_count.items(), key=lambda kv: kv[1], reverse=True)
if fi==11:
feat_type = sorted_count[0][0].split('_')[0]
else:
feat_type = sorted_count[0][0].split('_')[0] + sorted_count[0][0].split('_')[1]
        with open('./Features_distribution_'+feat_type+'.csv','w') as fout:
            for i in range(len(sorted_count)):
                fout.write(sorted_count[i][0]+','+str(sorted_count[i][1])+'\n')
def feature():
write_out = [[] for i in range(len(seq_id))]
for i in range(len(seq_id)):
write_out[i].append(seq_id[i])
for idx, f in enumerate(AllFeatureVectors[i]):
if f == 1:
write_out[i].append(AllFeatureNames[idx])
with open('../results/'+SET_NAME +'_Features.csv', 'w') as fi:
fi.write('sequence id, ')
if not IF_ONLY_HEAVY:
fi.write('light chain V region, light chain J region, ')
fi.write('heavy chain V region, heavy chain J region, ')
if not IF_ONLY_HEAVY:
fi.write('Canonical L1, Canonical L2, Canonical L3, ')
fi.write('Canonical H1, Canonical H2, Canonical H3, ' )
fi.write('PI, frequent positional motif\n')
for i in range(len(write_out)):
fi.write(','.join(write_out[i]) + '\n')
def correlation_feature():
###### plot correlation matrix
data = pd.DataFrame(AllFeatureVectors, columns=AllFeatureNames)
# print(AllFeatureVectors.shape)
corr = data.corr()
import numpy as np
corr = np.array(corr)
with open('../results/Pearson_feature_correlation.csv', 'w') as fi:
fi.write('Feature value 1, Feature value 2, Pearson coefficient\n')
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
# if str(corr[i][j])=='nan':
# print('nan', AllFeatureNames[i], AllFeatureNames[j])
fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(corr[i][j])+'\n')
# data.to_csv(r'../results/Feature_test.csv', header=True)
# fig = plt.figure(figsize=(100, 70))
# ax = fig.add_subplot(111)
# cax = ax.matshow(corr, cmap='seismic', vmin=-1, vmax =1)
# fig.colorbar(cax)
# ticks = np.arange(0, len(data.columns),1)
# ax.set_xticks(ticks)
# plt.xticks(rotation=90)
# ax.set_yticks(ticks)
# ax.set_xticklabels(data.columns)
# ax.set_yticklabels(data.columns)
# plt.savefig('../results/feature_correlation.png')
# corr = pd.DataFrame(corr, index=AllFeatureNames, columns=AllFeatureNames)
###### display pairwise correlation value
# au_corr = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool))
# au_corr = au_corr.stack().sort_values(ascending=False)
# au_corr = corr.unstack()
# au_corr.columns = [' 1', 'Feature 2', 'Pearson Correlation Value']
# au_corr = pd.DataFrame(au_corr.values, columns = ['Feature 1, Feature 2, Pearson Correlation Value'])
# au_corr.to_csv(r'../results/Pearson_feature_correlation.csv', header=True)
# print(len(au_corr))
# print(AllFeatureVectors[:, AllFeatureNames.index('Germ_LJ_IGKJ3*01')])
# print(AllFeatureVectors[:, AllFeatureNames.index('Canonical_L2_0')])
# def JaccardCoefficientAnalysis():
# df = pd.DataFrame(AllFeatureVectors, columns=AllFeatureNames)
#
# interest_feature=['Germ_HV_IGHV3-23*01', 'Canonical_H2_6', 'Germ_HJ_IGHJ4*02', 'Germ_HJ_IGHJ6*01', 'Germ_LV_IGKV1D-39*01',
# 'Canonical_H2_5', 'Germ_HJ_IGHJ4*01']
# jac_sim = np.eye(len(AllFeatureNames))
# for i in range(len(AllFeatureNames)):
# for j in range(i+1, len(AllFeatureNames)):
# if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
# continue
# a = AllFeatureVectors[:, i]
# b = AllFeatureVectors[:, j]
# aandb =0
# aorb = 0
# for k in range(len(a)):
# if a[k]==b[k] and a[k]==1:
# aandb +=1
# if a[k]==1 or b[k]==1:
# aorb +=1
# if aorb==0:
# jac_tmp=0
# else:
# jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
#
# jac_sim[i][j]=jac_tmp
# jac_sim[j][i]=jac_tmp
#
#
# with open('../results/Jaccard_feature_coefficient.csv', 'w') as fi:
# fi.write('Feature value 1, Feature value 2, Jaccard coefficient\n')
# for i in range(len(AllFeatureNames)):
# for j in range(i+1, len(AllFeatureNames)):
# if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
# continue
# fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(jac_sim[i][j])+'\n')
#
#
# fig = plt.figure(figsize=(100, 70))
# ax = fig.add_subplot(111)
# cax = ax.matshow(jac_sim, cmap='Blues', vmin=0, vmax =1)
# fig.colorbar(cax)
# ticks = np.arange(0, len(df.columns),1)
# ax.set_xticks(ticks)
# plt.xticks(rotation=90)
# ax.set_yticks(ticks)
# ax.set_xticklabels(df.columns)
# ax.set_yticklabels(df.columns)
# plt.savefig('../results/feature_coefficient.png')
#
# # print(AllFeatureVectors[:,AllFeatureNames.index('Germ_LJ_IGKJ3*01')])
# # print(AllFeatureVectors[:,AllFeatureNames.index('Canonical_L2_0*01')])
# # where(np.triu(np.ones(jac_sim.shape), k=1).astype(np.bool))
# # au_jac = jac_sim.where(np.triu(np.ones(jac_sim.shape), k=0).astype(np.bool))
# # au_jac = au_jac.stack().sort_values(ascending=False)
# # au_jac = jac_sim.unstack()
# # print(len(au_jac))
# # au_jac.to_csv(r'../results/Jaccard_feature_coefficient.csv', header=True)
def JaccardCoefficientAnalysis():
PDB_size = DatasetSize[0]
jac_sim_PDB = np.eye(len(AllFeatureNames))
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
a = AllFeatureVectors[:PDB_size, i]
b = AllFeatureVectors[:PDB_size, j]
aandb =0
aorb = 0
for k in range(len(a)):
if a[k]==b[k] and a[k]==1:
aandb +=1
if a[k]==1 or b[k]==1:
aorb +=1
if aorb==0:
jac_tmp=0
else:
jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] == 'Germ_HV_IGHV3-23*01' and AllFeatureNames[j] =='Canonical_H2_6':
# print(a, b, jac_tmp)
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
jac_sim_PDB[i][j]=jac_tmp
jac_sim_PDB[j][i]=jac_tmp
jac_sim_MMP = np.eye(len(AllFeatureNames))
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
a = AllFeatureVectors[PDB_size:, i]
b = AllFeatureVectors[PDB_size:, j]
aandb =0
aorb = 0
for k in range(len(a)):
if a[k]==b[k] and a[k]==1:
aandb +=1
if a[k]==1 or b[k]==1:
aorb +=1
if aorb==0:
jac_tmp=0
else:
jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
jac_sim_MMP[i][j]=jac_tmp
jac_sim_MMP[j][i]=jac_tmp
with open('../results/'+SET_NAME+'_Jaccard Feature Coefficient.csv', 'w') as fi:
fi.write('Feature value 1, Feature value 2, Jaccard coefficient for reference set, Jaccard coefficient for MMP-targeting set\n')
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(jac_sim_PDB[i][j])+','+ str(jac_sim_MMP[i][j])+'\n')
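# Added illustration (hedged, toy data): the Jaccard coefficient computed above is
# |A and B| / |A or B| over binary feature columns; the vectors below are invented.
def _jaccard_example():
    a = [1, 0, 1, 1]
    b = [1, 1, 0, 1]
    aandb = sum(1 for x, y in zip(a, b) if x == 1 and y == 1)  # 2
    aorb = sum(1 for x, y in zip(a, b) if x == 1 or y == 1)  # 4
    return float(aandb) / aorb if aorb else 0  # 0.5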
if __name__=='__main__':
sequence_raw()
sequence_region()
OneHotGerm, GermFeatureNames = extract.GetOneHotGerm(Germ, DatasetSize, DatasetName)
OneHotCanon, CanonFeatureNames = extract.GetOneHotCanon(canonical_direct, Amino, Num, DatasetSize, DatasetName)
CDRH3 = extract.GetCDRH3(Amino, Num)
OneHotPI, PIFeatureNames = extract.GetOneHotPI(CDRH3, DatasetSize, DatasetName)
MultiHotMotif, MotifFeatureNames = extract.MultiHotMotif(CDRH3, DatasetSize, DatasetName)
AllFeatureVectors, AllFeatureNames, _, _ = extract.GetFeatureVectors(OneHotGerm, GermFeatureNames, OneHotCanon, CanonFeatureNames, OneHotPI, PIFeatureNames, MultiHotMotif, MotifFeatureNames)
feature()
# correlation_feature()
JaccardCoefficientAnalysis()
|
[
"pandas.DataFrame",
"ASAP.FeatureExtraction.MultiHotMotif",
"ASAP.FeatureExtraction.GetOneHotGerm",
"ASAP.FeatureExtraction.GetOneHotCanon",
"ASAP.FeatureExtraction.GetFeatureVectors",
"numpy.array",
"ASAP.FeatureExtraction.GetCDRH3",
"collections.Counter",
"ASAP.FeatureExtraction.GetOneHotPI",
"ASAP.FeatureExtraction.ReadAminoNumGerm"
] |
[((728, 788), 'ASAP.FeatureExtraction.ReadAminoNumGerm', 'extract.ReadAminoNumGerm', (['targeting_direct', 'reference_direct'], {}), '(targeting_direct, reference_direct)\n', (752, 788), True, 'import ASAP.FeatureExtraction as extract\n'), ((7133, 7189), 'pandas.DataFrame', 'pd.DataFrame', (['AllFeatureVectors'], {'columns': 'AllFeatureNames'}), '(AllFeatureVectors, columns=AllFeatureNames)\n', (7145, 7189), True, 'import pandas as pd\n'), ((7284, 7298), 'numpy.array', 'np.array', (['corr'], {}), '(corr)\n', (7292, 7298), True, 'import numpy as np\n'), ((14457, 14510), 'ASAP.FeatureExtraction.GetOneHotGerm', 'extract.GetOneHotGerm', (['Germ', 'DatasetSize', 'DatasetName'], {}), '(Germ, DatasetSize, DatasetName)\n', (14478, 14510), True, 'import ASAP.FeatureExtraction as extract\n'), ((14548, 14626), 'ASAP.FeatureExtraction.GetOneHotCanon', 'extract.GetOneHotCanon', (['canonical_direct', 'Amino', 'Num', 'DatasetSize', 'DatasetName'], {}), '(canonical_direct, Amino, Num, DatasetSize, DatasetName)\n', (14570, 14626), True, 'import ASAP.FeatureExtraction as extract\n'), ((14639, 14667), 'ASAP.FeatureExtraction.GetCDRH3', 'extract.GetCDRH3', (['Amino', 'Num'], {}), '(Amino, Num)\n', (14655, 14667), True, 'import ASAP.FeatureExtraction as extract\n'), ((14699, 14751), 'ASAP.FeatureExtraction.GetOneHotPI', 'extract.GetOneHotPI', (['CDRH3', 'DatasetSize', 'DatasetName'], {}), '(CDRH3, DatasetSize, DatasetName)\n', (14718, 14751), True, 'import ASAP.FeatureExtraction as extract\n'), ((14791, 14845), 'ASAP.FeatureExtraction.MultiHotMotif', 'extract.MultiHotMotif', (['CDRH3', 'DatasetSize', 'DatasetName'], {}), '(CDRH3, DatasetSize, DatasetName)\n', (14812, 14845), True, 'import ASAP.FeatureExtraction as extract\n'), ((14893, 15048), 'ASAP.FeatureExtraction.GetFeatureVectors', 'extract.GetFeatureVectors', (['OneHotGerm', 'GermFeatureNames', 'OneHotCanon', 'CanonFeatureNames', 'OneHotPI', 'PIFeatureNames', 'MultiHotMotif', 'MotifFeatureNames'], {}), '(OneHotGerm, GermFeatureNames, OneHotCanon,\n CanonFeatureNames, OneHotPI, PIFeatureNames, MultiHotMotif,\n MotifFeatureNames)\n', (14918, 15048), True, 'import ASAP.FeatureExtraction as extract\n'), ((5731, 5744), 'collections.Counter', 'Counter', (['feat'], {}), '(feat)\n', (5738, 5744), False, 'from collections import Counter\n')]
|
import numpy as np
import tensorflow as tf
from rl.losses import QLearningLoss
from rl.algorithms import OnlineRLAlgorithm
from rl.runner import *
from rl.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from rl import util
from deeplearning.layers import Adam, RunningNorm
from deeplearning.schedules import LinearSchedule
from deeplearning import logger
from collections import deque
import time
class QLearning(OnlineRLAlgorithm):
def defaults(self):
return {
'lr': 1e-4,
'momentum': 0.9,
'beta2': 0.999,
'clip_norm': 10.,
'gamma': 0.99,
'learning_starts': int(1e5),
'exploration_timesteps': int(1e6),
'final_eps': 0.02,
'target_update_freq': int(1e4),
'prioritized_replay': True,
'huber_loss': True,
'buffer_size': int(1e6),
'replay_alpha': 0.6,
'replay_beta': 0.4,
't_beta_max': int(1e7)
}
def __init__(self,
logdir,
env_fn,
model_fn,
nenv,
rollout_length=1,
batch_size=32,
callback=None,
**kwargs
):
defaults = self.defaults()
for k in kwargs:
assert k in defaults, "Unknown argument: {}".format(k)
defaults.update(kwargs)
super().__init__(logdir, env_fn, model_fn, nenv, rollout_length, batch_size, callback, runner_flags=[], **defaults)
self.target_sync = tf.group([tf.assign(v1,v2) for v1,v2 in zip(self.loss.qtarg.variables(), self.loss.qvals.variables())])
if self.args.prioritized_replay:
self.buffer = PrioritizedReplayBuffer(self.args.buffer_size, alpha=self.args.replay_alpha)
else:
self.buffer = ReplayBuffer(self.args.buffer_size)
# determine if the network has a RunningNorm submodule that needs to be updated.
submods = self.opt.find_submodules_by_instance(RunningNorm)
self.rn = submods[0] if len(submods) > 0 else None
self.losses = deque(maxlen=100)
self.nsteps = 0
self.last_target_sync = (self.t // self.args.target_update_freq) * self.args.target_update_freq
self.beta_schedule = LinearSchedule(self.args.t_beta_max, 1.0, self.args.replay_beta)
self.eps_schedule = LinearSchedule(int(self.args.exploration_timesteps), self.args.final_eps, 1.0)
self._time_start = time.time()
self._t_start = self.t
def _def_loss(self, model_fn, env):
target_network = model_fn(env)
target_network.build('target', self.nenv, self.batch_size, trainable=False)
# extra network for double dqn. Tie variables with network
return QLearningLoss('loss', model_fn(env), model_fn(env), target_network, gamma=self.args.gamma, use_huber_loss=self.args.huber_loss)
def _def_opt(self, loss):
return Adam(
'opt',
loss,
lr=self.args.lr,
beta1=self.args.momentum,
beta2=self.args.beta2,
clip_norm=self.args.clip_norm
)
def _before_step(self):
if self.t == 0 or self.t - self.last_target_sync > self.args.target_update_freq:
self.target_sync.run()
self.last_target_sync = self.t
self.actor.update_eps(self.eps_schedule.value(self.t))
def _process_rollout(self, rollout):
self._update_buffer(rollout)
while len(self.buffer) < self.args.learning_starts and len(self.buffer) != self.args.buffer_size:
self._update_buffer(self.runner.rollout())
self.t += self.timesteps_per_step
if self.args.prioritized_replay:
obs, acs, rews, next_obs, dones, weights, self._inds = self.buffer.sample(self.nenv * self.batch_size, self.beta_schedule.value(self.t))
inputs=[obs, next_obs, next_obs, rews, acs, dones, weights[...,None]]
else:
obs, acs, rews, next_obs, dones = self.buffer.sample(self.nenv * self.batch_size)
inputs=[obs, next_obs, next_obs, rews, acs, dones]
return inputs
def _update_buffer(self, rollout):
if self.rn is not None:
x = np.asarray(rollout.obs)
self._update_running_norm(x.reshape([-1] + list(x.shape[2:])))
for i,obs in enumerate(rollout.obs):
next_obs = rollout.end_ob if i == len(rollout.obs) - 1 else rollout.obs[i+1]
for j in range(self.nenv):
ob = obs[j]
next_ob = next_obs[j]
ac = rollout.actions[i][j]
r = rollout.rewards[i][j]
done = rollout.dones[i][j]
self.buffer.add(ob, ac, r, next_ob, done)
def _update_model(self, data):
outs = self.opt.run(inputs=data, state=[], state_out=False, update=True, td=True)
if self.args.prioritized_replay:
self.buffer.update_priorities(self._inds, priorities=np.abs(outs['td'][:,0]) + 1e-6)
self.losses.append(outs['out'])
return outs
def _after_step(self, rollout, data, outs):
self.nsteps += 1
if self.nsteps % 100 == 0:
logger.log("========================| Timestep: {} |========================".format(self.t))
meanloss = np.mean(np.array(self.losses), axis=0)
# Logging stats...
logger.logkv('Loss', meanloss)
logger.logkv('timesteps', self.t)
logger.logkv('serial timesteps', self.t / self.nenv)
logger.logkv('mean episode length', np.mean(self.runner.get_episode_lengths()))
logger.logkv('mean episode reward', np.mean(self.runner.get_episode_rewards()))
logger.logkv('fps', int((self.t - self._t_start) / (time.time() - self._time_start)))
logger.logkv('time_elapsed', time.time() - self._time_start)
logger.logkv('time spent exploring', self.actor.eps)
logger.dumpkvs()
def _update_running_norm(self, x):
mean = x.mean(axis=0)
var = x.var(axis=0)
count = x.shape[0]
self.rn.update(mean, var, count)
def update_lr(self, new_lr):
self.opt.update_lr(new_lr)
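# Added sketch (hedged): the eps/beta schedules above are assumed to anneal linearly
# from an initial value to a final value over a fixed number of timesteps and then
# stay constant. This standalone helper only illustrates that idea; it is not the
# deeplearning.schedules.LinearSchedule API.
def _linear_anneal(t, total_steps, initial_value, final_value):
    frac = min(float(t) / total_steps, 1.0)
    return initial_value + frac * (final_value - initial_value)
# e.g. _linear_anneal(0, 1e6, 1.0, 0.02) -> 1.0 and _linear_anneal(2e6, 1e6, 1.0, 0.02) -> 0.02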
|
[
"numpy.abs",
"numpy.asarray",
"deeplearning.logger.dumpkvs",
"deeplearning.schedules.LinearSchedule",
"time.time",
"deeplearning.logger.logkv",
"deeplearning.layers.Adam",
"tensorflow.assign",
"numpy.array",
"rl.replay_buffer.PrioritizedReplayBuffer",
"rl.replay_buffer.ReplayBuffer",
"collections.deque"
] |
[((2065, 2082), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (2070, 2082), False, 'from collections import deque\n'), ((2240, 2304), 'deeplearning.schedules.LinearSchedule', 'LinearSchedule', (['self.args.t_beta_max', '(1.0)', 'self.args.replay_beta'], {}), '(self.args.t_beta_max, 1.0, self.args.replay_beta)\n', (2254, 2304), False, 'from deeplearning.schedules import LinearSchedule\n'), ((2439, 2450), 'time.time', 'time.time', ([], {}), '()\n', (2448, 2450), False, 'import time\n'), ((2902, 3021), 'deeplearning.layers.Adam', 'Adam', (['"""opt"""', 'loss'], {'lr': 'self.args.lr', 'beta1': 'self.args.momentum', 'beta2': 'self.args.beta2', 'clip_norm': 'self.args.clip_norm'}), "('opt', loss, lr=self.args.lr, beta1=self.args.momentum, beta2=self.\n args.beta2, clip_norm=self.args.clip_norm)\n", (2906, 3021), False, 'from deeplearning.layers import Adam, RunningNorm\n'), ((1673, 1749), 'rl.replay_buffer.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['self.args.buffer_size'], {'alpha': 'self.args.replay_alpha'}), '(self.args.buffer_size, alpha=self.args.replay_alpha)\n', (1696, 1749), False, 'from rl.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\n'), ((1790, 1825), 'rl.replay_buffer.ReplayBuffer', 'ReplayBuffer', (['self.args.buffer_size'], {}), '(self.args.buffer_size)\n', (1802, 1825), False, 'from rl.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\n'), ((4222, 4245), 'numpy.asarray', 'np.asarray', (['rollout.obs'], {}), '(rollout.obs)\n', (4232, 4245), True, 'import numpy as np\n'), ((5392, 5422), 'deeplearning.logger.logkv', 'logger.logkv', (['"""Loss"""', 'meanloss'], {}), "('Loss', meanloss)\n", (5404, 5422), False, 'from deeplearning import logger\n'), ((5435, 5468), 'deeplearning.logger.logkv', 'logger.logkv', (['"""timesteps"""', 'self.t'], {}), "('timesteps', self.t)\n", (5447, 5468), False, 'from deeplearning import logger\n'), ((5481, 5533), 'deeplearning.logger.logkv', 'logger.logkv', (['"""serial timesteps"""', '(self.t / self.nenv)'], {}), "('serial timesteps', self.t / self.nenv)\n", (5493, 5533), False, 'from deeplearning import logger\n'), ((5901, 5953), 'deeplearning.logger.logkv', 'logger.logkv', (['"""time spent exploring"""', 'self.actor.eps'], {}), "('time spent exploring', self.actor.eps)\n", (5913, 5953), False, 'from deeplearning import logger\n'), ((5966, 5982), 'deeplearning.logger.dumpkvs', 'logger.dumpkvs', ([], {}), '()\n', (5980, 5982), False, 'from deeplearning import logger\n'), ((1512, 1529), 'tensorflow.assign', 'tf.assign', (['v1', 'v2'], {}), '(v1, v2)\n', (1521, 1529), True, 'import tensorflow as tf\n'), ((5318, 5339), 'numpy.array', 'np.array', (['self.losses'], {}), '(self.losses)\n', (5326, 5339), True, 'import numpy as np\n'), ((5857, 5868), 'time.time', 'time.time', ([], {}), '()\n', (5866, 5868), False, 'import time\n'), ((4978, 5002), 'numpy.abs', 'np.abs', (["outs['td'][:, 0]"], {}), "(outs['td'][:, 0])\n", (4984, 5002), True, 'import numpy as np\n'), ((5782, 5793), 'time.time', 'time.time', ([], {}), '()\n', (5791, 5793), False, 'import time\n')]
|
import gi
import numpy.testing
import pint
import pyRestTable
import pytest
gi.require_version("Hkl", "5.0")
# NOTE: MUST call gi.require_version() BEFORE import hkl
from hkl.calc import A_KEV
from hkl.diffract import Constraint
from hkl import SimulatedE4CV
class Fourc(SimulatedE4CV):
...
@pytest.fixture(scope="function")
def fourc():
fourc = Fourc("", name="fourc")
fourc.wait_for_connection()
fourc._update_calc_energy()
return fourc
def test_calc_energy_permit(fourc):
assert fourc._calc_energy_update_permitted
fourc.energy_update_calc_flag.put(False)
assert not fourc._calc_energy_update_permitted
nrg = fourc.calc.energy
fourc.energy.put(5.989) # BTW: Cr K absorption edge
numpy.testing.assert_almost_equal(fourc.energy.get(), 5.989)
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._energy_changed()
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._energy_changed(fourc.energy.get())
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._energy_changed(5.989)
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._update_calc_energy()
numpy.testing.assert_almost_equal(fourc.calc.energy, 5.989)
# test that value argument is ignored
fourc._update_calc_energy(A_KEV / 1)
numpy.testing.assert_almost_equal(fourc.calc.energy, 5.989)
def test_energy(fourc):
numpy.testing.assert_almost_equal(fourc.energy.get(), fourc.calc.energy)
for nrg in (8.0, 8.04, 9.0, 0.931):
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
numpy.testing.assert_almost_equal(fourc.calc.wavelength, A_KEV / nrg)
def test_energy_offset(fourc):
assert fourc.energy_offset.get() == 0
nrg = 8.0
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), fourc.calc.energy)
for offset in (0.05, -0.1):
fourc.energy_offset.put(offset)
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.energy.get() + offset, fourc.calc.energy)
def test_energy_offset_units(fourc):
assert fourc.energy_offset.get() == 0
assert fourc.energy_units.get() == "keV"
fourc.energy_units.put("eV")
assert fourc.energy_units.get() == "eV"
nrg = 931
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.energy.get() / 1000, fourc.calc.energy)
for offset in (5, -6):
fourc.energy_offset.put(offset)
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal((fourc.energy.get() + offset) / 1000, fourc.calc.energy)
def test_energy_units_931eV(fourc):
assert fourc.energy_units.get() == "keV"
fourc.energy_units.put("eV")
assert fourc.energy_units.get() == "eV"
eV = 931
fourc.energy.put(eV)
numpy.testing.assert_almost_equal(fourc.energy.get(), eV)
numpy.testing.assert_almost_equal(fourc.calc.energy, eV / 1000)
def test_energy_units_issue79(fourc):
# issue #79
fourc.energy_units.put("eV")
fourc.energy_offset.put(0)
eV = 1746
fourc.energy.put(eV)
numpy.testing.assert_almost_equal(fourc.calc.energy, eV / 1000)
numpy.testing.assert_almost_equal(
# fmt: off
pint.Quantity(fourc.calc.energy, "keV").to(fourc.energy_units.get()).magnitude,
fourc.energy.get(),
# fmt: on
)
def test_energy_units_offset(fourc):
fourc.energy_units.put("keV")
fourc.energy.put(7.985)
fourc.energy_offset.put(0.015)
assert fourc.calc.energy == 8.0
assert round(fourc.energy.get(), 6) == 7.985
fourc.energy.put(8)
assert fourc.calc.energy == 8.015
assert round(fourc.energy.get(), 6) == 8
fourc.energy_offset.put(0.0)
assert fourc.calc.energy == 8.0
def test_energy_units_issue86(fourc):
# issue #86
# changing units or offset changes .energy, not .calc.energy
fourc.energy.put(8)
fourc.energy_offset.put(0.015)
fourc.energy_units.put("eV")
# test interim state when fourc.energy value has not changed but units have
assert round(fourc.calc.energy, 6) == 8.015e-3
assert round(fourc.energy.get(), 1) == 8
fourc.energy.put(8000)
assert round(fourc.calc.energy, 8) == 8.000015
assert round(fourc.energy.get(), 1) == 8000
fourc.energy_offset.put(15)
assert round(fourc.calc.energy, 8) == 8.015
assert round(fourc.energy.get(), 1) == 8000
fourc.energy.put(8000)
assert round(fourc.calc.energy, 8) == 8.015
assert round(fourc.energy.get(), 1) == 8000
def test_names(fourc):
assert fourc.geometry_name.get() == "E4CV"
assert fourc.class_name.get() == "Fourc"
def test_forward_solutions_table(fourc):
fourc.energy.put(A_KEV / 1.54)
# (100) has chi ~ 0 which poses occasional roundoff errors
# (sometimes -0.00000, sometimes 0.00000)
sol = fourc.forward(1, 0, 0)
assert pytest.approx(sol.omega, 1e-5) == -30
assert pytest.approx(sol.chi, 1e-5) == 0
assert pytest.approx(sol.phi, 1e-5) == -90
assert pytest.approx(sol.tth, 1e-5) == -60
fourc.apply_constraints({"tth": Constraint(0, 180, 0, True)})
tbl = fourc.forward_solutions_table(
# fmt: off
[
[1, 1, 0],
[1, 1, 1],
[100, 1, 1], # no solutions
]
# fmt: on
)
received = str(tbl).splitlines()
expected = [
"=========== ======== ===== ======== ==== =====",
"(hkl) solution omega chi phi tth ",
"=========== ======== ===== ======== ==== =====",
"[1, 1, 0] 0 45.0 45.0 90.0 90.0 ",
"[1, 1, 1] 0 60.0 35.26439 45.0 120.0",
"[100, 1, 1] none ",
"=========== ======== ===== ======== ==== =====",
]
for r, e in zip(received, expected):
assert r == e
def test_pa(fourc, capsys):
tbl = fourc.pa()
assert isinstance(tbl, pyRestTable.Table)
out, err = capsys.readouterr()
assert len(out) > 0
assert err == ""
out = [v.rstrip() for v in out.strip().splitlines()]
expected = [
"===================== ====================================================================",
"term value",
"===================== ====================================================================",
"diffractometer fourc",
"geometry E4CV",
"class Fourc",
"energy (keV) 8.00000",
"wavelength (angstrom) 1.54980",
"calc engine hkl",
"mode bissector",
"positions ===== =======",
" name value",
" ===== =======",
" omega 0.00000",
" chi 0.00000",
" phi 0.00000",
" tth 0.00000",
" ===== =======",
"constraints ===== ========= ========== ===== ====",
" axis low_limit high_limit value fit",
" ===== ========= ========== ===== ====",
" omega -180.0 180.0 0.0 True",
" chi -180.0 180.0 0.0 True",
" phi -180.0 180.0 0.0 True",
" tth -180.0 180.0 0.0 True",
" ===== ========= ========== ===== ====",
"sample: main ================ ===================================================",
" term value",
" ================ ===================================================",
" unit cell edges a=1.54, b=1.54, c=1.54",
" unit cell angles alpha=90.0, beta=90.0, gamma=90.0",
" [U] [[1. 0. 0.]",
" [0. 1. 0.]",
" [0. 0. 1.]]",
" [UB] [[ 4.07999046e+00 -2.49827363e-16 -2.49827363e-16]",
" [ 0.00000000e+00 4.07999046e+00 -2.49827363e-16]",
" [ 0.00000000e+00 0.00000000e+00 4.07999046e+00]]",
" ================ ===================================================",
"===================== ====================================================================",
]
assert len(out) == len(expected)
assert out == expected
def test_wh(fourc, capsys):
tbl = fourc.wh()
assert isinstance(tbl, pyRestTable.Table)
out, err = capsys.readouterr()
assert len(out) > 0
assert err == ""
out = [v.rstrip() for v in out.strip().splitlines()]
expected = [
"===================== ========= =========",
"term value axis_type",
"===================== ========= =========",
"diffractometer fourc",
"sample name main",
"energy (keV) 8.00000",
"wavelength (angstrom) 1.54980",
"calc engine hkl",
"mode bissector",
"h 0.0 pseudo",
"k 0.0 pseudo",
"l 0.0 pseudo",
"omega 0 real",
"chi 0 real",
"phi 0 real",
"tth 0 real",
"===================== ========= =========",
]
assert len(out) == len(expected)
assert out == expected
def test_show_constraints(fourc, capsys):
fourc.show_constraints()
out, err = capsys.readouterr()
assert len(out) > 0
assert err == ""
out = [v.rstrip() for v in out.strip().splitlines()]
expected = [
"===== ========= ========== ===== ====",
"axis low_limit high_limit value fit",
"===== ========= ========== ===== ====",
"omega -180.0 180.0 0.0 True",
"chi -180.0 180.0 0.0 True",
"phi -180.0 180.0 0.0 True",
"tth -180.0 180.0 0.0 True",
"===== ========= ========== ===== ====",
]
for r, e in zip(out, expected):
assert r.rstrip() == e.rstrip()
def test_apply_constraints(fourc):
fourc.energy.put(A_KEV / 1.54)
# fmt: off
fourc.apply_constraints(
{
"tth": Constraint(0, 180, 0, True),
"chi": Constraint(0, 180, 0, True),
}
)
# fmt: on
sol = fourc.forward(1, 0, 0)
assert pytest.approx(sol.omega, 1e-5) == 30
assert pytest.approx(sol.chi, 1e-5) == 0
assert pytest.approx(sol.phi, 1e-5) == 90
assert pytest.approx(sol.tth, 1e-5) == 60
def test_specify_engine():
import hkl
import numpy as np
from ophyd import Component as Cpt
from ophyd import PseudoSingle
from ophyd import SoftPositioner
class Q4C(hkl.E4CV):
q = Cpt(PseudoSingle, "")
omega = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
chi = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
phi = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
tth = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
q4c = Q4C("", name="q4c")
assert q4c.calc.engine.name == "hkl"
q4c = Q4C("", name="q4c", engine="q")
assert q4c.calc.engine.name == "q"
q = 1.0
angle = 2 * np.arcsin(q * q4c.calc.wavelength / 4 / np.pi) * 180 / np.pi
value = q4c.forward(q)
assert round(value.tth, 5) == round(angle, 5)
assert value.omega == 0.0
assert value.chi == 0.0
assert value.phi == 0.0
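# Added side note (hedged): test_specify_engine relies on the elastic-scattering relation
# q = (4 * pi / wavelength) * sin(tth / 2), hence tth = 2 * arcsin(q * wavelength / (4 * pi)).
# A small self-contained check of that algebra (the values are arbitrary):
def _check_q_tth_relation(q=1.0, wavelength=1.54):
    import numpy as np
    tth = 2 * np.arcsin(q * wavelength / 4 / np.pi)  # radians
    return bool(np.isclose(4 * np.pi * np.sin(tth / 2) / wavelength, q))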
|
[
"gi.require_version",
"pytest.fixture",
"numpy.arcsin",
"ophyd.Component",
"hkl.diffract.Constraint",
"pytest.approx",
"pint.Quantity"
] |
[((77, 109), 'gi.require_version', 'gi.require_version', (['"""Hkl"""', '"""5.0"""'], {}), "('Hkl', '5.0')\n", (95, 109), False, 'import gi\n'), ((301, 333), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (315, 333), False, 'import pytest\n'), ((5226, 5257), 'pytest.approx', 'pytest.approx', (['sol.omega', '(1e-05)'], {}), '(sol.omega, 1e-05)\n', (5239, 5257), False, 'import pytest\n'), ((5275, 5304), 'pytest.approx', 'pytest.approx', (['sol.chi', '(1e-05)'], {}), '(sol.chi, 1e-05)\n', (5288, 5304), False, 'import pytest\n'), ((5320, 5349), 'pytest.approx', 'pytest.approx', (['sol.phi', '(1e-05)'], {}), '(sol.phi, 1e-05)\n', (5333, 5349), False, 'import pytest\n'), ((5367, 5396), 'pytest.approx', 'pytest.approx', (['sol.tth', '(1e-05)'], {}), '(sol.tth, 1e-05)\n', (5380, 5396), False, 'import pytest\n'), ((11196, 11227), 'pytest.approx', 'pytest.approx', (['sol.omega', '(1e-05)'], {}), '(sol.omega, 1e-05)\n', (11209, 11227), False, 'import pytest\n'), ((11244, 11273), 'pytest.approx', 'pytest.approx', (['sol.chi', '(1e-05)'], {}), '(sol.chi, 1e-05)\n', (11257, 11273), False, 'import pytest\n'), ((11289, 11318), 'pytest.approx', 'pytest.approx', (['sol.phi', '(1e-05)'], {}), '(sol.phi, 1e-05)\n', (11302, 11318), False, 'import pytest\n'), ((11335, 11364), 'pytest.approx', 'pytest.approx', (['sol.tth', '(1e-05)'], {}), '(sol.tth, 1e-05)\n', (11348, 11364), False, 'import pytest\n'), ((11586, 11607), 'ophyd.Component', 'Cpt', (['PseudoSingle', '""""""'], {}), "(PseudoSingle, '')\n", (11589, 11607), True, 'from ophyd import Component as Cpt\n'), ((11624, 11675), 'ophyd.Component', 'Cpt', (['SoftPositioner'], {'limits': '(-180, 180)', 'init_pos': '(0)'}), '(SoftPositioner, limits=(-180, 180), init_pos=0)\n', (11627, 11675), True, 'from ophyd import Component as Cpt\n'), ((11690, 11741), 'ophyd.Component', 'Cpt', (['SoftPositioner'], {'limits': '(-180, 180)', 'init_pos': '(0)'}), '(SoftPositioner, limits=(-180, 180), init_pos=0)\n', (11693, 11741), True, 'from ophyd import Component as Cpt\n'), ((11756, 11807), 'ophyd.Component', 'Cpt', (['SoftPositioner'], {'limits': '(-180, 180)', 'init_pos': '(0)'}), '(SoftPositioner, limits=(-180, 180), init_pos=0)\n', (11759, 11807), True, 'from ophyd import Component as Cpt\n'), ((11822, 11873), 'ophyd.Component', 'Cpt', (['SoftPositioner'], {'limits': '(-180, 180)', 'init_pos': '(0)'}), '(SoftPositioner, limits=(-180, 180), init_pos=0)\n', (11825, 11873), True, 'from ophyd import Component as Cpt\n'), ((5440, 5467), 'hkl.diffract.Constraint', 'Constraint', (['(0)', '(180)', '(0)', '(True)'], {}), '(0, 180, 0, True)\n', (5450, 5467), False, 'from hkl.diffract import Constraint\n'), ((11045, 11072), 'hkl.diffract.Constraint', 'Constraint', (['(0)', '(180)', '(0)', '(True)'], {}), '(0, 180, 0, True)\n', (11055, 11072), False, 'from hkl.diffract import Constraint\n'), ((11093, 11120), 'hkl.diffract.Constraint', 'Constraint', (['(0)', '(180)', '(0)', '(True)'], {}), '(0, 180, 0, True)\n', (11103, 11120), False, 'from hkl.diffract import Constraint\n'), ((12056, 12102), 'numpy.arcsin', 'np.arcsin', (['(q * q4c.calc.wavelength / 4 / np.pi)'], {}), '(q * q4c.calc.wavelength / 4 / np.pi)\n', (12065, 12102), True, 'import numpy as np\n'), ((3577, 3616), 'pint.Quantity', 'pint.Quantity', (['fourc.calc.energy', '"""keV"""'], {}), "(fourc.calc.energy, 'keV')\n", (3590, 3616), False, 'import pint\n')]
|
from itertools import groupby
import numpy as np
def best_path(mat: np.ndarray, labels: str) -> str:
"""Best path (greedy) decoder.
Take best-scoring character per time-step, then remove repeated characters and CTC blank characters.
See dissertation of Graves, p63.
Args:
mat: Output of neural network of shape TxC.
labels: The set of characters the neural network can recognize, excluding the CTC-blank.
Returns:
The decoded text.
"""
# get char indices along best path
best_path_indices = np.argmax(mat, axis=1)
# collapse best path (using itertools.groupby), map to chars, join char list to string
blank_idx = len(labels)
best_chars_collapsed = [labels[k] for k, _ in groupby(best_path_indices) if k != blank_idx]
res = ''.join(best_chars_collapsed)
return res
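# Added usage sketch (invented probabilities): matrix rows are time-steps, columns are
# the characters of `labels` followed by the CTC blank in the last column.
def _best_path_example():
    toy_mat = np.array([[0.6, 0.3, 0.1],   # best: 'a'
                        [0.5, 0.3, 0.2],   # best: 'a' again -> collapsed
                        [0.1, 0.2, 0.7]])  # best: blank -> removed
    return best_path(toy_mat, 'ab')  # -> 'a'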
|
[
"itertools.groupby",
"numpy.argmax"
] |
[((554, 576), 'numpy.argmax', 'np.argmax', (['mat'], {'axis': '(1)'}), '(mat, axis=1)\n', (563, 576), True, 'import numpy as np\n'), ((747, 773), 'itertools.groupby', 'groupby', (['best_path_indices'], {}), '(best_path_indices)\n', (754, 773), False, 'from itertools import groupby\n')]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# use all cores
#import os
#os.system("taskset -p 0xff %d" % os.getpid())
pd.options.mode.chained_assignment = None  # deactivate pandas chained-assignment (slicing) warnings
def load_seattle_speed_matrix():
""" Loads the whole Seattle `speed_matrix_2015` into memory.
    Caution: loads roughly 200 MB of data.
:param:
:return df (pandas.DataFrame): speed matrix as DataFrame. Columns are sensors, rows are timestamps
"""
speed_matrix = './data/Seattle_Loop_Dataset/speed_matrix_2015'
print('Loading data...')
df = pd.read_pickle(speed_matrix)
df.index = pd.to_datetime(df.index, format='%Y-%m-%d %H:%M')
print('Load completed.')
return df
def best_moving_average(df, col, average_window_in_hours=27, from_date=None, to_date=None, plot=False):
""" Calculates the moving average in a window of `average_window_in_hours` hours and propagates
into the future.
    Beware! This code uses data from the future to perform predictions,
    meaning it is only meant to generate the "perfect" moving average baseline.
:param df (pandas.DataFrame): dataset being used
:param col (str): column for which the moving average will be applied
:param average_window_in_hours (int): the window (in hours) used to generate predictions
:param from_date (str): initial date to be shown in the plot, format: "YYYY-MM-DD"
:param to_date (str): end date to be shown in the plot
:param plot (bool): plot moving average and original df
:return MAE, RMSE (tuple): Both metrics are calculated for the column `col`
"""
ndf = df[[col]]
window_size = average_window_in_hours*12
ndf['preds'] = ndf.rolling(window=window_size).mean().shift(1)
MAE = ndf.apply((lambda x: np.abs(x[0] - x[1])), axis=1).dropna().mean()
RMSE = np.sqrt(ndf.apply((lambda x: np.power(x[0] - x[1], 2)), axis=1).dropna().mean())
if plot:
if from_date is not None and to_date is not None:
ndf.resample('1h').mean().loc[from_date:to_date].plot(figsize=(12, 7))
else:
ndf.resample('1h').mean()[:500].plot(figsize=(12, 7))
plt.show()
return (MAE, RMSE)
def calculate_metrics(df, average_window_in_hours, verbose=5, save=True):
""" Calculates MAE and RMSE for all columns of `df`, taking a sliding window of `average_window_in_hours` hours.
    :param df (pandas.DataFrame): dataset being used
:param average_window_in_hours (int): the window (in hours) used to generate predictions
:param verbose (int): option to display the calculations on-the-fly.
Values are going to be displayed after `verbose` iterations.
    :param save (bool): if True, write the per-column (MAE, RMSE) results to a CSV file
:return mae_and_rmse (dict): dictionary containing (MAE, RMSE) for each column of `df`
"""
mae_and_rmse = {}
for (it, col) in enumerate(df.columns):
MAE, RMSE = best_moving_average(df, col, average_window_in_hours)
mae_and_rmse[col] = (MAE, RMSE)
if it%verbose == 0:
print('Column: {}, MAE: {}, RMSE: {}'.format(col, MAE, RMSE))
if save:
# TODO: add param to attribute filename and filedir
        pd.DataFrame(mae_and_rmse, index=['MAE', 'RMSE']).to_csv('./experiment_results/seattle_best_moving_average_mae_rmse.csv')
return mae_and_rmse
def real_moving_average(df, col, sliding_window_in_hours, forecast_window_in_minutes):
""" Calculating the moving average using a sliding window of `sliding_window_in_hours`
on a forecast window of `forecast_window_in_minutes` over the dataset.
Returns a dataframe with the forecast for the given dataframe.
"""
sliding_window = 12*sliding_window_in_hours
forecast_window = ((forecast_window_in_minutes+5)//5)
X = df[col].values
Y = X[:sliding_window]
for i in range(forecast_window):
ypred = np.mean(Y[i: i+sliding_window])
Y = np.append(Y, ypred)
forecast_df = pd.DataFrame(
data=Y[len(Y)-forecast_window:],
index=df.index[sliding_window:sliding_window+forecast_window]
)
return forecast_df
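# Added toy illustration (hedged, invented numbers): with a 2-sample sliding window each
# forecast step appends the mean of the previous window and then slides forward, exactly
# as in the loop above.
def _rolling_forecast_example():
    preds = [10.0, 12.0]  # pretend sliding window of two past speed samples
    for i in range(3):  # forecast 3 steps ahead
        preds.append(np.mean(preds[i:i + 2]))
    return preds[2:]  # [11.0, 11.5, 11.25]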
# still need to compute MAE and RMSE for all data
def moving_average_forecast(df, col, sliding_window_in_hours, forecast_window_in_minutes):
""" Applies moving average forecast across all the dataset. Stride can be applied to make forecasting faster,
ie, stride makes the sliding window jump a window of `stride_in_minutes`.
Returns a pandas.DataFrame containing a side-by-side comparison of the real dataframe and its predictions,
for all predicted values.
"""
sliding_window = 12*sliding_window_in_hours
forecast_window = ((forecast_window_in_minutes+5)//5)
stride_in_minutes = 60
stride = (stride_in_minutes//5)
all_predictions = []
if stride_in_minutes == 0:
max_it = len(df)
else:
max_it = len(df)//stride
for i in range(max_it):
try:
smaller_df = df.iloc[i*stride: (sliding_window+forecast_window) + (i+1)*stride]
preds = real_moving_average(smaller_df, col, sliding_window_in_hours, forecast_window_in_minutes)
fdf = pd.concat([smaller_df[[col]].loc[preds.index[0]:preds.index[-1]],preds], axis=1)
fdf = fdf.rename(columns={0:col+'_pred'})
all_predictions.append(fdf)
except:
pass
return pd.concat(all_predictions, axis=0)
def metrics(preds_df):
""" Given a `preds_df` containing two columns, the first with real values and the second being preds,
returns MAE and RMSE
"""
preds = preds_df
    MAE = np.mean(np.abs(preds[preds.columns[0]] - preds[preds.columns[1]]))
RMSE = np.sqrt(np.mean(np.power(preds[preds.columns[0]] - preds[preds.columns[1]], 2)))
return (MAE, RMSE)
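# Added worked example (hedged): MAE is the mean absolute difference and RMSE the square
# root of the mean squared difference, e.g. real [1, 2] vs pred [2, 4] gives
# MAE = (1 + 2) / 2 = 1.5 and RMSE = sqrt((1 + 4) / 2) ~= 1.58.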
def main():
    # these options should go into an argument parser
SLIDING_WINDOW_IN_HOURS = 4
FORECAST_WINDOW_IN_MINUTES = 15
STRIDE_IN_MINUTES = 60
df = load_seattle_speed_matrix()
metrics_dict = {}
for col in df.columns:
print(col)
preds = moving_average_forecast(df, col, SLIDING_WINDOW_IN_HOURS, FORECAST_WINDOW_IN_MINUTES)
mae_rmse = metrics(preds)
metrics_dict[col] = mae_rmse
pd.DataFrame(metrics_dict, index=['MAE', 'RMSE']).to_csv('./experiment_results/training_window_4_hour_forecast_window_15_min_mae_rmse_seattle.csv')
if __name__ == '__main__':
main()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.power",
"numpy.append",
"numpy.mean",
"pandas.to_datetime",
"pandas.read_pickle",
"pandas.concat"
] |
[((580, 608), 'pandas.read_pickle', 'pd.read_pickle', (['speed_matrix'], {}), '(speed_matrix)\n', (594, 608), True, 'import pandas as pd\n'), ((624, 673), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {'format': '"""%Y-%m-%d %H:%M"""'}), "(df.index, format='%Y-%m-%d %H:%M')\n", (638, 673), True, 'import pandas as pd\n'), ((5405, 5439), 'pandas.concat', 'pd.concat', (['all_predictions'], {'axis': '(0)'}), '(all_predictions, axis=0)\n', (5414, 5439), True, 'import pandas as pd\n'), ((2169, 2179), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2177, 2179), True, 'import matplotlib.pyplot as plt\n'), ((3886, 3918), 'numpy.mean', 'np.mean', (['Y[i:i + sliding_window]'], {}), '(Y[i:i + sliding_window])\n', (3893, 3918), True, 'import numpy as np\n'), ((3930, 3949), 'numpy.append', 'np.append', (['Y', 'ypred'], {}), '(Y, ypred)\n', (3939, 3949), True, 'import numpy as np\n'), ((5644, 5701), 'numpy.abs', 'np.abs', (['(preds[preds.columns[0]] - preds[preds.columns[1]])'], {}), '(preds[preds.columns[0]] - preds[preds.columns[1]])\n', (5650, 5701), True, 'import numpy as np\n'), ((5186, 5271), 'pandas.concat', 'pd.concat', (['[smaller_df[[col]].loc[preds.index[0]:preds.index[-1]], preds]'], {'axis': '(1)'}), '([smaller_df[[col]].loc[preds.index[0]:preds.index[-1]], preds],\n axis=1)\n', (5195, 5271), True, 'import pandas as pd\n'), ((5731, 5793), 'numpy.power', 'np.power', (['(preds[preds.columns[0]] - preds[preds.columns[1]])', '(2)'], {}), '(preds[preds.columns[0]] - preds[preds.columns[1]], 2)\n', (5739, 5793), True, 'import numpy as np\n'), ((6277, 6326), 'pandas.DataFrame', 'pd.DataFrame', (['metrics_dict'], {'index': "['MAE', 'RMSE']"}), "(metrics_dict, index=['MAE', 'RMSE'])\n", (6289, 6326), True, 'import pandas as pd\n'), ((3192, 3237), 'pandas.DataFrame', 'pd.DataFrame', (['mae_rmse'], {'index': "['MAE', 'RMSE']"}), "(mae_rmse, index=['MAE', 'RMSE'])\n", (3204, 3237), True, 'import pandas as pd\n'), ((1789, 1808), 'numpy.abs', 'np.abs', (['(x[0] - x[1])'], {}), '(x[0] - x[1])\n', (1795, 1808), True, 'import numpy as np\n'), ((1875, 1899), 'numpy.power', 'np.power', (['(x[0] - x[1])', '(2)'], {}), '(x[0] - x[1], 2)\n', (1883, 1899), True, 'import numpy as np\n')]
|
""" This module contains a class that describes an object in the world. """
import numpy as np
class Object:
"""
Object is a simple wireframe composed of multiple points connected by
lines that can be drawn in the viewport.
"""
TOTAL_OBJECTS = -1
def __init__(self, points=None, name=None, color=None):
self._points = [] if points is None else points
self._name = self.default_name() if name is None else name
self._color = (0, 0, 0) if color is None else color
Object.TOTAL_OBJECTS += 1
@staticmethod
def default_name():
""" Default name for new objects. """
return "object{}".format(Object.TOTAL_OBJECTS + 1)
@property
def points(self):
""" The points in the wireframe. """
return self._points
@property
def name(self):
""" Name of the object. """
return self._name
@property
def color(self):
""" Color of the object. """
return self._color
@property
def center(self):
""" Center of the object. """
points = set()
for face in self._points:
points.update(face)
x_points = [point[0] for point in points]
y_points = [point[1] for point in points]
z_points = [point[2] for point in points]
return \
(np.average(x_points), np.average(y_points), np.average(z_points))
def _transform(self, matrix, center=None, offset=None):
center = self.center if center is None else center
# move object to center
operation_matrix = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[-center[0], -center[1], -center[2], 1],
])
# perform operation
operation_matrix = operation_matrix.dot([
matrix[0] + [0],
matrix[1] + [0],
matrix[2] + [0],
([0, 0, 0] if offset is None else offset) + [1],
])
# move object back to original position
operation_matrix = operation_matrix.dot([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[center[0], center[1], center[2], 1],
])
for fpos, face in enumerate(self._points):
for ppos, point in enumerate(face):
new_point = np.dot(point + (1,), operation_matrix)
self._points[fpos][ppos] = tuple(new_point[:3])
def move(self, offset):
""" Moves the object by an offset = (x, y). """
self._transform(
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
], center=None, offset=list(offset))
def zoom(self, factor):
""" Zooms in the object by 'factor' times. """
self._transform(
[
[factor, 0, 0],
[0, factor, 0],
[0, 0, factor],
])
@staticmethod
def generate_rotation_matrix(x_angle, y_angle, z_angle):
""" Generates the matrix that rotates points. """
return np.array([
[1, 0, 0],
[0, np.cos(x_angle), -np.sin(x_angle)],
[0, np.sin(x_angle), np.cos(x_angle)],
]).dot([
[np.cos(y_angle), 0, np.sin(y_angle)],
[0, 1, 0],
[-np.sin(y_angle), 0, np.cos(y_angle)],
]).dot([
[np.cos(z_angle), -np.sin(z_angle), 0],
[np.sin(z_angle), np.cos(z_angle), 0],
[0, 0, 1],
]).tolist()
def rotate(self, x_angle, y_angle, z_angle, center=None):
""" Rotates the object around center, the angle is in radians. """
self._transform(
Object.generate_rotation_matrix(x_angle, y_angle, z_angle),
center)
def project(self):
""" Projects the 3D objects to 2D. Using perspective projection. """
def _project(point):
return (
point[0]/(point[2]/Window.COP_DISTANCE+1),
point[1]/(point[2]/Window.COP_DISTANCE+1))
self._points = [list(map(_project, face)) for face in self._points]
def clip(self, window):
""" Weiler-Atherton polygon clipping algorithm. """
def connect_points(clipped, side1, side2, window):
""" Connects points of the window. """
edge = side1
while edge != side2:
clipped.append(window.points[0][edge])
edge = (edge - 1) % 4
boundaries = window.real_boundaries
clipped = []
for face in self._points:
new_face = []
entered, exited = None, None
for i in range(len(face) - 1):
points, side = Object._clip_line(
face[i], face[i + 1], *boundaries[0], *boundaries[1])
if not points: # clipped line is outside window
continue
if side[0] is not None: # entered
if exited is not None:
connect_points(new_face, exited, side[0], window)
else:
entered = side[0]
if side[1] is not None: # exited
exited = side[1]
new_face.append(points[0])
new_face.append(points[1])
else:
new_face.append(points[0])
if new_face and face[0] == face[-1]:
if entered is not None:
connect_points(new_face, exited, entered, window)
new_face.append(new_face[0])
clipped.append(new_face)
self._points = clipped
@staticmethod
def _clip_line(point1, point2, xmin, ymin, xmax, ymax):
""" Liang-Barsky line clipping algorithm. """
deltax, deltay = point2[0] - point1[0], point2[1] - point1[1]
deltas = [-deltax, -deltay, deltax, deltay] # p
distances = [ # q
point1[0] - xmin, point1[1] - ymin,
xmax - point1[0], ymax - point1[1]]
ratios = np.divide(distances, deltas) # r
pct1, pct2 = 0, 1 # how much of the line is inside the window
side = [None, None]
for i in range(4):
if deltas[i] == 0 and distances[i] < 0:
return (), side
if deltas[i] < 0:
if ratios[i] > pct1: # entered
side[0] = i
pct1 = ratios[i]
if deltas[i] > 0:
if ratios[i] < pct2: # exited
side[1] = i
pct2 = ratios[i]
if pct1 > pct2:
return (), side
clipped = (
tuple(np.add((point1[0], point1[1]), (pct1*deltax, pct1*deltay))),
tuple(np.add((point1[0], point1[1]), (pct2*deltax, pct2*deltay))),
)
return clipped, side
@staticmethod
def build_from_file(path):
""" Returns objects described in an OBJ file. """
with open(path) as obj:
raw_file = obj.read()
file_lines = [line.split(" ") for line in raw_file.split("\n")]
vertices = {}
faces = []
for number, line in enumerate(file_lines):
if line[0] == "v":
vertices[number + 1] = tuple(map(float, line[1:]))
if line[0] == "f":
face = []
for index in line[1:]:
face.append(vertices[int(index)])
face.append(vertices[int(line[1])])
faces.append(face)
return Object(points=faces)
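# Added usage sketch (hedged): Liang-Barsky clipping of the segment (-2,-2)->(2,2)
# against the box with x and y in [-1, 1] keeps only the part inside the window.
def _clip_line_example():
    clipped, side = Object._clip_line((-2, -2), (2, 2), -1, -1, 1, 1)
    return clipped, side  # ((-1.0, -1.0), (1.0, 1.0)) and [0, 2]: entered at xmin, exited at xmax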
class Window(Object):
"""
The window object.
    This object delimits what should be drawn in the viewport. Moving and
    rescaling it changes which portion of the world is drawn in the
    viewport.
"""
BORDER = 0.05
def __init__(self, width, height):
points = [
(-width/2, height/2, 0),
(-width/2, -height/2, 0),
(width/2, -height/2, 0),
(width/2, height/2, 0),
]
points.append(points[0])
super().__init__([points], "window", (0, 0, 0))
self._rotation_matrix = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
@property
def expanded_boundaries(self):
""" Boundaries a little bigger than the actual window. """
width = self._points[0][3][0] - self._points[0][1][0]
height = self._points[0][3][1] - self._points[0][1][1]
factor = np.multiply((width, height), Window.BORDER)
return (
np.subtract(self._points[0][1], factor),
np.add(self._points[0][3], factor))
@property
def real_boundaries(self):
""" Returns windows' bottom left and upper right coordinates. """
return (self._points[0][1], self._points[0][3])
@property
def inv_rotation_matrix(self):
""" This matrix rotates the window back to its original position. """
return np.linalg.inv(self._rotation_matrix).tolist()
def move(self, offset):
# rotate offset vector to move window relative to its own directions
offset = np.dot(offset, self._rotation_matrix)
super().move(offset)
def zoom(self, factor):
# save original state
original_points = self._points.copy()
# apply the zoom operation
super().zoom(factor**(-1))
# find new window size
minimum, maximum = self.real_boundaries
width = np.abs(maximum[0] - minimum[0])
height = np.abs(maximum[1] - minimum[1])
# if zoom was exceeded, go back to original state and raise an error
if width < 10 and height < 10:
self._points = original_points
raise RuntimeError("Maximum zoom in exceeded")
def rotate(self, x_angle, y_angle, z_angle, center=None):
# find M = R^-1 * T * R
# R is the rotation matrix, it saves the rotation state of the window
# T is the matrix of the rotation that is being applied
matrix = Object.generate_rotation_matrix(x_angle, y_angle, z_angle)
matrix = np.dot(self.inv_rotation_matrix, matrix)
matrix = np.dot(matrix, self._rotation_matrix)
self._transform(matrix.tolist())
# update rotation matrix
self._rotation_matrix = np.dot(self._rotation_matrix, matrix)
def clip(self, _):
pass
class Curve(Object):
""" A Bezier curve with four control points. """
def __init__(self, points, name=None, color=None):
curve = Curve._generate_curve(points)
curve.append(curve[-1]) # add stub point for clipping
super().__init__(
points=[curve], name=name, color=color)
@staticmethod
def _generate_curve(points):
def f(t, i):
return np.array([t**3, t**2, t, 1]).dot(np.array([
[-1, 3, -3, 1],
[3, -6, 3, 0],
[-3, 3, 0, 0],
[1, 0, 0, 0],
])).dot(np.array([p[i] for p in points]))
step = 0.02
x_points = [f(t, 0) for t in np.arange(0, 1+step, step)]
y_points = [f(t, 1) for t in np.arange(0, 1+step, step)]
z_points = [f(t, 2) for t in np.arange(0, 1+step, step)]
return list(zip(x_points, y_points, z_points))
class Spline(Object):
""" A Spline curve with arbitrary amount of control points. """
def __init__(self, points, name=None, color=None):
curves = []
for i in range(len(points) - 3):
# build a curve for every four control points
curve = Spline._generate_curve(points[i:i+4])
curve.append(curve[-1]) # add stub point for clipping
curves.append(curve)
super().__init__(
points=curves, name=name, color=color)
@staticmethod
def _generate_curve(points):
coef = np.multiply(1/6, np.array([
[-1, 3, -3, 1],
[3, -6, 3, 0],
[-3, 0, 3, 0],
[1, 4, 1, 0],
])).dot(np.array(points))
number_of_points = 50
delta = 1/number_of_points
deltas = np.array([
[0, 0, 0, 1],
[delta**3, delta**2, delta, 0],
[6*delta**3, 2*delta**2, 0, 0],
[6*delta**3, 0, 0, 0],
]).dot(coef)
points = [tuple(deltas[0])]
for _ in range(number_of_points):
# update coordinates using forward differences
deltas[0] += deltas[1]
deltas[1] += deltas[2]
deltas[2] += deltas[3]
points.append(tuple(deltas[0]))
return points
|
[
"numpy.divide",
"numpy.multiply",
"numpy.abs",
"numpy.average",
"numpy.subtract",
"numpy.sin",
"numpy.array",
"numpy.linalg.inv",
"numpy.arange",
"numpy.cos",
"numpy.dot",
"numpy.add"
] |
[((1601, 1698), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [-center[0], -center[1], -center\n [2], 1]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [-center[0], -center[1],\n -center[2], 1]])\n', (1609, 1698), True, 'import numpy as np\n'), ((6089, 6117), 'numpy.divide', 'np.divide', (['distances', 'deltas'], {}), '(distances, deltas)\n', (6098, 6117), True, 'import numpy as np\n'), ((8215, 8258), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (8223, 8258), True, 'import numpy as np\n'), ((8555, 8598), 'numpy.multiply', 'np.multiply', (['(width, height)', 'Window.BORDER'], {}), '((width, height), Window.BORDER)\n', (8566, 8598), True, 'import numpy as np\n'), ((9205, 9242), 'numpy.dot', 'np.dot', (['offset', 'self._rotation_matrix'], {}), '(offset, self._rotation_matrix)\n', (9211, 9242), True, 'import numpy as np\n'), ((9544, 9575), 'numpy.abs', 'np.abs', (['(maximum[0] - minimum[0])'], {}), '(maximum[0] - minimum[0])\n', (9550, 9575), True, 'import numpy as np\n'), ((9593, 9624), 'numpy.abs', 'np.abs', (['(maximum[1] - minimum[1])'], {}), '(maximum[1] - minimum[1])\n', (9599, 9624), True, 'import numpy as np\n'), ((10174, 10214), 'numpy.dot', 'np.dot', (['self.inv_rotation_matrix', 'matrix'], {}), '(self.inv_rotation_matrix, matrix)\n', (10180, 10214), True, 'import numpy as np\n'), ((10232, 10269), 'numpy.dot', 'np.dot', (['matrix', 'self._rotation_matrix'], {}), '(matrix, self._rotation_matrix)\n', (10238, 10269), True, 'import numpy as np\n'), ((10376, 10413), 'numpy.dot', 'np.dot', (['self._rotation_matrix', 'matrix'], {}), '(self._rotation_matrix, matrix)\n', (10382, 10413), True, 'import numpy as np\n'), ((1355, 1375), 'numpy.average', 'np.average', (['x_points'], {}), '(x_points)\n', (1365, 1375), True, 'import numpy as np\n'), ((1377, 1397), 'numpy.average', 'np.average', (['y_points'], {}), '(y_points)\n', (1387, 1397), True, 'import numpy as np\n'), ((1399, 1419), 'numpy.average', 'np.average', (['z_points'], {}), '(z_points)\n', (1409, 1419), True, 'import numpy as np\n'), ((8628, 8667), 'numpy.subtract', 'np.subtract', (['self._points[0][1]', 'factor'], {}), '(self._points[0][1], factor)\n', (8639, 8667), True, 'import numpy as np\n'), ((8681, 8715), 'numpy.add', 'np.add', (['self._points[0][3]', 'factor'], {}), '(self._points[0][3], factor)\n', (8687, 8715), True, 'import numpy as np\n'), ((12077, 12093), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (12085, 12093), True, 'import numpy as np\n'), ((2358, 2396), 'numpy.dot', 'np.dot', (['(point + (1,))', 'operation_matrix'], {}), '(point + (1,), operation_matrix)\n', (2364, 2396), True, 'import numpy as np\n'), ((6716, 6778), 'numpy.add', 'np.add', (['(point1[0], point1[1])', '(pct1 * deltax, pct1 * deltay)'], {}), '((point1[0], point1[1]), (pct1 * deltax, pct1 * deltay))\n', (6722, 6778), True, 'import numpy as np\n'), ((6795, 6857), 'numpy.add', 'np.add', (['(point1[0], point1[1])', '(pct2 * deltax, pct2 * deltay)'], {}), '((point1[0], point1[1]), (pct2 * deltax, pct2 * deltay))\n', (6801, 6857), True, 'import numpy as np\n'), ((9036, 9072), 'numpy.linalg.inv', 'np.linalg.inv', (['self._rotation_matrix'], {}), '(self._rotation_matrix)\n', (9049, 9072), True, 'import numpy as np\n'), ((11050, 11082), 'numpy.array', 'np.array', (['[p[i] for p in points]'], {}), '([p[i] for p in points])\n', (11058, 11082), True, 'import numpy as np\n'), ((11142, 11170), 'numpy.arange', 'np.arange', (['(0)', '(1 + step)', 
'step'], {}), '(0, 1 + step, step)\n', (11151, 11170), True, 'import numpy as np\n'), ((11207, 11235), 'numpy.arange', 'np.arange', (['(0)', '(1 + step)', 'step'], {}), '(0, 1 + step, step)\n', (11216, 11235), True, 'import numpy as np\n'), ((11272, 11300), 'numpy.arange', 'np.arange', (['(0)', '(1 + step)', 'step'], {}), '(0, 1 + step, step)\n', (11281, 11300), True, 'import numpy as np\n'), ((12178, 12309), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [delta ** 3, delta ** 2, delta, 0], [6 * delta ** 3, 2 * \n delta ** 2, 0, 0], [6 * delta ** 3, 0, 0, 0]]'], {}), '([[0, 0, 0, 1], [delta ** 3, delta ** 2, delta, 0], [6 * delta ** 3,\n 2 * delta ** 2, 0, 0], [6 * delta ** 3, 0, 0, 0]])\n', (12186, 12309), True, 'import numpy as np\n'), ((11942, 12012), 'numpy.array', 'np.array', (['[[-1, 3, -3, 1], [3, -6, 3, 0], [-3, 0, 3, 0], [1, 4, 1, 0]]'], {}), '([[-1, 3, -3, 1], [3, -6, 3, 0], [-3, 0, 3, 0], [1, 4, 1, 0]])\n', (11950, 12012), True, 'import numpy as np\n'), ((10895, 10965), 'numpy.array', 'np.array', (['[[-1, 3, -3, 1], [3, -6, 3, 0], [-3, 3, 0, 0], [1, 0, 0, 0]]'], {}), '([[-1, 3, -3, 1], [3, -6, 3, 0], [-3, 3, 0, 0], [1, 0, 0, 0]])\n', (10903, 10965), True, 'import numpy as np\n'), ((3412, 3427), 'numpy.cos', 'np.cos', (['z_angle'], {}), '(z_angle)\n', (3418, 3427), True, 'import numpy as np\n'), ((3464, 3479), 'numpy.sin', 'np.sin', (['z_angle'], {}), '(z_angle)\n', (3470, 3479), True, 'import numpy as np\n'), ((3481, 3496), 'numpy.cos', 'np.cos', (['z_angle'], {}), '(z_angle)\n', (3487, 3496), True, 'import numpy as np\n'), ((10862, 10894), 'numpy.array', 'np.array', (['[t ** 3, t ** 2, t, 1]'], {}), '([t ** 3, t ** 2, t, 1])\n', (10870, 10894), True, 'import numpy as np\n'), ((3430, 3445), 'numpy.sin', 'np.sin', (['z_angle'], {}), '(z_angle)\n', (3436, 3445), True, 'import numpy as np\n'), ((3269, 3284), 'numpy.cos', 'np.cos', (['y_angle'], {}), '(y_angle)\n', (3275, 3284), True, 'import numpy as np\n'), ((3289, 3304), 'numpy.sin', 'np.sin', (['y_angle'], {}), '(y_angle)\n', (3295, 3304), True, 'import numpy as np\n'), ((3364, 3379), 'numpy.cos', 'np.cos', (['y_angle'], {}), '(y_angle)\n', (3370, 3379), True, 'import numpy as np\n'), ((3344, 3359), 'numpy.sin', 'np.sin', (['y_angle'], {}), '(y_angle)\n', (3350, 3359), True, 'import numpy as np\n'), ((3152, 3167), 'numpy.cos', 'np.cos', (['x_angle'], {}), '(x_angle)\n', (3158, 3167), True, 'import numpy as np\n'), ((3204, 3219), 'numpy.sin', 'np.sin', (['x_angle'], {}), '(x_angle)\n', (3210, 3219), True, 'import numpy as np\n'), ((3221, 3236), 'numpy.cos', 'np.cos', (['x_angle'], {}), '(x_angle)\n', (3227, 3236), True, 'import numpy as np\n'), ((3170, 3185), 'numpy.sin', 'np.sin', (['x_angle'], {}), '(x_angle)\n', (3176, 3185), True, 'import numpy as np\n')]
|
"""Utils functions."""
from copy import deepcopy
import mne
import numpy as np
from ._logs import logger
# TODO: Add test for this. Also compare speed with latest version of numpy.
# Also compared speed with a numba implementation.
def _corr_vectors(A, B, axis=0):
# based on:
# https://github.com/wmvanvliet/mne_microstates/blob/master/microstates.py
# written by <NAME> <<EMAIL>>
"""Compute pairwise correlation of multiple pairs of vectors.
Fast way to compute correlation of multiple pairs of vectors without
    computing all pairs as one would with corr(A, B). Borrowed from Oli at
StackOverflow. Note the resulting coefficients vary slightly from the ones
obtained from corr due to differences in the order of the calculations.
(Differences are of a magnitude of 1e-9 to 1e-17 depending on the tested
data).
Parameters
----------
A : ndarray, shape (n, m)
The first collection of vectors
B : ndarray, shape (n, m)
The second collection of vectors
axis : int
The axis that contains the elements of each vector. Defaults to 0.
Returns
-------
corr : ndarray, shape (m, )
For each pair of vectors, the correlation between them.
"""
if A.shape != B.shape:
raise ValueError("All input arrays must have the same shape")
    # If maps is null, divide will not throw an error.
np.seterr(divide="ignore", invalid="ignore")
An = A - np.mean(A, axis=axis)
Bn = B - np.mean(B, axis=axis)
An /= np.linalg.norm(An, axis=axis)
Bn /= np.linalg.norm(Bn, axis=axis)
corr = np.sum(An * Bn, axis=axis)
corr = np.nan_to_num(corr, posinf=0, neginf=0)
np.seterr(divide="warn", invalid="warn")
return corr
# TODO: To be removed when ChInfo is implemented.
def _copy_info(inst, sfreq):
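    # Rebuild an mne Info with the requested sfreq, copying the channel names,
    # channel types and (if present) the montage from the original instance.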
ch_names = inst.info["ch_names"]
ch_types = [
mne.channel_type(inst.info, idx)
for idx in range(0, inst.info["nchan"])
]
new_info = mne.create_info(ch_names, sfreq=sfreq, ch_types=ch_types)
if inst.get_montage():
montage = inst.get_montage()
new_info.set_montage(montage)
return new_info
def _compare_infos(cluster_info, inst_info):
"""Check that channels in cluster_info are all present in inst_info."""
for ch in cluster_info["ch_names"]:
if ch not in inst_info["ch_names"]:
raise ValueError(
"Instance to segment into microstates sequence does not have "
"the same channels as the instance used for fitting."
)
# Extract loc arrays
cluster_loc = list()
for ch in cluster_info["chs"]:
cluster_loc.append((ch["ch_name"], deepcopy(ch["loc"])))
inst_loc = list()
for ch in inst_info["chs"]:
if ch["ch_name"] in cluster_info["ch_names"]:
inst_loc.append((ch["ch_name"], deepcopy(ch["loc"])))
cluster_loc = [loc[1] for loc in sorted(cluster_loc, key=lambda x: x[0])]
inst_loc = [loc[1] for loc in sorted(inst_loc, key=lambda x: x[0])]
# Compare loc
assert len(cluster_loc) == len(inst_loc) # sanity-check
for l1, l2 in zip(cluster_loc, inst_loc):
if not np.allclose(l1, l2, equal_nan=True):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same channels montage as the instance used for fitting. "
)
break
# Compare attributes in chs
cluster_kinds = list()
cluster_units = list()
cluster_coord_frame = list()
for ch in cluster_info["chs"]:
cluster_kinds.append((ch["ch_name"], ch["kind"]))
cluster_units.append((ch["ch_name"], ch["unit"]))
cluster_coord_frame.append((ch["ch_name"], ch["coord_frame"]))
inst_kinds = list()
inst_units = list()
inst_coord_frames = list()
for ch in inst_info["chs"]:
if ch["ch_name"] in cluster_info["ch_names"]:
inst_kinds.append((ch["ch_name"], ch["kind"]))
inst_units.append((ch["ch_name"], ch["unit"]))
inst_coord_frames.append((ch["ch_name"], ch["coord_frame"]))
cluster_kinds = [
elt[1] for elt in sorted(cluster_kinds, key=lambda x: x[0])
]
cluster_units = [
elt[1] for elt in sorted(cluster_units, key=lambda x: x[0])
]
cluster_coord_frame = [
elt[1] for elt in sorted(cluster_coord_frame, key=lambda x: x[0])
]
inst_kinds = [elt[1] for elt in sorted(inst_kinds, key=lambda x: x[0])]
inst_units = [elt[1] for elt in sorted(inst_units, key=lambda x: x[0])]
inst_coord_frames = [
elt[1] for elt in sorted(inst_coord_frames, key=lambda x: x[0])
]
if not all(
kind1 == kind2 for kind1, kind2 in zip(cluster_kinds, inst_kinds)
):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same channels kinds as the instance used for fitting. "
)
if not all(
unit1 == unit2 for unit1, unit2 in zip(cluster_units, inst_units)
):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same channels units as the instance used for fitting. "
)
if not all(
f1 == f2 for f1, f2 in zip(cluster_coord_frame, inst_coord_frames)
):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same coordinate frames as the instance used for fitting. "
)
|
[
"copy.deepcopy",
"numpy.sum",
"numpy.nan_to_num",
"numpy.seterr",
"numpy.allclose",
"mne.channel_type",
"mne.create_info",
"numpy.mean",
"numpy.linalg.norm"
] |
[((1399, 1443), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1408, 1443), True, 'import numpy as np\n'), ((1524, 1553), 'numpy.linalg.norm', 'np.linalg.norm', (['An'], {'axis': 'axis'}), '(An, axis=axis)\n', (1538, 1553), True, 'import numpy as np\n'), ((1564, 1593), 'numpy.linalg.norm', 'np.linalg.norm', (['Bn'], {'axis': 'axis'}), '(Bn, axis=axis)\n', (1578, 1593), True, 'import numpy as np\n'), ((1605, 1631), 'numpy.sum', 'np.sum', (['(An * Bn)'], {'axis': 'axis'}), '(An * Bn, axis=axis)\n', (1611, 1631), True, 'import numpy as np\n'), ((1643, 1682), 'numpy.nan_to_num', 'np.nan_to_num', (['corr'], {'posinf': '(0)', 'neginf': '(0)'}), '(corr, posinf=0, neginf=0)\n', (1656, 1682), True, 'import numpy as np\n'), ((1687, 1727), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""warn"""', 'invalid': '"""warn"""'}), "(divide='warn', invalid='warn')\n", (1696, 1727), True, 'import numpy as np\n'), ((1989, 2046), 'mne.create_info', 'mne.create_info', (['ch_names'], {'sfreq': 'sfreq', 'ch_types': 'ch_types'}), '(ch_names, sfreq=sfreq, ch_types=ch_types)\n', (2004, 2046), False, 'import mne\n'), ((1457, 1478), 'numpy.mean', 'np.mean', (['A'], {'axis': 'axis'}), '(A, axis=axis)\n', (1464, 1478), True, 'import numpy as np\n'), ((1492, 1513), 'numpy.mean', 'np.mean', (['B'], {'axis': 'axis'}), '(B, axis=axis)\n', (1499, 1513), True, 'import numpy as np\n'), ((1887, 1919), 'mne.channel_type', 'mne.channel_type', (['inst.info', 'idx'], {}), '(inst.info, idx)\n', (1903, 1919), False, 'import mne\n'), ((3185, 3220), 'numpy.allclose', 'np.allclose', (['l1', 'l2'], {'equal_nan': '(True)'}), '(l1, l2, equal_nan=True)\n', (3196, 3220), True, 'import numpy as np\n'), ((2698, 2717), 'copy.deepcopy', 'deepcopy', (["ch['loc']"], {}), "(ch['loc'])\n", (2706, 2717), False, 'from copy import deepcopy\n'), ((2872, 2891), 'copy.deepcopy', 'deepcopy', (["ch['loc']"], {}), "(ch['loc'])\n", (2880, 2891), False, 'from copy import deepcopy\n')]
|
import numpy as numpy
a = numpy.arange(150)
# a[0::2] *= numpy.sqrt(2)/2.0 * (numpy.cos(2) - numpy.sin(2))
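# double every second element (even indices) of the array in place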
a[0::2] *= 2
print(a)
|
[
"numpy.arange"
] |
[((26, 43), 'numpy.arange', 'numpy.arange', (['(150)'], {}), '(150)\n', (38, 43), True, 'import numpy as numpy\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is pytest for twinpy.properties.hexagonal.
"""
from copy import deepcopy
import numpy as np
from twinpy.properties import hexagonal
a = 2.93
c = 4.65
def test_check_hexagonal_lattice(ti_cell_wyckoff_c):
"""
Check check_hexagonal_lattice.
"""
hexagonal_lattice = ti_cell_wyckoff_c[0]
hexagonal.check_hexagonal_lattice(lattice=hexagonal_lattice)
def test_check_cell_is_hcp(ti_cell_wyckoff_c, ti_cell_wyckoff_d):
"""
Check check_cell_is_hcp.
"""
for cell in [ti_cell_wyckoff_c, ti_cell_wyckoff_d]:
hexagonal.check_cell_is_hcp(cell=cell)
def test_convert_direction():
"""
Check convert_direction_from_four_to_three
and convert_direction_from_three_to_four.
Note:
Let basis vectors for hexagonal lattice be a_1, a_2 and c,
a_1 = [1,0,0] = 1/3[2,-1,-1,0].
"""
def _test_convert_direction_from_three_to_four(three, four_expected):
_four = hexagonal.convert_direction_from_three_to_four(
three=three)
np.testing.assert_allclose(_four, four_expected)
def _test_convert_direction_from_four_to_three(four, three_expected):
_three = hexagonal.convert_direction_from_four_to_three(
four=four)
np.testing.assert_allclose(_three, three_expected)
a_1_three = np.array([1.,0.,0.])
a_1_four = np.array([2.,-1.,-1.,0.]) / 3.
_test_convert_direction_from_three_to_four(three=a_1_three,
four_expected=a_1_four)
_test_convert_direction_from_four_to_three(four=a_1_four,
three_expected=a_1_three)
def test_hexagonal_direction(ti_cell_wyckoff_c):
"""
Check HexagonalDirection.
"""
def _test_reset_indices(hex_dr, three):
_hex_dr = deepcopy(hex_dr)
_hex_dr.reset_indices(three=three)
_three_expected = _hex_dr.three
np.testing.assert_allclose(three, _three_expected)
def _test_inverse(hex_dr):
_inv_hex_dr = deepcopy(hex_dr)
_inv_hex_dr.inverse()
_three = hex_dr.three
_inv_three = _inv_hex_dr.three
np.testing.assert_allclose(_three, _inv_three*(-1.))
def _test_get_cartesian(hex_dr, cart_expected):
_cart = hex_dr.get_cartesian(normalize=False)
_cart_normalized = hex_dr.get_cartesian(normalize=True)
_norm = np.linalg.norm(_cart_normalized)
np.testing.assert_allclose(_cart, cart_expected)
np.testing.assert_allclose(_norm, 1.)
lattice = ti_cell_wyckoff_c[0]
three_a1 = np.array([1.,0.,0.]) # a_1
three_c = np.array([0.,0.,1.]) # c
a1_cart = np.array([a,0.,0.]) # cartesian coordinate for vector a_1
hex_dr_a1 = hexagonal.HexagonalDirection(lattice=lattice, three=three_a1)
_test_reset_indices(hex_dr=hex_dr_a1,
three=three_c)
_test_inverse(hex_dr=hex_dr_a1)
_test_get_cartesian(hex_dr=hex_dr_a1, cart_expected=a1_cart)
def test_convert_plane():
"""
Check convert_plane_from_four_to_three
and convert_plane_from_three_to_four.
Note:
(10-12) plane is equal to (102).
"""
def _test_convert_plane_from_three_to_four(three, four_expected):
_four = hexagonal.convert_plane_from_three_to_four(
three=three)
np.testing.assert_allclose(_four, four_expected)
def _test_convert_plane_from_four_to_three(four, three_expected):
_three = hexagonal.convert_plane_from_four_to_three(
four=four)
np.testing.assert_allclose(_three, three_expected)
twin_three = np.array([1.,0.,2.])
twin_four = np.array([1.,0.,-1.,2.])
_test_convert_plane_from_three_to_four(three=twin_three,
four_expected=twin_four)
_test_convert_plane_from_four_to_three(four=twin_four,
three_expected=twin_three)
def test_hexagonal_plane(ti_cell_wyckoff_c):
"""
Check HexagonalPlane.
"""
def _test_reset_indices(hex_pln, four):
_hex_pln = deepcopy(hex_pln)
_hex_pln.reset_indices(four=four)
_four = _hex_pln.four
np.testing.assert_allclose(_four, four)
def _test_inverse(hex_pln):
_inv_hex_pln = deepcopy(hex_pln)
_inv_hex_pln.inverse()
four = hex_pln.four
_inv_four = _inv_hex_pln.four
np.testing.assert_allclose(_inv_four, four*(-1))
def _test_get_distance_from_plane(hex_pln, frac_coord, d_expected):
_d = hex_pln.get_distance_from_plane(frac_coord=frac_coord)
np.testing.assert_allclose(_d, d_expected)
def _test_get_plane_interval(hex_pln, d_expected):
_d = hex_pln.get_plane_interval()
np.testing.assert_allclose(_d, d_expected)
lattice = ti_cell_wyckoff_c[0]
basal_four = np.array([0.,0.,0.,1.])
twin_four = np.array([1.,0.,-1.,2.])
hex_pln_basal = hexagonal.HexagonalPlane(lattice=lattice,
four=basal_four)
hex_pln_twin = hexagonal.HexagonalPlane(lattice=lattice,
four=twin_four)
c_three = np.array([0.,0.,1.])
_test_reset_indices(hex_pln=hex_pln_twin,
four=basal_four)
_test_inverse(hex_pln=hex_pln_twin)
_test_get_distance_from_plane(hex_pln=hex_pln_basal,
frac_coord=c_three,
d_expected=c)
_test_get_plane_interval(hex_pln=hex_pln_basal,
d_expected=c)
|
[
"twinpy.properties.hexagonal.HexagonalPlane",
"twinpy.properties.hexagonal.convert_direction_from_three_to_four",
"copy.deepcopy",
"twinpy.properties.hexagonal.convert_direction_from_four_to_three",
"twinpy.properties.hexagonal.check_cell_is_hcp",
"twinpy.properties.hexagonal.HexagonalDirection",
"twinpy.properties.hexagonal.convert_plane_from_four_to_three",
"numpy.array",
"numpy.linalg.norm",
"numpy.testing.assert_allclose",
"twinpy.properties.hexagonal.convert_plane_from_three_to_four",
"twinpy.properties.hexagonal.check_hexagonal_lattice"
] |
[((363, 423), 'twinpy.properties.hexagonal.check_hexagonal_lattice', 'hexagonal.check_hexagonal_lattice', ([], {'lattice': 'hexagonal_lattice'}), '(lattice=hexagonal_lattice)\n', (396, 423), False, 'from twinpy.properties import hexagonal\n'), ((1366, 1391), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (1374, 1391), True, 'import numpy as np\n'), ((2626, 2651), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (2634, 2651), True, 'import numpy as np\n'), ((2668, 2693), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (2676, 2693), True, 'import numpy as np\n'), ((2708, 2731), 'numpy.array', 'np.array', (['[a, 0.0, 0.0]'], {}), '([a, 0.0, 0.0])\n', (2716, 2731), True, 'import numpy as np\n'), ((2783, 2844), 'twinpy.properties.hexagonal.HexagonalDirection', 'hexagonal.HexagonalDirection', ([], {'lattice': 'lattice', 'three': 'three_a1'}), '(lattice=lattice, three=three_a1)\n', (2811, 2844), False, 'from twinpy.properties import hexagonal\n'), ((3661, 3686), 'numpy.array', 'np.array', (['[1.0, 0.0, 2.0]'], {}), '([1.0, 0.0, 2.0])\n', (3669, 3686), True, 'import numpy as np\n'), ((3698, 3729), 'numpy.array', 'np.array', (['[1.0, 0.0, -1.0, 2.0]'], {}), '([1.0, 0.0, -1.0, 2.0])\n', (3706, 3729), True, 'import numpy as np\n'), ((4894, 4924), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0])\n', (4902, 4924), True, 'import numpy as np\n'), ((4934, 4965), 'numpy.array', 'np.array', (['[1.0, 0.0, -1.0, 2.0]'], {}), '([1.0, 0.0, -1.0, 2.0])\n', (4942, 4965), True, 'import numpy as np\n'), ((4979, 5037), 'twinpy.properties.hexagonal.HexagonalPlane', 'hexagonal.HexagonalPlane', ([], {'lattice': 'lattice', 'four': 'basal_four'}), '(lattice=lattice, four=basal_four)\n', (5003, 5037), False, 'from twinpy.properties import hexagonal\n'), ((5102, 5159), 'twinpy.properties.hexagonal.HexagonalPlane', 'hexagonal.HexagonalPlane', ([], {'lattice': 'lattice', 'four': 'twin_four'}), '(lattice=lattice, four=twin_four)\n', (5126, 5159), False, 'from twinpy.properties import hexagonal\n'), ((5218, 5243), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (5226, 5243), True, 'import numpy as np\n'), ((601, 639), 'twinpy.properties.hexagonal.check_cell_is_hcp', 'hexagonal.check_cell_is_hcp', ([], {'cell': 'cell'}), '(cell=cell)\n', (628, 639), False, 'from twinpy.properties import hexagonal\n'), ((989, 1048), 'twinpy.properties.hexagonal.convert_direction_from_three_to_four', 'hexagonal.convert_direction_from_three_to_four', ([], {'three': 'three'}), '(three=three)\n', (1035, 1048), False, 'from twinpy.properties import hexagonal\n'), ((1074, 1122), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_four', 'four_expected'], {}), '(_four, four_expected)\n', (1100, 1122), True, 'import numpy as np\n'), ((1215, 1272), 'twinpy.properties.hexagonal.convert_direction_from_four_to_three', 'hexagonal.convert_direction_from_four_to_three', ([], {'four': 'four'}), '(four=four)\n', (1261, 1272), False, 'from twinpy.properties import hexagonal\n'), ((1298, 1348), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_three', 'three_expected'], {}), '(_three, three_expected)\n', (1324, 1348), True, 'import numpy as np\n'), ((1402, 1434), 'numpy.array', 'np.array', (['[2.0, -1.0, -1.0, 0.0]'], {}), '([2.0, -1.0, -1.0, 0.0])\n', (1410, 1434), True, 'import numpy as np\n'), ((1862, 1878), 'copy.deepcopy', 'deepcopy', (['hex_dr'], {}), '(hex_dr)\n', (1870, 1878), 
False, 'from copy import deepcopy\n'), ((1970, 2020), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['three', '_three_expected'], {}), '(three, _three_expected)\n', (1996, 2020), True, 'import numpy as np\n'), ((2075, 2091), 'copy.deepcopy', 'deepcopy', (['hex_dr'], {}), '(hex_dr)\n', (2083, 2091), False, 'from copy import deepcopy\n'), ((2199, 2252), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_three', '(_inv_three * -1.0)'], {}), '(_three, _inv_three * -1.0)\n', (2225, 2252), True, 'import numpy as np\n'), ((2439, 2471), 'numpy.linalg.norm', 'np.linalg.norm', (['_cart_normalized'], {}), '(_cart_normalized)\n', (2453, 2471), True, 'import numpy as np\n'), ((2480, 2528), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_cart', 'cart_expected'], {}), '(_cart, cart_expected)\n', (2506, 2528), True, 'import numpy as np\n'), ((2537, 2575), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_norm', '(1.0)'], {}), '(_norm, 1.0)\n', (2563, 2575), True, 'import numpy as np\n'), ((3295, 3350), 'twinpy.properties.hexagonal.convert_plane_from_three_to_four', 'hexagonal.convert_plane_from_three_to_four', ([], {'three': 'three'}), '(three=three)\n', (3337, 3350), False, 'from twinpy.properties import hexagonal\n'), ((3376, 3424), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_four', 'four_expected'], {}), '(_four, four_expected)\n', (3402, 3424), True, 'import numpy as np\n'), ((3513, 3566), 'twinpy.properties.hexagonal.convert_plane_from_four_to_three', 'hexagonal.convert_plane_from_four_to_three', ([], {'four': 'four'}), '(four=four)\n', (3555, 3566), False, 'from twinpy.properties import hexagonal\n'), ((3592, 3642), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_three', 'three_expected'], {}), '(_three, three_expected)\n', (3618, 3642), True, 'import numpy as np\n'), ((4134, 4151), 'copy.deepcopy', 'deepcopy', (['hex_pln'], {}), '(hex_pln)\n', (4142, 4151), False, 'from copy import deepcopy\n'), ((4232, 4271), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_four', 'four'], {}), '(_four, four)\n', (4258, 4271), True, 'import numpy as np\n'), ((4328, 4345), 'copy.deepcopy', 'deepcopy', (['hex_pln'], {}), '(hex_pln)\n', (4336, 4345), False, 'from copy import deepcopy\n'), ((4451, 4499), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_inv_four', '(four * -1)'], {}), '(_inv_four, four * -1)\n', (4477, 4499), True, 'import numpy as np\n'), ((4649, 4691), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_d', 'd_expected'], {}), '(_d, d_expected)\n', (4675, 4691), True, 'import numpy as np\n'), ((4798, 4840), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_d', 'd_expected'], {}), '(_d, d_expected)\n', (4824, 4840), True, 'import numpy as np\n')]
|
"""
Helper functions used by multiple parts of LAtools.
(c) <NAME> : https://github.com/oscarbranson
"""
import os
import shutil
import re
import configparser
import datetime as dt
import numpy as np
import dateutil as du
import pkg_resources as pkgrs
import uncertainties.unumpy as un
import scipy.interpolate as interp
from .stat_fns import nominal_values
from .analyte_names import pretty_element
# Bunch modifies dict to allow item access using dot (.) operator
class Bunch(dict):
def __init__(self, *args, **kwds):
super(Bunch, self).__init__(*args, **kwds)
self.__dict__ = self
# warnings monkeypatch
# https://stackoverflow.com/questions/2187269/python-print-only-the-message-on-warnings
def _warning(message, category=UserWarning,
filename='', lineno=-1,
file=None, line=None):
print(message)
def get_date(datetime, time_format=None):
"""
    Return a datetime object from a string, with optional time format.
Parameters
----------
datetime : str
Date-time as string in any sensible format.
time_format : datetime str (optional)
        String describing the datetime format. If missing, dateutil.parser
        is used to guess the time format.
"""
if time_format is None:
t = du.parser.parse(datetime)
else:
t = dt.datetime.strptime(datetime, time_format)
return t
def get_total_n_points(d):
"""
Returns the total number of data points in values of dict.
    Parameters
    ----------
d : dict
"""
n = 0
for di in d.values():
n += len(di)
return n
def get_total_time_span(d):
"""
Returns total length of analysis.
"""
tmax = 0
for di in d.values():
if di.uTime.max() > tmax:
tmax = di.uTime.max()
return tmax
def unitpicker(a, llim=0.1, denominator=None, focus_stage=None):
"""
Determines the most appropriate plotting unit for data.
Parameters
----------
a : float or array-like
number to optimise. If array like, the 25% quantile is optimised.
llim : float
minimum allowable value in scaled data.
Returns
-------
(float, str)
(multiplier, unit)
"""
if not isinstance(a, (int, float)):
a = nominal_values(a)
a = np.percentile(a[~np.isnan(a)], 25)
if a == 0:
raise ValueError("Cannot calculate unit for zero.")
if denominator is not None:
pd = pretty_element(denominator)
else:
pd = ''
if focus_stage == 'calibrated':
udict = {0: 'mol/mol ' + pd,
1: 'mmol/mol ' + pd,
2: '$\mu$mol/mol ' + pd,
3: 'nmol/mol ' + pd,
4: 'pmol/mol ' + pd,
5: 'fmol/mol ' + pd}
elif focus_stage == 'ratios':
udict = {0: 'counts/count ' + pd,
1: '$10^{-3}$ counts/count ' + pd,
2: '$10^{-6}$ counts/count ' + pd,
3: '$10^{-9}$ counts/count ' + pd,
4: '$10^{-12}$ counts/count ' + pd,
5: '$10^{-15}$ counts/count ' + pd}
elif focus_stage in ('rawdata', 'despiked', 'bkgsub'):
        udict = {0: 'counts',
                 1: '$10^{-3}$ counts',
                 2: '$10^{-6}$ counts',
                 3: '$10^{-9}$ counts',
                 4: '$10^{-12}$ counts',
                 5: '$10^{-15}$ counts'}
else:
udict = {0: '', 1: '', 2: '', 3: '', 4: '', 5: ''}
a = abs(a)
n = 0
if a < llim:
while a < llim:
a *= 1000
n += 1
return float(1000**n), udict[n]
def collate_data(in_dir, extension='.csv', out_dir=None):
"""
    Copy all csvs in a nested directory to a single directory.
Function to copy all csvs from a directory, and place
them in a new directory.
Parameters
----------
in_dir : str
Input directory containing csv files in subfolders
extension : str
The extension that identifies your data files.
Defaults to '.csv'.
out_dir : str
Destination directory
Returns
-------
None
"""
if out_dir is None:
out_dir = './' + re.search('^\.(.*)', extension).groups(0)[0]
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
for p, d, fs in os.walk(in_dir):
for f in fs:
if extension in f:
shutil.copy(p + '/' + f, out_dir + '/' + f)
return
def bool_transitions(a):
"""
    Return indices where a boolean array changes value (in either direction).
"""
return np.where(a[:-1] != a[1:])[0]
def bool_2_indices(a):
"""
Convert boolean array into a 2D array of (start, stop) pairs.
"""
if any(a):
lims = []
lims.append(np.where(a[:-1] != a[1:])[0])
if a[0]:
lims.append([0])
if a[-1]:
lims.append([len(a) - 1])
lims = np.concatenate(lims)
lims.sort()
return np.reshape(lims, (lims.size // 2, 2))
else:
return None
def enumerate_bool(bool_array, nstart=0):
"""
Consecutively numbers contiguous booleans in array.
i.e. a boolean sequence, and resulting numbering
T F T T T F T F F F T T F
0-1 1 1 - 2 ---3 3 -
    where ' - ' denotes False.
Parameters
----------
bool_array : array_like
Array of booleans.
nstart : int
The number of the first boolean group.
"""
ind = bool_2_indices(bool_array)
ns = np.full(bool_array.size, nstart, dtype=int)
for n, lims in enumerate(ind):
ns[lims[0]:lims[-1] + 1] = nstart + n + 1
return ns
def tuples_2_bool(tuples, x):
"""
Generate boolean array from list of limit tuples.
Parameters
----------
tuples : array_like
        [n, 2] array of (start, end) values
x : array_like
x scale the tuples are mapped to
Returns
-------
array_like
boolean array, True where x is between each pair of tuples.
"""
if np.ndim(tuples) == 1:
tuples = [tuples]
out = np.zeros(x.size, dtype=bool)
for l, u in tuples:
out[(x > l) & (x < u)] = True
return out
def get_example_data(destination_dir):
if os.path.isdir(destination_dir):
overwrite = input(destination_dir +
' already exists. Overwrite? [N/y]: ').lower() == 'y'
if overwrite:
shutil.rmtree(destination_dir)
else:
print(destination_dir + ' was not overwritten.')
shutil.copytree(pkgrs.resource_filename('latools', 'resources/test_data'),
destination_dir)
return
def rangecalc(xs, pad=0.05):
mn = np.nanmin(xs)
mx = np.nanmax(xs)
xr = mx - mn
return [mn - pad * xr, mx + pad * xr]
class un_interp1d(object):
"""
object for handling interpolation of values with uncertainties.
"""
def __init__(self, x, y, fill_value=np.nan, **kwargs):
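        # Build two interpolators: one over the nominal values and one over
        # the standard deviations, so both can be evaluated at new x values.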
if isinstance(fill_value, tuple):
nom_fill = tuple([un.nominal_values(v) for v in fill_value])
std_fill = tuple([un.std_devs(v) for v in fill_value])
else:
nom_fill = std_fill = fill_value
self.nom_interp = interp.interp1d(un.nominal_values(x),
un.nominal_values(y),
fill_value=nom_fill, **kwargs)
self.std_interp = interp.interp1d(un.nominal_values(x),
un.std_devs(y),
fill_value=std_fill, **kwargs)
def new(self, xn):
yn = self.nom_interp(xn)
yn_err = self.std_interp(xn)
return un.uarray(yn, yn_err)
def new_nom(self, xn):
return self.nom_interp(xn)
def new_std(self, xn):
return self.std_interp(xn)
def rolling_window(a, window, pad=None):
"""
    Returns an (n, window) rolling-window array of data.
Parameters
----------
a : array_like
Array to calculate the rolling window of
    window : int
        The width of the rolling window.
    pad : str or same as dtype(a)
        Padding mode ('ends', 'mean_ends' or 'repeat_ends') or a constant
        value used to pad the ends of the returned array.
Returns
-------
array_like
        An array of shape (n, window), where n is either len(a) - window + 1
        if pad is None, or len(a) if pad is not None.
"""
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1], )
out = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
# pad shape
if window % 2 == 0:
npre = window // 2 - 1
npost = window // 2
else:
npre = npost = window // 2
if isinstance(pad, str):
if pad == 'ends':
prepad = np.full((npre, window), a[0])
postpad = np.full((npost, window), a[-1])
elif pad == 'mean_ends':
prepad = np.full((npre, window), np.mean(a[:(window // 2)]))
postpad = np.full((npost, window), np.mean(a[-(window // 2):]))
elif pad == 'repeat_ends':
prepad = np.full((npre, window), out[0])
postpad = np.full((npost, window), out[0])
else:
raise ValueError("If pad is a string, it must be either 'ends', 'mean_ends' or 'repeat_ends'.")
return np.concatenate((prepad, out, postpad))
elif pad is not None:
pre_blankpad = np.empty(((npre, window)))
pre_blankpad[:] = pad
post_blankpad = np.empty(((npost, window)))
post_blankpad[:] = pad
return np.concatenate([pre_blankpad, out, post_blankpad])
else:
return out
def fastsmooth(a, win=11):
"""
    Returns a rolling-window smooth of a.
    Function to efficiently calculate the rolling mean of a numpy
    array using 'stride_tricks' to split up a 1D array into an ndarray of
    sub-sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
        Rolling-window smooth of a (same length as a).
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
win += 1 # add 1 to window if it is even.
kernel = np.ones(win) / win
npad = int((win - 1) / 2)
spad = np.full(npad + 1, np.mean(a[:(npad + 1)]))
epad = np.full(npad - 1, np.mean(a[-(npad - 1):]))
return np.concatenate([spad, np.convolve(a, kernel, 'valid'), epad])
def fastgrad(a, win=11):
"""
    Returns a rolling-window gradient of a.
    Function to efficiently calculate the rolling gradient of a numpy
    array using 'stride_tricks' to split up a 1D array into an ndarray of
    sub-sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
        Gradient of a, assuming a constant integer x-scale.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
        win += 1  # add 1 to window if it is even.
# trick for efficient 'rolling' computation in numpy
# shape = a.shape[:-1] + (a.shape[-1] - win + 1, win)
# strides = a.strides + (a.strides[-1], )
# wins = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
wins = rolling_window(a, win, 'ends')
# apply rolling gradient to data
a = map(lambda x: np.polyfit(np.arange(win), x, 1)[0], wins)
return np.array(list(a))
def calc_grads(x, dat, keys=None, win=5):
"""
Calculate gradients of values in dat.
Parameters
----------
x : array like
Independent variable for items in dat.
dat : dict
{key: dependent_variable} pairs
keys : str or array-like
Which keys in dict to calculate the gradient of.
win : int
        The width of the rolling window for the gradient calculation.
Returns
-------
dict of gradients
"""
if keys is None:
keys = dat.keys()
def grad(xy):
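        # xy is an (x-window, y-window) pair; return the slope of a first-order
        # polynomial fit, or NaN if the window contains NaNs or the fit fails.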
if (~np.isnan(xy)).all():
try:
return np.polyfit(xy[0], xy[1], 1)[0]
except ValueError:
return np.nan
else:
return np.nan
xs = rolling_window(x, win, pad='repeat_ends')
grads = Bunch()
for k in keys:
d = nominal_values(rolling_window(dat[k], win, pad='repeat_ends'))
grads[k] = np.array(list(map(grad, zip(xs, d))))
return grads
def findmins(x, y):
""" Function to find local minima.
Parameters
----------
x, y : array_like
1D arrays of the independent (x) and dependent (y) variables.
Returns
-------
array_like
Array of points in x where y has a local minimum.
"""
return x[np.r_[False, y[1:] < y[:-1]] & np.r_[y[:-1] < y[1:], False]]
def stack_keys(ddict, keys, extra=None):
"""
Combine elements of ddict into an array of shape (len(ddict[key]), len(keys)).
Useful for preparing data for sklearn.
Parameters
----------
ddict : dict
A dict containing arrays or lists to be stacked.
Must be of equal length.
keys : list or str
The keys of dict to stack. Must be present in ddict.
extra : list (optional)
A list of additional arrays to stack. Elements of extra
must be the same length as arrays in ddict.
Extras are inserted as the first columns of output.
"""
if isinstance(keys, str):
d = [ddict[keys]]
else:
d = [ddict[k] for k in keys]
if extra is not None:
d = extra + d
return np.vstack(d).T
|
[
"os.mkdir",
"numpy.polyfit",
"numpy.empty",
"os.walk",
"numpy.ones",
"pkg_resources.resource_filename",
"numpy.isnan",
"numpy.mean",
"numpy.arange",
"shutil.rmtree",
"numpy.convolve",
"shutil.copy",
"numpy.full",
"numpy.ndim",
"numpy.reshape",
"re.search",
"dateutil.parser.parse",
"uncertainties.unumpy.uarray",
"datetime.datetime.strptime",
"numpy.lib.stride_tricks.as_strided",
"uncertainties.unumpy.nominal_values",
"numpy.concatenate",
"numpy.vstack",
"numpy.nanmax",
"uncertainties.unumpy.std_devs",
"os.path.isdir",
"numpy.zeros",
"numpy.nanmin",
"numpy.where"
] |
[((4364, 4379), 'os.walk', 'os.walk', (['in_dir'], {}), '(in_dir)\n', (4371, 4379), False, 'import os\n'), ((5523, 5566), 'numpy.full', 'np.full', (['bool_array.size', 'nstart'], {'dtype': 'int'}), '(bool_array.size, nstart, dtype=int)\n', (5530, 5566), True, 'import numpy as np\n'), ((6100, 6128), 'numpy.zeros', 'np.zeros', (['x.size'], {'dtype': 'bool'}), '(x.size, dtype=bool)\n', (6108, 6128), True, 'import numpy as np\n'), ((6253, 6283), 'os.path.isdir', 'os.path.isdir', (['destination_dir'], {}), '(destination_dir)\n', (6266, 6283), False, 'import os\n'), ((6717, 6730), 'numpy.nanmin', 'np.nanmin', (['xs'], {}), '(xs)\n', (6726, 6730), True, 'import numpy as np\n'), ((6740, 6753), 'numpy.nanmax', 'np.nanmax', (['xs'], {}), '(xs)\n', (6749, 6753), True, 'import numpy as np\n'), ((8484, 8548), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['a'], {'shape': 'shape', 'strides': 'strides'}), '(a, shape=shape, strides=strides)\n', (8515, 8548), True, 'import numpy as np\n'), ((1276, 1301), 'dateutil.parser.parse', 'du.parser.parse', (['datetime'], {}), '(datetime)\n', (1291, 1301), True, 'import dateutil as du\n'), ((1324, 1367), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['datetime', 'time_format'], {}), '(datetime, time_format)\n', (1344, 1367), True, 'import datetime as dt\n'), ((4293, 4315), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (4306, 4315), False, 'import os\n'), ((4325, 4342), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (4333, 4342), False, 'import os\n'), ((4625, 4650), 'numpy.where', 'np.where', (['(a[:-1] != a[1:])'], {}), '(a[:-1] != a[1:])\n', (4633, 4650), True, 'import numpy as np\n'), ((4961, 4981), 'numpy.concatenate', 'np.concatenate', (['lims'], {}), '(lims)\n', (4975, 4981), True, 'import numpy as np\n'), ((5018, 5055), 'numpy.reshape', 'np.reshape', (['lims', '(lims.size // 2, 2)'], {}), '(lims, (lims.size // 2, 2))\n', (5028, 5055), True, 'import numpy as np\n'), ((6041, 6056), 'numpy.ndim', 'np.ndim', (['tuples'], {}), '(tuples)\n', (6048, 6056), True, 'import numpy as np\n'), ((6570, 6627), 'pkg_resources.resource_filename', 'pkgrs.resource_filename', (['"""latools"""', '"""resources/test_data"""'], {}), "('latools', 'resources/test_data')\n", (6593, 6627), True, 'import pkg_resources as pkgrs\n'), ((7731, 7752), 'uncertainties.unumpy.uarray', 'un.uarray', (['yn', 'yn_err'], {}), '(yn, yn_err)\n', (7740, 7752), True, 'import uncertainties.unumpy as un\n'), ((9316, 9354), 'numpy.concatenate', 'np.concatenate', (['(prepad, out, postpad)'], {}), '((prepad, out, postpad))\n', (9330, 9354), True, 'import numpy as np\n'), ((10354, 10366), 'numpy.ones', 'np.ones', (['win'], {}), '(win)\n', (10361, 10366), True, 'import numpy as np\n'), ((10432, 10453), 'numpy.mean', 'np.mean', (['a[:npad + 1]'], {}), '(a[:npad + 1])\n', (10439, 10453), True, 'import numpy as np\n'), ((10486, 10510), 'numpy.mean', 'np.mean', (['a[-(npad - 1):]'], {}), '(a[-(npad - 1):])\n', (10493, 10510), True, 'import numpy as np\n'), ((13829, 13841), 'numpy.vstack', 'np.vstack', (['d'], {}), '(d)\n', (13838, 13841), True, 'import numpy as np\n'), ((6443, 6473), 'shutil.rmtree', 'shutil.rmtree', (['destination_dir'], {}), '(destination_dir)\n', (6456, 6473), False, 'import shutil\n'), ((7268, 7288), 'uncertainties.unumpy.nominal_values', 'un.nominal_values', (['x'], {}), '(x)\n', (7285, 7288), True, 'import uncertainties.unumpy as un\n'), ((7332, 7352), 'uncertainties.unumpy.nominal_values', 'un.nominal_values', (['y'], {}), 
'(y)\n', (7349, 7352), True, 'import uncertainties.unumpy as un\n'), ((7469, 7489), 'uncertainties.unumpy.nominal_values', 'un.nominal_values', (['x'], {}), '(x)\n', (7486, 7489), True, 'import uncertainties.unumpy as un\n'), ((7533, 7547), 'uncertainties.unumpy.std_devs', 'un.std_devs', (['y'], {}), '(y)\n', (7544, 7547), True, 'import uncertainties.unumpy as un\n'), ((8769, 8798), 'numpy.full', 'np.full', (['(npre, window)', 'a[0]'], {}), '((npre, window), a[0])\n', (8776, 8798), True, 'import numpy as np\n'), ((8821, 8852), 'numpy.full', 'np.full', (['(npost, window)', 'a[-1]'], {}), '((npost, window), a[-1])\n', (8828, 8852), True, 'import numpy as np\n'), ((9404, 9428), 'numpy.empty', 'np.empty', (['(npre, window)'], {}), '((npre, window))\n', (9412, 9428), True, 'import numpy as np\n'), ((9485, 9510), 'numpy.empty', 'np.empty', (['(npost, window)'], {}), '((npost, window))\n', (9493, 9510), True, 'import numpy as np\n'), ((9559, 9609), 'numpy.concatenate', 'np.concatenate', (['[pre_blankpad, out, post_blankpad]'], {}), '([pre_blankpad, out, post_blankpad])\n', (9573, 9609), True, 'import numpy as np\n'), ((10545, 10576), 'numpy.convolve', 'np.convolve', (['a', 'kernel', '"""valid"""'], {}), "(a, kernel, 'valid')\n", (10556, 10576), True, 'import numpy as np\n'), ((4449, 4492), 'shutil.copy', 'shutil.copy', (["(p + '/' + f)", "(out_dir + '/' + f)"], {}), "(p + '/' + f, out_dir + '/' + f)\n", (4460, 4492), False, 'import shutil\n'), ((4813, 4838), 'numpy.where', 'np.where', (['(a[:-1] != a[1:])'], {}), '(a[:-1] != a[1:])\n', (4821, 4838), True, 'import numpy as np\n'), ((2321, 2332), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (2329, 2332), True, 'import numpy as np\n'), ((7057, 7077), 'uncertainties.unumpy.nominal_values', 'un.nominal_values', (['v'], {}), '(v)\n', (7074, 7077), True, 'import uncertainties.unumpy as un\n'), ((7130, 7144), 'uncertainties.unumpy.std_devs', 'un.std_devs', (['v'], {}), '(v)\n', (7141, 7144), True, 'import uncertainties.unumpy as un\n'), ((8931, 8955), 'numpy.mean', 'np.mean', (['a[:window // 2]'], {}), '(a[:window // 2])\n', (8938, 8955), True, 'import numpy as np\n'), ((9006, 9033), 'numpy.mean', 'np.mean', (['a[-(window // 2):]'], {}), '(a[-(window // 2):])\n', (9013, 9033), True, 'import numpy as np\n'), ((9091, 9122), 'numpy.full', 'np.full', (['(npre, window)', 'out[0]'], {}), '((npre, window), out[0])\n', (9098, 9122), True, 'import numpy as np\n'), ((9145, 9177), 'numpy.full', 'np.full', (['(npost, window)', 'out[0]'], {}), '((npost, window), out[0])\n', (9152, 9177), True, 'import numpy as np\n'), ((11649, 11663), 'numpy.arange', 'np.arange', (['win'], {}), '(win)\n', (11658, 11663), True, 'import numpy as np\n'), ((12258, 12270), 'numpy.isnan', 'np.isnan', (['xy'], {}), '(xy)\n', (12266, 12270), True, 'import numpy as np\n'), ((12319, 12346), 'numpy.polyfit', 'np.polyfit', (['xy[0]', 'xy[1]', '(1)'], {}), '(xy[0], xy[1], 1)\n', (12329, 12346), True, 'import numpy as np\n'), ((4236, 4268), 're.search', 're.search', (['"""^\\\\.(.*)"""', 'extension'], {}), "('^\\\\.(.*)', extension)\n", (4245, 4268), False, 'import re\n')]
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test for hook of session run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from distutils.version import LooseVersion as Version
import six
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import test
from tensorflow.python.framework.versions import __version__
import epl
from epl.parallel.hooks import _append_replicated_fetches
# pylint: disable=missing-docstring,unused-argument,unused-variable
class RunHookTest(test.TestCase):
def test_for_append_replicated_fetches(self):
epl.init(config=epl.Config({"communication.gradients_reduce_method": "sum"}))
with epl.Cluster(worker_hosts="127.0.0.1:8001", worker_index=0):
with epl.replicate(device_count=1):
num_x = np.random.randint(0, 10, (500, 20)).astype(dtype=np.float32)
num_y = np.random.randint(0, 10, 500).astype(dtype=np.int64)
dataset = tf.data.Dataset.from_tensor_slices((num_x, num_y)) \
.batch(10).repeat(1)
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
iterator.initializer)
x, labels = iterator.get_next()
logits = tf.layers.dense(x, 2)
logits = tf.layers.dense(logits, 10)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels,
logits=logits)
epl.add_to_collection(loss, epl.GraphKeys.GLOBAL_MEAN_OBJECTS)
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
momentum=0.9)
train_op = optimizer.minimize(loss, global_step=global_step)
tf.train.MonitoredTrainingSession()
# Test for a single operation/tensor.
fetches = loss
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches.name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0"
])
fetches = train_op
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
# Test for nvidia-tf(1.15.4) and deeprec(1.15.5).
if Version(__version__) >= Version("1.15.4") and Version(__version__) < Version("2.0"):
suffix = "/group_deps"
else:
suffix = ""
self.assertEqual(fetches.name, "Momentum" + suffix)
self.assertEqual(replicas, [
"EPL_REPLICA_1/Momentum" + suffix, "EPL_REPLICA_2/Momentum" +
suffix, "EPL_REPLICA_3/Momentum" + suffix
])
# Test for list fetches.
fetches = [loss, train_op]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
fetches = [fetch.name for fetch in fetches]
replicas = [rep.name for rep in replicas]
self.assertListEqual(
fetches, ["EPL_PARALLEL_STRATEGY/truediv:0", "Momentum" + suffix])
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of dict.
fetches = {"loss": loss, "train_op": train_op}
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches["loss"].name,
"EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches["train_op"].name, "Momentum" + suffix)
if six.PY2:
self.assertListEqual(replicas, [
"EPL_REPLICA_1/Momentum" + suffix, "EPL_REPLICA_2/Momentum" +
suffix, "EPL_REPLICA_3/Momentum" + suffix,
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0"
])
else:
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of OrderedDict
fetches = collections.OrderedDict()
fetches["loss"] = loss
fetches["train_op"] = train_op
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches["loss"].name,
"EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches["train_op"].name, "Momentum" + suffix)
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of tuple.
fetches = (loss, train_op)
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches[0].name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches[1].name, "Momentum" + suffix)
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of namedtuple.
fetch_type = collections.namedtuple("fetch_type", ["loss", "train_op"])
fetches = fetch_type(loss=loss, train_op=train_op)
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches.loss.name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches.train_op.name, "Momentum" + suffix)
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for nested list fetches.
def _flatten(li):
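      # Recursively flatten nested lists of fetches into a single flat list.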
return sum(
([x] if not isinstance(x, list) else _flatten(x) for x in li), [])
fetches = [labels, [train_op, logits, [loss, global_step]]]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
fetches = _flatten(fetches)
fetches = [fetch.name for fetch in fetches]
replicas = [rep.name for rep in replicas]
self.assertListEqual(fetches, [
"IteratorGetNext:1", "Momentum" + suffix, "dense_1/BiasAdd:0",
"EPL_PARALLEL_STRATEGY/truediv:0", "global_step:0"
])
self.assertListEqual(replicas, [
"EPL_REPLICA_1/IteratorGetNext:1",
"EPL_REPLICA_2/IteratorGetNext:1",
"EPL_REPLICA_3/IteratorGetNext:1", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix,
"EPL_REPLICA_1/dense_1/BiasAdd:0",
"EPL_REPLICA_2/dense_1/BiasAdd:0",
"EPL_REPLICA_3/dense_1/BiasAdd:0",
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0",
"EPL_REPLICA_1/global_step:0",
"EPL_REPLICA_2/global_step:0",
"EPL_REPLICA_3/global_step:0"
])
# Test for nested list with dict.
fetches = [labels, {"loss": loss}]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches[0].name, "IteratorGetNext:1")
self.assertEqual(fetches[1]["loss"].name,
"EPL_PARALLEL_STRATEGY/truediv:0")
self.assertListEqual(replicas, [
"EPL_REPLICA_1/IteratorGetNext:1",
"EPL_REPLICA_2/IteratorGetNext:1",
"EPL_REPLICA_3/IteratorGetNext:1",
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0"
])
# Test for nested list with tuple.
fetches = [labels, (loss, global_step)]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches[0].name, "IteratorGetNext:1")
self.assertEqual(fetches[1][0].name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches[1][1].name, "global_step:0")
self.assertListEqual(replicas, [
"EPL_REPLICA_1/IteratorGetNext:1",
"EPL_REPLICA_2/IteratorGetNext:1",
"EPL_REPLICA_3/IteratorGetNext:1",
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0",
"EPL_REPLICA_1/global_step:0",
"EPL_REPLICA_2/global_step:0",
"EPL_REPLICA_3/global_step:0"
])
# pylint: enable=missing-docstring,unused-argument,unused-variable
if __name__ == "__main__":
test.main()
|
[
"tensorflow.python.platform.test.main",
"tensorflow.train.MonitoredTrainingSession",
"tensorflow.losses.sparse_softmax_cross_entropy",
"epl.add_to_collection",
"distutils.version.LooseVersion",
"tensorflow.layers.dense",
"tensorflow.train.get_or_create_global_step",
"epl.replicate",
"tensorflow.add_to_collection",
"tensorflow.data.Dataset.from_tensor_slices",
"epl.Cluster",
"epl.parallel.hooks._append_replicated_fetches",
"numpy.random.randint",
"tensorflow.train.MomentumOptimizer",
"collections.namedtuple",
"collections.OrderedDict",
"epl.Config"
] |
[((10439, 10450), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (10448, 10450), False, 'from tensorflow.python.platform import test\n'), ((8955, 9000), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (8981, 9000), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((9631, 9676), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (9657, 9676), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((1387, 1445), 'epl.Cluster', 'epl.Cluster', ([], {'worker_hosts': '"""127.0.0.1:8001"""', 'worker_index': '(0)'}), "(worker_hosts='127.0.0.1:8001', worker_index=0)\n", (1398, 1445), False, 'import epl\n'), ((2526, 2561), 'tensorflow.train.MonitoredTrainingSession', 'tf.train.MonitoredTrainingSession', ([], {}), '()\n', (2559, 2561), True, 'import tensorflow as tf\n'), ((2664, 2709), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (2690, 2709), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((3080, 3125), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (3106, 3125), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((3714, 3759), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (3740, 3759), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((4397, 4442), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (4423, 4442), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((5402, 5427), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5425, 5427), False, 'import collections\n'), ((5530, 5575), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (5556, 5575), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((6212, 6257), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (6238, 6257), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((6812, 6870), 'collections.namedtuple', 'collections.namedtuple', (['"""fetch_type"""', "['loss', 'train_op']"], {}), "('fetch_type', ['loss', 'train_op'])\n", (6834, 6870), False, 'import collections\n'), ((6964, 7009), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (6990, 7009), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((7780, 7825), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (7806, 7825), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((1316, 1376), 'epl.Config', 'epl.Config', (["{'communication.gradients_reduce_method': 'sum'}"], {}), "({'communication.gradients_reduce_method': 'sum'})\n", (1326, 1376), False, 'import epl\n'), ((1458, 1487), 'epl.replicate', 'epl.replicate', ([], {'device_count': '(1)'}), '(device_count=1)\n', (1471, 1487), False, 'import 
epl\n'), ((1824, 1899), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.TABLE_INITIALIZERS', 'iterator.initializer'], {}), '(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)\n', (1844, 1899), True, 'import tensorflow as tf\n'), ((1987, 2008), 'tensorflow.layers.dense', 'tf.layers.dense', (['x', '(2)'], {}), '(x, 2)\n', (2002, 2008), True, 'import tensorflow as tf\n'), ((2026, 2053), 'tensorflow.layers.dense', 'tf.layers.dense', (['logits', '(10)'], {}), '(logits, 10)\n', (2041, 2053), True, 'import tensorflow as tf\n'), ((2069, 2137), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (2107, 2137), True, 'import tensorflow as tf\n'), ((2200, 2262), 'epl.add_to_collection', 'epl.add_to_collection', (['loss', 'epl.GraphKeys.GLOBAL_MEAN_OBJECTS'], {}), '(loss, epl.GraphKeys.GLOBAL_MEAN_OBJECTS)\n', (2221, 2262), False, 'import epl\n'), ((2285, 2321), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (2319, 2321), True, 'import tensorflow as tf\n'), ((2342, 2403), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': '(0.001)', 'momentum': '(0.9)'}), '(learning_rate=0.001, momentum=0.9)\n', (2368, 2403), True, 'import tensorflow as tf\n'), ((3239, 3259), 'distutils.version.LooseVersion', 'Version', (['__version__'], {}), '(__version__)\n', (3246, 3259), True, 'from distutils.version import LooseVersion as Version\n'), ((3263, 3280), 'distutils.version.LooseVersion', 'Version', (['"""1.15.4"""'], {}), "('1.15.4')\n", (3270, 3280), True, 'from distutils.version import LooseVersion as Version\n'), ((3285, 3305), 'distutils.version.LooseVersion', 'Version', (['__version__'], {}), '(__version__)\n', (3292, 3305), True, 'from distutils.version import LooseVersion as Version\n'), ((3308, 3322), 'distutils.version.LooseVersion', 'Version', (['"""2.0"""'], {}), "('2.0')\n", (3315, 3322), True, 'from distutils.version import LooseVersion as Version\n'), ((1505, 1540), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(500, 20)'], {}), '(0, 10, (500, 20))\n', (1522, 1540), True, 'import numpy as np\n'), ((1582, 1611), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(500)'], {}), '(0, 10, 500)\n', (1599, 1611), True, 'import numpy as np\n'), ((1653, 1703), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(num_x, num_y)'], {}), '((num_x, num_y))\n', (1687, 1703), True, 'import tensorflow as tf\n')]
|
import six
import numpy as np
import nutszebra_utility as nz
import sys
import pickle
def unpickle(file_name):
fp = open(file_name, 'rb')
if sys.version_info.major == 2:
data = pickle.load(fp)
elif sys.version_info.major == 3:
data = pickle.load(fp, encoding='latin-1')
fp.close()
return data
class Cifar10(object):
def __init__(self):
self.utility = nz.Utility()
self.output_name = 'cifar10.pkl'
self.url = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
self.downloaded_file = 'cifar-10-python.tar.gz'
self.untared_file = 'cifar-10-batches-py'
self.batch_train_file = ['data_batch_' + str(num) for num in six.moves.range(1, 6)]
self.batch_test_file = 'test_batch'
self.meta_file = 'batches.meta'
self.converted_name = 'cifar10.pkl'
def download_cifar_10(self):
# if already downloaded and processed, then return True
if self.converted_name in self.utility.find_files(self.utility.nutszebra_path, affix_flag=True):
print('Already downloaded')
return True
# download file
print('Downloading: ' + self.downloaded_file)
self.utility.download_file(self.url, self.utility.nutszebra_path, self.downloaded_file)
print('Done')
print('Uncompressing')
# untar
self.utility.untar_gz(self.utility.nutszebra_path + '/' + self.downloaded_file)
print('Done')
# delete tar.gz file
self.utility.remove_file(self.downloaded_file)
# load train file
print('Loading train data')
train_x = np.zeros((50000, 3, 32, 32), dtype=np.float32)
train_y = np.zeros((50000), dtype=np.int32)
for i, batch_file in enumerate(self.batch_train_file):
data = unpickle(self.untared_file + '/' + batch_file)
start = i * 10000
end = start + 10000
train_x[start:end] = data['data'].reshape(10000, 3, 32, 32)
train_y[start:end] = np.array(data['labels'], dtype=np.int32)
print('Done')
# load test file
print('Loading test data')
test_x = np.zeros((10000, 3, 32, 32), dtype=np.float32)
test_y = np.zeros((10000), dtype=np.int32)
data = unpickle(self.untared_file + '/' + self.batch_test_file)
test_x[:] = data['data'].reshape(10000, 3, 32, 32)
test_y[:] = np.array(data['labels'], dtype=np.int32)
print('Done')
# load meta file
data = unpickle(self.untared_file + '/' + self.meta_file)
meta = data['label_names']
# save loaded data
print('Saving')
data = {}
data['train_x'] = train_x
data['train_y'] = train_y
data['test_x'] = test_x
data['test_y'] = test_y
data['meta'] = meta
self.utility.save_pickle(data, self.utility.nutszebra_path + '/' + self.converted_name)
def check_overlap(self):
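        # Compare every test image against the training set and report whether
        # any exact duplicates exist.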
data = self.load_cifar10_data()
length = len(data['test_x'])
result = [0] * length
for i in six.moves.range(length):
            # True if test image i matches some training image exactly
            result[i] = np.any(np.all(data['test_x'][i] == data['train_x'], axis=(1, 2, 3)))
return (np.any(result), result)
def load_cifar10_data(self):
self.download_cifar_10()
return unpickle(self.utility.nutszebra_path + '/' + self.converted_name)
|
[
"six.moves.range",
"numpy.zeros",
"numpy.any",
"nutszebra_utility.Utility",
"pickle.load",
"numpy.array",
"numpy.all"
] |
[((195, 210), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (206, 210), False, 'import pickle\n'), ((404, 416), 'nutszebra_utility.Utility', 'nz.Utility', ([], {}), '()\n', (414, 416), True, 'import nutszebra_utility as nz\n'), ((1644, 1690), 'numpy.zeros', 'np.zeros', (['(50000, 3, 32, 32)'], {'dtype': 'np.float32'}), '((50000, 3, 32, 32), dtype=np.float32)\n', (1652, 1690), True, 'import numpy as np\n'), ((1709, 1740), 'numpy.zeros', 'np.zeros', (['(50000)'], {'dtype': 'np.int32'}), '(50000, dtype=np.int32)\n', (1717, 1740), True, 'import numpy as np\n'), ((2179, 2225), 'numpy.zeros', 'np.zeros', (['(10000, 3, 32, 32)'], {'dtype': 'np.float32'}), '((10000, 3, 32, 32), dtype=np.float32)\n', (2187, 2225), True, 'import numpy as np\n'), ((2243, 2274), 'numpy.zeros', 'np.zeros', (['(10000)'], {'dtype': 'np.int32'}), '(10000, dtype=np.int32)\n', (2251, 2274), True, 'import numpy as np\n'), ((2428, 2468), 'numpy.array', 'np.array', (["data['labels']"], {'dtype': 'np.int32'}), "(data['labels'], dtype=np.int32)\n", (2436, 2468), True, 'import numpy as np\n'), ((3096, 3119), 'six.moves.range', 'six.moves.range', (['length'], {}), '(length)\n', (3111, 3119), False, 'import six\n'), ((264, 299), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin-1"""'}), "(fp, encoding='latin-1')\n", (275, 299), False, 'import pickle\n'), ((2039, 2079), 'numpy.array', 'np.array', (["data['labels']"], {'dtype': 'np.int32'}), "(data['labels'], dtype=np.int32)\n", (2047, 2079), True, 'import numpy as np\n'), ((3214, 3228), 'numpy.any', 'np.any', (['result'], {}), '(result)\n', (3220, 3228), True, 'import numpy as np\n'), ((709, 730), 'six.moves.range', 'six.moves.range', (['(1)', '(6)'], {}), '(1, 6)\n', (724, 730), False, 'import six\n'), ((3152, 3196), 'numpy.all', 'np.all', (["(data['test_x'][i] == data['train_x'])"], {}), "(data['test_x'][i] == data['train_x'])\n", (3158, 3196), True, 'import numpy as np\n')]
|
from tqdm import tqdm
from MCTS import MCTS
from BinaryTree import BinaryTree
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(15)
def run_experiment(max_iterations, dynamic_c=False):
"""
Run a single experiment of a sequence of MCTS searches to find the optimal path.
:param max_iterations: Number of iterations to run the MCTS.
:param dynamic_c: Boolean indicating whether to use a dynamic c or not.
    :return: distance (number of differing moves) between the path found by the search and the best leaf
"""
tree = BinaryTree(depth=12, b=20, tau=3)
best_leaf = max(tree.leaves)
mcts = MCTS(max_iterations=max_iterations, c=2)
optimal_path = ""
while tree.depth > 0:
# search the best direction
direction = mcts.search(tree, dynamic_c=dynamic_c, verbose=False)
optimal_path += direction
# update the tree
tree.update_root(direction)
# return the distance of the optimal path found from the search wrt the best leaf
return sum(1 for a, b in zip(optimal_path, best_leaf.address) if a != b)
def main():
# compute statistics for static c and dynamic c
n_iterations = np.logspace(0.7, 3, num=18, base=10, dtype=int)
values_static_c = [run_experiment(max_iterations=n, dynamic_c=False) for n in tqdm(n_iterations, desc='Execute MCTS with c=2', unit=' experiment')]
values_dynamic_c = [run_experiment(max_iterations=n, dynamic_c=True) for n in tqdm(n_iterations, desc='Execute MCTS with dynamic c', unit=' experiment')]
# plot the results
plt.figure(figsize=(8, 4))
plt.plot(n_iterations, values_dynamic_c, '-o', label="MCTS with dynamic c")
plt.plot(n_iterations, values_static_c, '-o', label="MCTS with c=2")
plt.xlabel("Number of iterations")
plt.ylabel("Distance of the optimal path from the best leaf")
plt.title("Compare the value of the optimal path found by MCTS with and without dynamic c")
plt.grid(linestyle='--', linewidth=1)
plt.xscale("log")
plt.xticks(n_iterations, n_iterations)
plt.legend()
plt.show()
if __name__ == "__main__":
main()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xscale",
"tqdm.tqdm",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.logspace",
"matplotlib.pyplot.legend",
"MCTS.MCTS",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"BinaryTree.BinaryTree",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel"
] |
[((131, 149), 'numpy.random.seed', 'np.random.seed', (['(15)'], {}), '(15)\n', (145, 149), True, 'import numpy as np\n'), ((519, 552), 'BinaryTree.BinaryTree', 'BinaryTree', ([], {'depth': '(12)', 'b': '(20)', 'tau': '(3)'}), '(depth=12, b=20, tau=3)\n', (529, 552), False, 'from BinaryTree import BinaryTree\n'), ((597, 637), 'MCTS.MCTS', 'MCTS', ([], {'max_iterations': 'max_iterations', 'c': '(2)'}), '(max_iterations=max_iterations, c=2)\n', (601, 637), False, 'from MCTS import MCTS\n'), ((1143, 1190), 'numpy.logspace', 'np.logspace', (['(0.7)', '(3)'], {'num': '(18)', 'base': '(10)', 'dtype': 'int'}), '(0.7, 3, num=18, base=10, dtype=int)\n', (1154, 1190), True, 'import numpy as np\n'), ((1529, 1555), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (1539, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1635), 'matplotlib.pyplot.plot', 'plt.plot', (['n_iterations', 'values_dynamic_c', '"""-o"""'], {'label': '"""MCTS with dynamic c"""'}), "(n_iterations, values_dynamic_c, '-o', label='MCTS with dynamic c')\n", (1568, 1635), True, 'import matplotlib.pyplot as plt\n'), ((1640, 1708), 'matplotlib.pyplot.plot', 'plt.plot', (['n_iterations', 'values_static_c', '"""-o"""'], {'label': '"""MCTS with c=2"""'}), "(n_iterations, values_static_c, '-o', label='MCTS with c=2')\n", (1648, 1708), True, 'import matplotlib.pyplot as plt\n'), ((1713, 1747), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of iterations"""'], {}), "('Number of iterations')\n", (1723, 1747), True, 'import matplotlib.pyplot as plt\n'), ((1752, 1813), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance of the optimal path from the best leaf"""'], {}), "('Distance of the optimal path from the best leaf')\n", (1762, 1813), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1919), 'matplotlib.pyplot.title', 'plt.title', (['"""Compare the value of the optimal path found by MCTS with and without dynamic c"""'], {}), "(\n 'Compare the value of the optimal path found by MCTS with and without dynamic c'\n )\n", (1827, 1919), True, 'import matplotlib.pyplot as plt\n'), ((1914, 1951), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""--"""', 'linewidth': '(1)'}), "(linestyle='--', linewidth=1)\n", (1922, 1951), True, 'import matplotlib.pyplot as plt\n'), ((1956, 1973), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1966, 1973), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2016), 'matplotlib.pyplot.xticks', 'plt.xticks', (['n_iterations', 'n_iterations'], {}), '(n_iterations, n_iterations)\n', (1988, 2016), True, 'import matplotlib.pyplot as plt\n'), ((2021, 2033), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2031, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2048), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2046, 2048), True, 'import matplotlib.pyplot as plt\n'), ((1273, 1341), 'tqdm.tqdm', 'tqdm', (['n_iterations'], {'desc': '"""Execute MCTS with c=2"""', 'unit': '""" experiment"""'}), "(n_iterations, desc='Execute MCTS with c=2', unit=' experiment')\n", (1277, 1341), False, 'from tqdm import tqdm\n'), ((1425, 1499), 'tqdm.tqdm', 'tqdm', (['n_iterations'], {'desc': '"""Execute MCTS with dynamic c"""', 'unit': '""" experiment"""'}), "(n_iterations, desc='Execute MCTS with dynamic c', unit=' experiment')\n", (1429, 1499), False, 'from tqdm import tqdm\n')]
|
import numpy as np
import time
import keyboard
import math
import threading
def attack_mob(boxes,classes):
"""
    receives the player box and the mob boxes, moves the player towards the closest mob and then attacks it
"""
#midpoints X1 and X2
player, closestmob = calculate_distance(boxes,classes)
#vertical movement y axis
if player[0]<closestmob[0]:
keyboard.teledown()
else:
keyboard.teleup()
    # horizontal movement; note the midpoint tuple is (y, x), so index 1 is x and index 0 is y
if player[1]<closestmob[1]:
        # mob is to the right: move right and attack
print("player coord:"+str(player[0])+" "+str(player[1]))
print("\n mob coord:"+str(closestmob[0])+" "+str(closestmob[1]))
keyboard.moveRight()
keyboard.moveRight()
# keyboard.moveRight()
keyboard.attackFiveTimes()
keyboard.loot()
else:
        # mob is to the left: move left and attack
print("player coord:"+str(player[0])+" "+str(player[1]))
print("\n mob coord:"+str(closestmob[0])+" "+str(closestmob[1]))
keyboard.moveLeft()
keyboard.moveLeft()
# keyboard.moveLeft()
keyboard.attackFiveTimes()
keyboard.loot()
def filter(detections):
"""
    takes the first five detections and returns boxes, scores and classes as numpy arrays, plus a flag indicating whether a player was detected
"""
#get first five predictions
boxes = detections['detection_boxes'][0].numpy()[:5]
scores = detections['detection_scores'][0].numpy()[:5]
classes = (detections['detection_classes'][0].numpy() + 1).astype(int)[:5]
isTherePlayer = False
if 2 in classes[:]:
isTherePlayer = True
return boxes, scores, classes, isTherePlayer
def calculate_distance(boxes,classes):
"""
    calculates the distance between the player and each detected mob, and returns the midpoints of the player and the closest mob
"""
#get the index of the player, returns a numpy array containing the index
itemindex = np.where(classes==2)
#get the midpoints, list of tuples
midpoints =[]
for i in range(np.shape(boxes)[0]):
midpoints.append(getBoxesMidpoint(boxes[i]))
#calculate the distance between the player and the mobs
distance=np.zeros(5,dtype=np.float32)
for i in range(np.shape(boxes)[0]):
if i == itemindex[0][0]:
distance[i]= 99999.0
else:
distance[i]=distance_2points(midpoints[i],midpoints[itemindex[0][0]])
#get the min index, and return the player coord and mob coord.
minindex = np.argmin(distance)
return midpoints[itemindex[0][0]],midpoints[minindex]
def getBoxesMidpoint(box):
"""
    takes in normalized box coordinates xmin, ymin, xmax, ymax and denormalizes them to the 806x629 capture window
returns a tuple of the midpoint
"""
#denormalize them
normalized_coord = np.array([box[0]*806,box[1]*629,box[2]*806,box[3]*629],dtype=np.float32)
#offset from the origin
return (((normalized_coord[2]-normalized_coord[0])/2)+normalized_coord[0],((((normalized_coord[3]-normalized_coord[1])/2))+normalized_coord[1]))
def distance_2points(pt1,pt2):
"""
    returns the Euclidean distance between two points pt1=(x1, y1) and pt2=(x2, y2), given as tuples.
"""
return math.hypot(pt2[0]-pt1[0], pt2[1]-pt1[1])
def autobuff(stop_event):
starttime = time.time()
while not stop_event.wait(1):
print("Buffing!")
keyboard.buff()
keyboard.buff()
keyboard.buff()
time.sleep(65.0 - ((time.time() - starttime) % 65.0))
def autocc(stop_event):
starttime = time.time()
while not stop_event.wait(1):
print("CC'ing!")
keyboard.cc()
time.sleep(90.0 - ((time.time() - starttime) % 90.0))
if __name__ == "__main__":
pass
|
[
"math.hypot",
"numpy.zeros",
"keyboard.moveRight",
"numpy.argmin",
"time.time",
"keyboard.teledown",
"keyboard.loot",
"keyboard.moveLeft",
"numpy.where",
"numpy.array",
"keyboard.cc",
"keyboard.teleup",
"keyboard.buff",
"numpy.shape",
"keyboard.attackFiveTimes"
] |
[((1979, 2001), 'numpy.where', 'np.where', (['(classes == 2)'], {}), '(classes == 2)\n', (1987, 2001), True, 'import numpy as np\n'), ((2228, 2257), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'np.float32'}), '(5, dtype=np.float32)\n', (2236, 2257), True, 'import numpy as np\n'), ((2546, 2565), 'numpy.argmin', 'np.argmin', (['distance'], {}), '(distance)\n', (2555, 2565), True, 'import numpy as np\n'), ((2845, 2934), 'numpy.array', 'np.array', (['[box[0] * 806, box[1] * 629, box[2] * 806, box[3] * 629]'], {'dtype': 'np.float32'}), '([box[0] * 806, box[1] * 629, box[2] * 806, box[3] * 629], dtype=np\n .float32)\n', (2853, 2934), True, 'import numpy as np\n'), ((3236, 3280), 'math.hypot', 'math.hypot', (['(pt2[0] - pt1[0])', '(pt2[1] - pt1[1])'], {}), '(pt2[0] - pt1[0], pt2[1] - pt1[1])\n', (3246, 3280), False, 'import math\n'), ((3320, 3331), 'time.time', 'time.time', ([], {}), '()\n', (3329, 3331), False, 'import time\n'), ((3567, 3578), 'time.time', 'time.time', ([], {}), '()\n', (3576, 3578), False, 'import time\n'), ((391, 410), 'keyboard.teledown', 'keyboard.teledown', ([], {}), '()\n', (408, 410), False, 'import keyboard\n'), ((429, 446), 'keyboard.teleup', 'keyboard.teleup', ([], {}), '()\n', (444, 446), False, 'import keyboard\n'), ((762, 782), 'keyboard.moveRight', 'keyboard.moveRight', ([], {}), '()\n', (780, 782), False, 'import keyboard\n'), ((791, 811), 'keyboard.moveRight', 'keyboard.moveRight', ([], {}), '()\n', (809, 811), False, 'import keyboard\n'), ((851, 877), 'keyboard.attackFiveTimes', 'keyboard.attackFiveTimes', ([], {}), '()\n', (875, 877), False, 'import keyboard\n'), ((886, 901), 'keyboard.loot', 'keyboard.loot', ([], {}), '()\n', (899, 901), False, 'import keyboard\n'), ((1099, 1118), 'keyboard.moveLeft', 'keyboard.moveLeft', ([], {}), '()\n', (1116, 1118), False, 'import keyboard\n'), ((1127, 1146), 'keyboard.moveLeft', 'keyboard.moveLeft', ([], {}), '()\n', (1144, 1146), False, 'import keyboard\n'), ((1185, 1211), 'keyboard.attackFiveTimes', 'keyboard.attackFiveTimes', ([], {}), '()\n', (1209, 1211), False, 'import keyboard\n'), ((1220, 1235), 'keyboard.loot', 'keyboard.loot', ([], {}), '()\n', (1233, 1235), False, 'import keyboard\n'), ((3400, 3415), 'keyboard.buff', 'keyboard.buff', ([], {}), '()\n', (3413, 3415), False, 'import keyboard\n'), ((3424, 3439), 'keyboard.buff', 'keyboard.buff', ([], {}), '()\n', (3437, 3439), False, 'import keyboard\n'), ((3448, 3463), 'keyboard.buff', 'keyboard.buff', ([], {}), '()\n', (3461, 3463), False, 'import keyboard\n'), ((3646, 3659), 'keyboard.cc', 'keyboard.cc', ([], {}), '()\n', (3657, 3659), False, 'import keyboard\n'), ((2076, 2091), 'numpy.shape', 'np.shape', (['boxes'], {}), '(boxes)\n', (2084, 2091), True, 'import numpy as np\n'), ((2276, 2291), 'numpy.shape', 'np.shape', (['boxes'], {}), '(boxes)\n', (2284, 2291), True, 'import numpy as np\n'), ((3492, 3503), 'time.time', 'time.time', ([], {}), '()\n', (3501, 3503), False, 'import time\n'), ((3688, 3699), 'time.time', 'time.time', ([], {}), '()\n', (3697, 3699), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# Created on Sat Jun 05 2021
# Last modified on Mon Jun 07 2021
# Copyright (c) CaMOS Development Team. All Rights Reserved.
# Distributed under a MIT License. See LICENSE for more info.
import numpy as np
from camos.tasks.analysis import Analysis
from camos.utils.generategui import NumericInput, DatasetInput
from camos.utils.units import get_time
class BurstClean(Analysis):
analysis_name = "Clean Events"
required = ["dataset"]
def __init__(self, *args, **kwargs):
super(BurstClean, self).__init__(*args, **kwargs)
def _run(
self,
duration: NumericInput("Total Duration ({})".format(get_time()), 100),
_filter_min: NumericInput("Minimum Events/{}".format(get_time()), 1),
_filter_max: NumericInput("Maximum Events/{}".format(get_time()), 50),
_i_data: DatasetInput("Source dataset", 0),
):
output_type = [("CellID", "int"), ("Active", "float")]
# data should be provided in format summary (active events)
data = self.signal.data[_i_data]
self.dataname = self.signal.names[_i_data]
if not ("Active" in data.dtype.names):
raise ValueError("The dataset does not have the expected shape")
# Calculates the MFR, could be given as an input?
unique, counts = np.unique(data[:]["CellID"], return_counts=True)
active = data[:]["Active"]
IDs = data[:]["CellID"]
IDs_include = unique[
np.where(
(counts >= _filter_min * duration) & (counts <= _filter_max * duration)
)
]
idx = np.isin(IDs, IDs_include)
active_filter = active[idx]
IDs_filter = IDs[idx]
# Calculate mean firing rate per cell
self.output = np.zeros(shape=(len(active_filter), 1), dtype=output_type)
self.output[:]["CellID"] = IDs_filter.reshape(-1, 1)
self.output[:]["Active"] = active_filter.reshape(-1, 1)
self.output = self.output[1:]
self.foutput = self.output
# self.notify(
# "{}: Events Before = {}; Events After = {}".format(
# self.analysis_name, len(data), len(self.output)
# ),
# "INFO",
# )
def connectComponents(self, fields):
# Changing the input data to update the duration
fields["_i_data"].connect(
lambda x: fields["duration"].widget.setText(
str(int(self.signal.properties[x]["duration"]))
)
)
|
[
"camos.utils.generategui.DatasetInput",
"numpy.isin",
"numpy.where",
"camos.utils.units.get_time",
"numpy.unique"
] |
[((1324, 1372), 'numpy.unique', 'np.unique', (["data[:]['CellID']"], {'return_counts': '(True)'}), "(data[:]['CellID'], return_counts=True)\n", (1333, 1372), True, 'import numpy as np\n'), ((1618, 1643), 'numpy.isin', 'np.isin', (['IDs', 'IDs_include'], {}), '(IDs, IDs_include)\n', (1625, 1643), True, 'import numpy as np\n'), ((850, 883), 'camos.utils.generategui.DatasetInput', 'DatasetInput', (['"""Source dataset"""', '(0)'], {}), "('Source dataset', 0)\n", (862, 883), False, 'from camos.utils.generategui import NumericInput, DatasetInput\n'), ((1482, 1567), 'numpy.where', 'np.where', (['((counts >= _filter_min * duration) & (counts <= _filter_max * duration))'], {}), '((counts >= _filter_min * duration) & (counts <= _filter_max *\n duration))\n', (1490, 1567), True, 'import numpy as np\n'), ((657, 667), 'camos.utils.units.get_time', 'get_time', ([], {}), '()\n', (665, 667), False, 'from camos.utils.units import get_time\n'), ((737, 747), 'camos.utils.units.get_time', 'get_time', ([], {}), '()\n', (745, 747), False, 'from camos.utils.units import get_time\n'), ((815, 825), 'camos.utils.units.get_time', 'get_time', ([], {}), '()\n', (823, 825), False, 'from camos.utils.units import get_time\n')]
|
"""
Created on April 13, 2018
Edited on July 05, 2019
@author: <NAME> & <NAME>
Sony CSL Paris, France
Institute for Computational Perception, Johannes Kepler University, Linz
Austrian Research Institute for Artificial Intelligence, Vienna
"""
import numpy as np
import librosa
import torch.utils.data as data
import torch
import logging
import PIL
from scipy.signal import get_window
from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, \
CenterCrop
from complex_auto.util import to_numpy, cached
LOGGER = logging.getLogger(__name__)
def standardize_(ngram):
ngram = ngram - ngram.mean()
std = ngram.std()
if std > 1e-8:
ngram = .1 * ngram / std
return ngram
class Data(object):
def __init__(self, data_x, data_y, standardize=False):
self.data_x = data_x
self.data_y = data_y
def __getitem__(self, index):
return [standardize_(torch.FloatTensor(self.data_x[index])),
standardize_(torch.FloatTensor(self.data_y[index])),
-1, -1, -1]
def __len__(self):
return len(self.data_x)
class DataSampler(object):
def __init__(self, data_x, length_ngram, samples_epoch, standard=True,
shifts=[24, 24], scales=[1., 0], shuffle=True,
transform=(0, 1, 2), emph_onset=0, random_pairs=False):
"""
Returns random ngrams from data, can shift and scale data in two
dimensions
:param data_x: data (2d)
:param length_ngram: length of sampled ngrams
:param samples_epoch: number of samples per epoch
:param standard: if instances should be standardized
:param shifts: 2-tuple, maximal random shifts in two dimensions
:param scales: 2-tuple, maximal random scaling in two dimensions
:param shuffle: instances are returned in random order
:param transform: iterable; which transforms should be applied.
pitch_shift (0), time shift (1), tempo-change (2)
:param emph_onset: onsets are emphasized
:param random_pairs: a pair is sampled using two random (unrelated)
instances
"""
self.data_x = data_x
self.length_ngram = length_ngram
self.samples_epoch = samples_epoch
self.standard = standard
self.max_x = shifts[0]
self.max_y = shifts[1]
self.scale_x = scales[0]
self.scale_y = scales[1]
self.shuffle = shuffle
self.transform = transform
self.emph_onset = emph_onset
self.random_pairs = random_pairs
self.check_lengths()
def check_lengths(self):
delete = []
for i, song in enumerate(self.data_x):
max_ = song.shape[1] - self.length_ngram - self.max_x
if not self.max_x < max_:
print(f"Warning: Song number {i} is too short to be used "
f"with ngram length {self.length_ngram} and maximal "
f"time shift of {self.max_x} (will be ignored)!")
delete.append(i)
self.data_x = [i for j, i in enumerate(self.data_x) if j not in
delete]
def __len__(self):
if not self.shuffle:
return self.get_ngram_count()
return self.samples_epoch
def __getitem__(self, index):
# Transform: pitch_shift (0), time shift (1), tempo-change (2)
if self.transform is None:
# random transform
transform = np.random.randint(0, 3)
else:
transform = np.random.choice(self.transform)
if self.random_pairs:
# song_id, start, end = self.get_random_ngram()
# ngram = self.data_x[song_id][:, start:end].copy()
# song_id, start, end = self.get_random_ngram()
# ngram_trans = self.data_x[song_id][:, start:end].copy()
if np.random.randint(2) == 0:
[ngram, ngram_trans], song_id = self.get_pairs_same_song()
label = -1
transform = -1 # skips transformation codes
else:
song_id, start, end = self.get_ngram_by_idx(index)
ngram = self.data_x[song_id][:, start:end].copy()
elif self.shuffle:
song_id, start, end = self.get_random_ngram()
ngram = self.data_x[song_id][:, start:end].copy()
else:
song_id, start, end = self.get_ngram_by_idx(index)
ngram = self.data_x[song_id][:, start:end].copy()
# Normalization needed for PIL image processing (scale)
ngram -= ngram.min()
if ngram.max() > 1e-6:
ngram /= ngram.max()
assert ngram.shape[1] != 0, f"{start}, {end}," \
f"{self.data_x[song_id].shape[1]}, " \
f"{self.max_x}"
if transform == 1:
if self.max_x == 0:
shiftx = 0
else:
shiftx = np.random.randint(-self.max_x, self.max_x)
ngram_trans = self.trans_time_shift(end, song_id, start,
shiftx)
label = "shiftx" + str(shiftx)
if transform == 0:
if self.max_y == 0:
shifty = 0
else:
shifty = np.random.randint(-self.max_y, self.max_y)
ngram_trans = self.trans_pitch_shift(ngram, shifty)
label = "shifty" + str(shifty)
if transform == 2:
scale_x = 1 + self.scale_x * np.random.rand()
ngram, ngram_trans, minus = self.trans_speed_change(ngram, scale_x)
label = scale_x if not minus else -scale_x
label = "scale" + str(label)
ngram = to_numpy(ngram)
ngram_trans = to_numpy(ngram_trans)
ngram_onset = np.diff(np.concatenate((ngram[:, 0:1], ngram), axis=1),
axis=1)
ngram_trans_onset = np.diff(np.concatenate((ngram_trans[:, 0:1],
ngram_trans), axis=1), axis=1)
ngram_onset[ngram_onset < 0] = 0
ngram_trans_onset[ngram_trans_onset < 0] = 0
ngram = ngram + ngram_onset * self.emph_onset
ngram_trans = ngram_trans + ngram_trans_onset * self.emph_onset
if self.standard:
ngram = self.standardize(ngram)
ngram_trans = self.standardize(ngram_trans)
ngram = torch.FloatTensor(ngram).view(-1)
ngram_trans = torch.FloatTensor(ngram_trans).view(-1)
return ngram+1e-8, ngram_trans+1e-8, transform, song_id, label
def get_ngram_count(self):
count = 0
count_data = len(self.data_x)
for i in range(count_data):
len_data = self.data_x[i].shape[1]
startmin = 2 * self.max_x
startmax = len_data - self.length_ngram - 2 * self.max_x
count += startmax - startmin
return count
def get_ngram_by_idx(self, index):
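        # walk over the songs to translate the flat index into a (song_id, start) position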
count = 0
count_data = len(self.data_x)
for i in range(count_data):
len_data = self.data_x[i].shape[1]
startmin = 2 * self.max_x
startmax = len_data - self.length_ngram - 2 * self.max_x
if index >= count and index + startmin < count + startmax:
song_id = i
start = index - count + startmin
break
count += startmax - startmin
end = start + self.length_ngram
return song_id, start, end
def get_random_ngram(self):
count_data = len(self.data_x)
song_id = np.random.randint(0, count_data)
len_data = self.data_x[song_id].shape[1]
start = np.random.randint(self.max_x,
len_data - self.length_ngram - self.max_x)
end = start + self.length_ngram
return song_id, start, end
def get_pairs_same_song(self):
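        # draw two independent ngrams from one randomly chosen song to form a pair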
count_data = len(self.data_x)
song_id = np.random.randint(0, count_data)
len_data = self.data_x[song_id].shape[1]
pairs = []
for i in range(2):
start = np.random.randint(2 * self.max_x,
len_data - self.length_ngram - 2 * self.max_x)
end = start + self.length_ngram
ngram = self.data_x[song_id][:, start:end].copy()
pairs.append(ngram)
return pairs, song_id
def trans_speed_change(self, ngram, scale_x):
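        # stretch the ngram along the time axis via a PIL resize, center-crop back to the
        # original size, and randomly swap original/stretched to cover both tempo directions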
size1 = ngram.shape[1]
size0 = ngram.shape[0]
new_size_t_x = int(scale_x * size1)
new_size_t_y = ngram.shape[0]
transform_out = Compose([
ToPILImage(),
Resize((new_size_t_y, new_size_t_x),
interpolation=PIL.Image.NEAREST),
CenterCrop((size0, size1)),
ToTensor()
])
ngram_trans = transform_out(torch.FloatTensor(ngram).unsqueeze(0))
minus = False
if np.random.randint(0, 2) == 1:
ngram_ = ngram
ngram = ngram_trans
ngram_trans = ngram_
minus = True
return ngram, ngram_trans, minus
def trans_pitch_shift(self, ngram, shifty):
return to_numpy(self.transp0(torch.FloatTensor(ngram), shifty))
def trans_time_shift(self, end, song_id, start, shiftx):
return self.data_x[song_id][:, start + shiftx:end + shiftx]
def standardize(self, ngram):
ngram = ngram - ngram.mean()
std = ngram.std()
ngram = .1 * ngram / (std + 1e-8)
return ngram
def transp0(self, x, shift):
"""
Transposes axis 0 (zero-based) of x by [shift] steps.
Missing information is padded with zeros.
:param x: the array to transpose
:param shift: the transposition distance
:return: x transposed
"""
if shift == 0:
return x
pad = torch.zeros(abs(shift), x.size(1))
if shift < 0:
return torch.cat([pad, x[:-abs(shift), :]], dim=0)
return torch.cat([x[abs(shift):, :], pad], dim=0)
def transp1(self, x, shift):
"""
Transposes axis 1 (zero-based) of x by [shift] steps.
Missing information is padded with zeros.
:param x: the array to transpose
:param shift: the transposition distance
:return: x transposed
"""
if shift == 0:
return x
pad = torch.zeros(x.size(1), abs(shift))
if shift < 0:
return torch.cat([pad, x[:, :-abs(shift)]], dim=1)
return torch.cat([x[:, abs(shift):], pad], dim=1)
class Signal(data.Dataset):
def __init__(self, filelist, sr="22050", trg_shift=0, block_size=1024,
refresh_cache=False, cache_fn="signal_cache.pyc.bz",
allow_diff_shapes=False, padded=False, random_shift=0,
samples_epoch=1000, window='hann'):
"""
Constructor for 1D signal dataset
:param filelist: list of audio file names (str)
:param sr: desired sample rate
:param trg_shift: target == input shifted by [-trg_shift] steps,
blocks are shortened accordingly
:param block_size: length of one instance in a batch
:param refresh_cache: when True recalculate and save to cache file
when False loads from cache file when available
:param cache_fn: filename of cache file
"""
self.trg_shift = trg_shift
self.block_size = block_size
self.sr = sr
self.allow_diff_shapes = allow_diff_shapes
self.padded = padded
self.random_shift = random_shift
self.window = window
self.samples_epoch = samples_epoch
self.signals = cached(cache_fn, self.load_files, (filelist,),
refresh_cache=refresh_cache)
def __getitem__(self, index):
rand_inst = np.random.randint(len(self.signals))
if self.random_shift > 0:
shift = np.random.randint(-self.random_shift, self.random_shift)
else:
shift = self.trg_shift
rand_pos = np.random.randint(abs(shift),
len(self.signals[rand_inst]) -
abs(shift) - self.block_size)
w = get_window(self.window, self.block_size)
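        # apply the analysis window to both the input block and the shifted target block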
x = self.signals[rand_inst][rand_pos:rand_pos+self.block_size]
y = self.signals[rand_inst][rand_pos+shift:
rand_pos+shift+self.block_size, :]
x = torch.FloatTensor(x.squeeze() * w)
y = torch.FloatTensor(y.squeeze() * w)
x = self.standardize(x)
y = self.standardize(y)
return x, y, -1, -1, -1
def standardize(self, signal):
ngram = signal - signal.mean()
std = ngram.std()
if std > 1e-6:
ngram = ngram / std
else: # prevent empty input
ngram = ngram + 1e-8
return ngram
def __len__(self):
return self.samples_epoch
def load_files(self, filelist):
data_all = []
for file in filelist:
file = file.strip('\n')
print(f"loading file {file}")
signal = librosa.load(file)[0][:, None]
data_all.append(signal)
if len(data_all) == 0:
LOGGER.warning("No data added to Signal Dataset!")
return data_all
|
[
"complex_auto.util.cached",
"numpy.concatenate",
"scipy.signal.get_window",
"torch.FloatTensor",
"torchvision.transforms.ToPILImage",
"torchvision.transforms.ToTensor",
"complex_auto.util.to_numpy",
"numpy.random.randint",
"librosa.load",
"numpy.random.choice",
"numpy.random.rand",
"torchvision.transforms.CenterCrop",
"logging.getLogger",
"torchvision.transforms.Resize"
] |
[((541, 568), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (558, 568), False, 'import logging\n'), ((7686, 7718), 'numpy.random.randint', 'np.random.randint', (['(0)', 'count_data'], {}), '(0, count_data)\n', (7703, 7718), True, 'import numpy as np\n'), ((7784, 7856), 'numpy.random.randint', 'np.random.randint', (['self.max_x', '(len_data - self.length_ngram - self.max_x)'], {}), '(self.max_x, len_data - self.length_ngram - self.max_x)\n', (7801, 7856), True, 'import numpy as np\n'), ((8058, 8090), 'numpy.random.randint', 'np.random.randint', (['(0)', 'count_data'], {}), '(0, count_data)\n', (8075, 8090), True, 'import numpy as np\n'), ((11906, 11981), 'complex_auto.util.cached', 'cached', (['cache_fn', 'self.load_files', '(filelist,)'], {'refresh_cache': 'refresh_cache'}), '(cache_fn, self.load_files, (filelist,), refresh_cache=refresh_cache)\n', (11912, 11981), False, 'from complex_auto.util import to_numpy, cached\n'), ((12463, 12503), 'scipy.signal.get_window', 'get_window', (['self.window', 'self.block_size'], {}), '(self.window, self.block_size)\n', (12473, 12503), False, 'from scipy.signal import get_window\n'), ((3513, 3536), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (3530, 3536), True, 'import numpy as np\n'), ((3575, 3607), 'numpy.random.choice', 'np.random.choice', (['self.transform'], {}), '(self.transform)\n', (3591, 3607), True, 'import numpy as np\n'), ((5781, 5796), 'complex_auto.util.to_numpy', 'to_numpy', (['ngram'], {}), '(ngram)\n', (5789, 5796), False, 'from complex_auto.util import to_numpy, cached\n'), ((5823, 5844), 'complex_auto.util.to_numpy', 'to_numpy', (['ngram_trans'], {}), '(ngram_trans)\n', (5831, 5844), False, 'from complex_auto.util import to_numpy, cached\n'), ((5876, 5922), 'numpy.concatenate', 'np.concatenate', (['(ngram[:, 0:1], ngram)'], {'axis': '(1)'}), '((ngram[:, 0:1], ngram), axis=1)\n', (5890, 5922), True, 'import numpy as np\n'), ((6037, 6095), 'numpy.concatenate', 'np.concatenate', (['(ngram_trans[:, 0:1], ngram_trans)'], {'axis': '(1)'}), '((ngram_trans[:, 0:1], ngram_trans), axis=1)\n', (6051, 6095), True, 'import numpy as np\n'), ((8206, 8291), 'numpy.random.randint', 'np.random.randint', (['(2 * self.max_x)', '(len_data - self.length_ngram - 2 * self.max_x)'], {}), '(2 * self.max_x, len_data - self.length_ngram - 2 * self.max_x\n )\n', (8223, 8291), True, 'import numpy as np\n'), ((9034, 9057), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (9051, 9057), True, 'import numpy as np\n'), ((12159, 12215), 'numpy.random.randint', 'np.random.randint', (['(-self.random_shift)', 'self.random_shift'], {}), '(-self.random_shift, self.random_shift)\n', (12176, 12215), True, 'import numpy as np\n'), ((923, 960), 'torch.FloatTensor', 'torch.FloatTensor', (['self.data_x[index]'], {}), '(self.data_x[index])\n', (940, 960), False, 'import torch\n'), ((992, 1029), 'torch.FloatTensor', 'torch.FloatTensor', (['self.data_y[index]'], {}), '(self.data_y[index])\n', (1009, 1029), False, 'import torch\n'), ((3908, 3928), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (3925, 3928), True, 'import numpy as np\n'), ((5007, 5049), 'numpy.random.randint', 'np.random.randint', (['(-self.max_x)', 'self.max_x'], {}), '(-self.max_x, self.max_x)\n', (5024, 5049), True, 'import numpy as np\n'), ((5349, 5391), 'numpy.random.randint', 'np.random.randint', (['(-self.max_y)', 'self.max_y'], {}), '(-self.max_y, self.max_y)\n', (5366, 5391), True, 'import 
numpy as np\n'), ((6516, 6540), 'torch.FloatTensor', 'torch.FloatTensor', (['ngram'], {}), '(ngram)\n', (6533, 6540), False, 'import torch\n'), ((6572, 6602), 'torch.FloatTensor', 'torch.FloatTensor', (['ngram_trans'], {}), '(ngram_trans)\n', (6589, 6602), False, 'import torch\n'), ((8735, 8747), 'torchvision.transforms.ToPILImage', 'ToPILImage', ([], {}), '()\n', (8745, 8747), False, 'from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, CenterCrop\n'), ((8761, 8830), 'torchvision.transforms.Resize', 'Resize', (['(new_size_t_y, new_size_t_x)'], {'interpolation': 'PIL.Image.NEAREST'}), '((new_size_t_y, new_size_t_x), interpolation=PIL.Image.NEAREST)\n', (8767, 8830), False, 'from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, CenterCrop\n'), ((8863, 8889), 'torchvision.transforms.CenterCrop', 'CenterCrop', (['(size0, size1)'], {}), '((size0, size1))\n', (8873, 8889), False, 'from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, CenterCrop\n'), ((8903, 8913), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (8911, 8913), False, 'from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, CenterCrop\n'), ((9309, 9333), 'torch.FloatTensor', 'torch.FloatTensor', (['ngram'], {}), '(ngram)\n', (9326, 9333), False, 'import torch\n'), ((5568, 5584), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5582, 5584), True, 'import numpy as np\n'), ((8961, 8985), 'torch.FloatTensor', 'torch.FloatTensor', (['ngram'], {}), '(ngram)\n', (8978, 8985), False, 'import torch\n'), ((13382, 13400), 'librosa.load', 'librosa.load', (['file'], {}), '(file)\n', (13394, 13400), False, 'import librosa\n')]
|
"""
Implementation using CuPy acceleration.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
from time import time
import cupy as cp
from cupyx.scipy import fft as cufft
def powerspectrum(*u, average=True, diagnostics=False,
kmin=None, kmax=None, npts=None,
compute_fft=True, compute_sqr=True,
double=True, bench=False, **kwargs):
"""
See the documentation for the :ref:`CPU version<powerspectrum>`.
Parameters
----------
u : `np.ndarray`
Scalar or vector field.
If vector data, pass arguments as ``u1, u2, ..., un``
where ``ui`` is the ith vector component.
Each ``ui`` can be 1D, 2D, or 3D, and all must have the
same ``ui.shape`` and ``ui.dtype``.
average : `bool`, optional
If ``True``, average over values in a given
bin and multiply by the bin volume.
If ``False``, compute the sum.
diagnostics : `bool`, optional
Return the standard deviation and number of points
in a particular radial bin.
kmin : `int` or `float`, optional
Minimum wavenumber in power spectrum bins.
If ``None``, ``kmin = 1``.
kmax : `int` or `float`, optional
Maximum wavenumber in power spectrum bins.
If ``None``, ``kmax = max(u.shape)//2``.
npts : `int`, optional
Number of modes between ``kmin`` and ``kmax``,
inclusive.
If ``None``, ``npts = kmax-kmin+1``.
compute_fft : `bool`, optional
If ``False``, do not take the FFT of the input data.
FFTs should not be passed with the zero-frequency
component in the center.
compute_sqr : `bool`, optional
If ``False``, sum the real part of the FFT. This can be
useful for purely real FFTs, where the sign of the
FFT is useful information. If ``True``, take the square
as usual.
double : `bool`, optional
If ``False``, calculate FFTs in single precision.
Useful for saving memory.
bench : `bool`, optional
Print message for time of calculation.
kwargs
Additional keyword arguments passed to
``cupyx.scipy.fft.fftn`` or ``cupyx.scipy.fft.rfftn``.
Returns
-------
spectrum : `np.ndarray`, shape `(npts,)`
Radially averaged power spectrum :math:`P(k)`.
kn : `np.ndarray`, shape `(npts,)`
Left edges of radial bins :math:`k`.
counts : `np.ndarray`, shape `(npts,)`, optional
Number of points :math:`N_k` in each bin.
vol : `np.ndarray`, shape `(npts,)`, optional
Volume :math:`V_k` of each bin.
stdev : `np.ndarray`, shape `(npts,)`, optional
Standard deviation multiplied with :math:`V_k`
in each bin.
"""
if bench:
t0 = time()
shape = u[0].shape
ndim = u[0].ndim
ncomp = len(u)
N = max(u[0].shape)
if np.issubdtype(u[0].dtype, np.floating):
real = True
dtype = cp.float64 if double else cp.float32
else:
real = False
dtype = cp.complex128 if double else cp.complex64
if ndim not in [1, 2, 3]:
raise ValueError("Dimension of image must be 1, 2, or 3.")
# Get memory pools
mempool = cp.get_default_memory_pool()
pinned_mempool = cp.get_default_pinned_memory_pool()
    # Compute power spectral density with memory efficiency
density = None
comp = cp.empty(shape, dtype=dtype)
for i in range(ncomp):
temp = cp.asarray(u[i], dtype=dtype)
comp[...] = temp
del temp
if compute_fft:
fft = _cufftn(comp, **kwargs)
else:
fft = comp
if density is None:
fftshape = fft.shape
density = cp.zeros(fft.shape)
if compute_sqr:
density[...] += _mod_squared(fft)
else:
density[...] += cp.real(fft)
del fft
mempool.free_all_blocks()
pinned_mempool.free_all_blocks()
# Need to double count if using rfftn
if real and compute_fft:
density[...] *= 2
# Get radial coordinates
kr = cp.asarray(_kmag_sampling(fftshape, real=real).astype(np.float32))
# Flatten arrays
kr = kr.ravel()
density = density.ravel()
# Get minimum and maximum k for binning if not given
if kmin is None:
kmin = 1
if kmax is None:
kmax = int(N/2)
if npts is None:
npts = kmax-kmin+1
# Generate bins
kn = cp.linspace(kmin, kmax, npts, endpoint=True) # Left edges of bins
dk = kn[1] - kn[0]
# Radially average power spectral density
if ndim == 1:
fac = 2*np.pi
elif ndim == 2:
fac = 4*np.pi
elif ndim == 3:
fac = 4./3.*np.pi
spectrum = cp.zeros_like(kn)
stdev = cp.zeros_like(kn)
vol = cp.zeros_like(kn)
counts = cp.zeros(kn.shape, dtype=np.int64)
for i, ki in enumerate(kn):
ii = cp.where(cp.logical_and(kr >= ki, kr < ki+dk))
samples = density[ii]
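        # volume factor of the radial bin [ki, ki+dk); scales the bin average and standard deviation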
vk = fac*cp.pi*((ki+dk)**ndim-(ki)**ndim)
if average:
spectrum[i] = vk*cp.mean(samples)
else:
spectrum[i] = cp.sum(samples)
if diagnostics:
Nk = samples.size
stdev[i] = vk * cp.std(samples, ddof=1)
vol[i] = vk
counts[i] = Nk
del density, kr
mempool.free_all_blocks()
pinned_mempool.free_all_blocks()
if bench:
print(f"Time: {time() - t0:.04f} s")
result = [spectrum.get(), kn.get()]
if diagnostics:
result.extend([counts.get(), vol.get(), stdev.get()])
return tuple(result)
def _cufftn(data, overwrite_input=False, **kwargs):
"""
Calculate the N-dimensional fft of an image
with memory efficiency
"""
# Get memory pools
mempool = cp.get_default_memory_pool()
pinned_mempool = cp.get_default_pinned_memory_pool()
# Real vs. Complex data
if data.dtype in [cp.float32, cp.float64]:
value_type = 'R2C'
fftn = cufft.rfftn
elif data.dtype in [cp.complex64, cp.complex128]:
value_type = 'C2C'
fftn = cufft.fftn
else:
raise ValueError(f"{data.dtype} is unrecognized data type.")
# Get plan for computing fft
plan = cufft.get_fft_plan(data, value_type=value_type)
# Compute fft
with plan:
fft = fftn(data, overwrite_x=overwrite_input, **kwargs)
# Release memory
del plan
mempool.free_all_blocks()
pinned_mempool.free_all_blocks()
return fft
@cp.fuse(kernel_name='mod_squared')
def _mod_squared(a):
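    # elementwise |a|^2 via a * conj(a); cp.fuse compiles this into a single GPU kernel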
return cp.real(a*cp.conj(a))
def _kmag_sampling(shape, real=True):
"""
Generates the |k| coordinate system.
"""
if real:
freq = np.fft.rfftfreq
s = list(shape)
s[-1] = (s[-1]-1)*2
shape = s
else:
freq = np.fft.fftfreq
ndim = len(shape)
kmag = np.zeros(shape)
ksqr = []
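    # squared wavenumbers per axis; the last axis uses rfftfreq when the input was real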
for i in range(ndim):
ni = shape[i]
sample = freq(ni) if i == ndim - 1 else np.fft.fftfreq(ni)
if real:
sample = np.abs(sample)
k1d = sample * ni
ksqr.append(k1d * k1d)
if ndim == 1:
ksqr = ksqr[0]
elif ndim == 2:
ksqr = np.add.outer(ksqr[0], ksqr[1])
elif ndim == 3:
ksqr = np.add.outer(np.add.outer(ksqr[0], ksqr[1]), ksqr[2])
kmag = np.sqrt(ksqr)
return kmag
if __name__ == '__main__':
import pyFC
from matplotlib import pyplot as plt
dim = 100
fc = pyFC.LogNormalFractalCube(
ni=dim, nj=dim, nk=dim, kmin=10, mean=1, beta=-5/3)
fc.gen_cube()
data = fc.cube
psd, kn, stdev, vol, N = powerspectrum(data, diagnostics=True)
print(psd.mean())
def zero_log10(s):
"""
Takes logarithm of an array while retaining the zeros
"""
sp = np.where(s > 0., s, 1)
return np.log10(sp)
log_psd = zero_log10(psd)
log_kn = zero_log10(kn)
idxs = np.where(log_kn >= np.log10(fc.kmin))
m, b = np.polyfit(log_kn[idxs], log_psd[idxs], 1)
plt.errorbar(kn, psd,
label=rf'PSD, $\beta = {fc.beta}$', color='g')
plt.plot(log_kn[idxs], m*log_kn[idxs]+b,
label=rf'Fit, $\beta = {m}$', color='k')
plt.ylabel(r"$\log{P(k)}$")
plt.xlabel(r"$\log{k}$")
plt.legend(loc='upper right')
plt.show()
|
[
"numpy.abs",
"cupy.empty",
"cupy.zeros_like",
"numpy.polyfit",
"cupy.get_default_memory_pool",
"numpy.add.outer",
"cupy.fuse",
"cupy.std",
"numpy.fft.fftfreq",
"pyFC.LogNormalFractalCube",
"numpy.log10",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.show",
"cupy.zeros",
"cupy.real",
"matplotlib.pyplot.legend",
"cupy.conj",
"cupy.get_default_pinned_memory_pool",
"cupy.mean",
"matplotlib.pyplot.ylabel",
"numpy.issubdtype",
"cupyx.scipy.fft.get_fft_plan",
"matplotlib.pyplot.plot",
"cupy.asarray",
"numpy.zeros",
"cupy.sum",
"time.time",
"numpy.where",
"cupy.linspace",
"cupy.logical_and",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] |
[((6522, 6556), 'cupy.fuse', 'cp.fuse', ([], {'kernel_name': '"""mod_squared"""'}), "(kernel_name='mod_squared')\n", (6529, 6556), True, 'import cupy as cp\n'), ((2898, 2936), 'numpy.issubdtype', 'np.issubdtype', (['u[0].dtype', 'np.floating'], {}), '(u[0].dtype, np.floating)\n', (2911, 2936), True, 'import numpy as np\n'), ((3236, 3264), 'cupy.get_default_memory_pool', 'cp.get_default_memory_pool', ([], {}), '()\n', (3262, 3264), True, 'import cupy as cp\n'), ((3286, 3321), 'cupy.get_default_pinned_memory_pool', 'cp.get_default_pinned_memory_pool', ([], {}), '()\n', (3319, 3321), True, 'import cupy as cp\n'), ((3414, 3442), 'cupy.empty', 'cp.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (3422, 3442), True, 'import cupy as cp\n'), ((4474, 4518), 'cupy.linspace', 'cp.linspace', (['kmin', 'kmax', 'npts'], {'endpoint': '(True)'}), '(kmin, kmax, npts, endpoint=True)\n', (4485, 4518), True, 'import cupy as cp\n'), ((4754, 4771), 'cupy.zeros_like', 'cp.zeros_like', (['kn'], {}), '(kn)\n', (4767, 4771), True, 'import cupy as cp\n'), ((4784, 4801), 'cupy.zeros_like', 'cp.zeros_like', (['kn'], {}), '(kn)\n', (4797, 4801), True, 'import cupy as cp\n'), ((4812, 4829), 'cupy.zeros_like', 'cp.zeros_like', (['kn'], {}), '(kn)\n', (4825, 4829), True, 'import cupy as cp\n'), ((4843, 4877), 'cupy.zeros', 'cp.zeros', (['kn.shape'], {'dtype': 'np.int64'}), '(kn.shape, dtype=np.int64)\n', (4851, 4877), True, 'import cupy as cp\n'), ((5808, 5836), 'cupy.get_default_memory_pool', 'cp.get_default_memory_pool', ([], {}), '()\n', (5834, 5836), True, 'import cupy as cp\n'), ((5858, 5893), 'cupy.get_default_pinned_memory_pool', 'cp.get_default_pinned_memory_pool', ([], {}), '()\n', (5891, 5893), True, 'import cupy as cp\n'), ((6255, 6302), 'cupyx.scipy.fft.get_fft_plan', 'cufft.get_fft_plan', (['data'], {'value_type': 'value_type'}), '(data, value_type=value_type)\n', (6273, 6302), True, 'from cupyx.scipy import fft as cufft\n'), ((6895, 6910), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (6903, 6910), True, 'import numpy as np\n'), ((7359, 7372), 'numpy.sqrt', 'np.sqrt', (['ksqr'], {}), '(ksqr)\n', (7366, 7372), True, 'import numpy as np\n'), ((7500, 7579), 'pyFC.LogNormalFractalCube', 'pyFC.LogNormalFractalCube', ([], {'ni': 'dim', 'nj': 'dim', 'nk': 'dim', 'kmin': '(10)', 'mean': '(1)', 'beta': '(-5 / 3)'}), '(ni=dim, nj=dim, nk=dim, kmin=10, mean=1, beta=-5 / 3)\n', (7525, 7579), False, 'import pyFC\n'), ((8008, 8050), 'numpy.polyfit', 'np.polyfit', (['log_kn[idxs]', 'log_psd[idxs]', '(1)'], {}), '(log_kn[idxs], log_psd[idxs], 1)\n', (8018, 8050), True, 'import numpy as np\n'), ((8056, 8124), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['kn', 'psd'], {'label': 'f"""PSD, $\\\\beta = {fc.beta}$"""', 'color': '"""g"""'}), "(kn, psd, label=f'PSD, $\\\\beta = {fc.beta}$', color='g')\n", (8068, 8124), True, 'from matplotlib import pyplot as plt\n'), ((8146, 8235), 'matplotlib.pyplot.plot', 'plt.plot', (['log_kn[idxs]', '(m * log_kn[idxs] + b)'], {'label': 'f"""Fit, $\\\\beta = {m}$"""', 'color': '"""k"""'}), "(log_kn[idxs], m * log_kn[idxs] + b, label=f'Fit, $\\\\beta = {m}$',\n color='k')\n", (8154, 8235), True, 'from matplotlib import pyplot as plt\n'), ((8245, 8272), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\log{P(k)}$"""'], {}), "('$\\\\log{P(k)}$')\n", (8255, 8272), True, 'from matplotlib import pyplot as plt\n'), ((8277, 8301), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\log{k}$"""'], {}), "('$\\\\log{k}$')\n", (8287, 8301), True, 'from matplotlib import 
pyplot as plt\n'), ((8306, 8335), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (8316, 8335), True, 'from matplotlib import pyplot as plt\n'), ((8341, 8351), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8349, 8351), True, 'from matplotlib import pyplot as plt\n'), ((2795, 2801), 'time.time', 'time', ([], {}), '()\n', (2799, 2801), False, 'from time import time\n'), ((3485, 3514), 'cupy.asarray', 'cp.asarray', (['u[i]'], {'dtype': 'dtype'}), '(u[i], dtype=dtype)\n', (3495, 3514), True, 'import cupy as cp\n'), ((7838, 7861), 'numpy.where', 'np.where', (['(s > 0.0)', 's', '(1)'], {}), '(s > 0.0, s, 1)\n', (7846, 7861), True, 'import numpy as np\n'), ((7876, 7888), 'numpy.log10', 'np.log10', (['sp'], {}), '(sp)\n', (7884, 7888), True, 'import numpy as np\n'), ((3743, 3762), 'cupy.zeros', 'cp.zeros', (['fft.shape'], {}), '(fft.shape)\n', (3751, 3762), True, 'import cupy as cp\n'), ((3875, 3887), 'cupy.real', 'cp.real', (['fft'], {}), '(fft)\n', (3882, 3887), True, 'import cupy as cp\n'), ((4932, 4970), 'cupy.logical_and', 'cp.logical_and', (['(kr >= ki)', '(kr < ki + dk)'], {}), '(kr >= ki, kr < ki + dk)\n', (4946, 4970), True, 'import cupy as cp\n'), ((5156, 5171), 'cupy.sum', 'cp.sum', (['samples'], {}), '(samples)\n', (5162, 5171), True, 'import cupy as cp\n'), ((6599, 6609), 'cupy.conj', 'cp.conj', (['a'], {}), '(a)\n', (6606, 6609), True, 'import cupy as cp\n'), ((7021, 7039), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['ni'], {}), '(ni)\n', (7035, 7039), True, 'import numpy as np\n'), ((7078, 7092), 'numpy.abs', 'np.abs', (['sample'], {}), '(sample)\n', (7084, 7092), True, 'import numpy as np\n'), ((7227, 7257), 'numpy.add.outer', 'np.add.outer', (['ksqr[0]', 'ksqr[1]'], {}), '(ksqr[0], ksqr[1])\n', (7239, 7257), True, 'import numpy as np\n'), ((7978, 7995), 'numpy.log10', 'np.log10', (['fc.kmin'], {}), '(fc.kmin)\n', (7986, 7995), True, 'import numpy as np\n'), ((5099, 5115), 'cupy.mean', 'cp.mean', (['samples'], {}), '(samples)\n', (5106, 5115), True, 'import cupy as cp\n'), ((5254, 5277), 'cupy.std', 'cp.std', (['samples'], {'ddof': '(1)'}), '(samples, ddof=1)\n', (5260, 5277), True, 'import cupy as cp\n'), ((7306, 7336), 'numpy.add.outer', 'np.add.outer', (['ksqr[0]', 'ksqr[1]'], {}), '(ksqr[0], ksqr[1])\n', (7318, 7336), True, 'import numpy as np\n'), ((5455, 5461), 'time.time', 'time', ([], {}), '()\n', (5459, 5461), False, 'from time import time\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import torch
from itertools import permutations
def loss_calc(est, ref, loss_type):
"""
    selects the loss: time-domain SI-SDR ("sisdr"), or spectral "mse" / "log_mse"
"""
# time domain (wav input)
if loss_type == "sisdr":
loss = batch_SDR_torch(est, ref)
if loss_type == "mse":
loss = batch_mse_torch(est, ref)
if loss_type == "log_mse":
loss = batch_log_mse_torch(est, ref)
return loss
def calc_sdr_torch(estimation, origin, mask=None):
"""
    batch-wise SDR calculation for one audio file on pytorch Variables.
estimation: (batch, nsample)
origin: (batch, nsample)
mask: optional, (batch, nsample), binary
"""
if mask is not None:
origin = origin * mask
estimation = estimation * mask
origin_power = torch.pow(origin, 2).sum(1, keepdim=True) + 1e-8 # (batch, 1)
scale = torch.sum(origin*estimation, 1, keepdim=True) / origin_power # (batch, 1)
est_true = scale * origin # (batch, nsample)
est_res = estimation - est_true # (batch, nsample)
true_power = torch.pow(est_true, 2).sum(1)
res_power = torch.pow(est_res, 2).sum(1)
return 10*torch.log10(true_power) - 10*torch.log10(res_power) # (batch, 1)
def batch_SDR_torch(estimation, origin, mask=None):
"""
    batch-wise SDR calculation for multiple audio files.
estimation: (batch, nsource, nsample)
origin: (batch, nsource, nsample)
mask: optional, (batch, nsample), binary
"""
batch_size_est, nsource_est, nsample_est = estimation.size()
batch_size_ori, nsource_ori, nsample_ori = origin.size()
assert batch_size_est == batch_size_ori, "Estimation and original sources should have same shape."
assert nsource_est == nsource_ori, "Estimation and original sources should have same shape."
assert nsample_est == nsample_ori, "Estimation and original sources should have same shape."
assert nsource_est < nsample_est, "Axis 1 should be the number of sources, and axis 2 should be the signal."
batch_size = batch_size_est
nsource = nsource_est
nsample = nsample_est
# zero mean signals
estimation = estimation - torch.mean(estimation, 2, keepdim=True).expand_as(estimation)
origin = origin - torch.mean(origin, 2, keepdim=True).expand_as(estimation)
# possible permutations
perm = list(set(permutations(np.arange(nsource))))
# pair-wise SDR
SDR = torch.zeros((batch_size, nsource, nsource)).type(estimation.type())
for i in range(nsource):
for j in range(nsource):
SDR[:,i,j] = calc_sdr_torch(estimation[:,i], origin[:,j], mask)
# choose the best permutation
SDR_max = []
SDR_perm = []
for permute in perm:
sdr = []
for idx in range(len(permute)):
sdr.append(SDR[:,idx,permute[idx]].view(batch_size,-1))
sdr = torch.sum(torch.cat(sdr, 1), 1)
SDR_perm.append(sdr.view(batch_size, 1))
SDR_perm = torch.cat(SDR_perm, 1)
SDR_max, _ = torch.max(SDR_perm, dim=1)
return - SDR_max / nsource
# def calc_mse_torch(estimation, origin):
# return torch.mean(torch.pow(estimation-origin,2),1).mean(1)
def batch_mse_torch(estimation, origin):
"""
    batch-wise MSE calculation for multiple audio files.
estimation: (batch, nsource, frames, freq_bins)
origin: (batch, nsource, frames, freq_bins)
nsource = 2
"""
mse1 = torch.sqrt(torch.pow(estimation - origin, 2).mean([3])).mean([1,2])
mse2 = torch.sqrt(torch.pow(estimation - origin.flip([1]), 2).mean([3])).mean([1,2])
return torch.stack((mse1, mse2),1).min(1)[0]
def batch_log_mse_torch(estimation, origin):
"""
    batch-wise log-MSE calculation for multiple audio files.
estimation: (batch, nsource, frames, freq_bins)
origin: (batch, nsource, frames, freq_bins)
nsource = 2
"""
# eps = 1e-20
# mse1 = torch.log10(torch.sqrt(torch.pow(estimation - origin, 2).mean([3])).mean([1,2])+eps)
# mse2 = torch.log10(torch.sqrt(torch.pow(estimation - origin.flip([1]), 2).mean([3])).mean([1,2])+eps)
mse1 = torch.log10(torch.pow(estimation - origin, 2).mean([3])).mean([1,2])
mse2 = torch.log10(torch.pow(estimation - origin.flip([1]), 2).mean([3])).mean([1,2])
return torch.stack((mse1, mse2),1).min(1)[0]
if __name__ == "__main__":
est = torch.rand(10, 2, 32, 1000)
ref = torch.rand(10, 2, 32, 1000)
out = loss_calc(est, ref, "mse")
print(out.shape)
print(out)
|
[
"torch.mean",
"torch.stack",
"torch.cat",
"torch.log10",
"torch.pow",
"torch.max",
"numpy.arange",
"torch.rand",
"torch.zeros",
"torch.sum"
] |
[((3031, 3053), 'torch.cat', 'torch.cat', (['SDR_perm', '(1)'], {}), '(SDR_perm, 1)\n', (3040, 3053), False, 'import torch\n'), ((3071, 3097), 'torch.max', 'torch.max', (['SDR_perm'], {'dim': '(1)'}), '(SDR_perm, dim=1)\n', (3080, 3097), False, 'import torch\n'), ((4405, 4432), 'torch.rand', 'torch.rand', (['(10)', '(2)', '(32)', '(1000)'], {}), '(10, 2, 32, 1000)\n', (4415, 4432), False, 'import torch\n'), ((4443, 4470), 'torch.rand', 'torch.rand', (['(10)', '(2)', '(32)', '(1000)'], {}), '(10, 2, 32, 1000)\n', (4453, 4470), False, 'import torch\n'), ((908, 955), 'torch.sum', 'torch.sum', (['(origin * estimation)', '(1)'], {'keepdim': '(True)'}), '(origin * estimation, 1, keepdim=True)\n', (917, 955), False, 'import torch\n'), ((1116, 1138), 'torch.pow', 'torch.pow', (['est_true', '(2)'], {}), '(est_true, 2)\n', (1125, 1138), False, 'import torch\n'), ((1162, 1183), 'torch.pow', 'torch.pow', (['est_res', '(2)'], {}), '(est_res, 2)\n', (1171, 1183), False, 'import torch\n'), ((1210, 1233), 'torch.log10', 'torch.log10', (['true_power'], {}), '(true_power)\n', (1221, 1233), False, 'import torch\n'), ((1239, 1261), 'torch.log10', 'torch.log10', (['res_power'], {}), '(res_power)\n', (1250, 1261), False, 'import torch\n'), ((2491, 2534), 'torch.zeros', 'torch.zeros', (['(batch_size, nsource, nsource)'], {}), '((batch_size, nsource, nsource))\n', (2502, 2534), False, 'import torch\n'), ((2945, 2962), 'torch.cat', 'torch.cat', (['sdr', '(1)'], {}), '(sdr, 1)\n', (2954, 2962), False, 'import torch\n'), ((828, 848), 'torch.pow', 'torch.pow', (['origin', '(2)'], {}), '(origin, 2)\n', (837, 848), False, 'import torch\n'), ((2226, 2265), 'torch.mean', 'torch.mean', (['estimation', '(2)'], {'keepdim': '(True)'}), '(estimation, 2, keepdim=True)\n', (2236, 2265), False, 'import torch\n'), ((2310, 2345), 'torch.mean', 'torch.mean', (['origin', '(2)'], {'keepdim': '(True)'}), '(origin, 2, keepdim=True)\n', (2320, 2345), False, 'import torch\n'), ((2434, 2452), 'numpy.arange', 'np.arange', (['nsource'], {}), '(nsource)\n', (2443, 2452), True, 'import numpy as np\n'), ((3652, 3680), 'torch.stack', 'torch.stack', (['(mse1, mse2)', '(1)'], {}), '((mse1, mse2), 1)\n', (3663, 3680), False, 'import torch\n'), ((4329, 4357), 'torch.stack', 'torch.stack', (['(mse1, mse2)', '(1)'], {}), '((mse1, mse2), 1)\n', (4340, 4357), False, 'import torch\n'), ((3495, 3528), 'torch.pow', 'torch.pow', (['(estimation - origin)', '(2)'], {}), '(estimation - origin, 2)\n', (3504, 3528), False, 'import torch\n'), ((4171, 4204), 'torch.pow', 'torch.pow', (['(estimation - origin)', '(2)'], {}), '(estimation - origin, 2)\n', (4180, 4204), False, 'import torch\n')]
|
from __future__ import absolute_import, division
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
from pytest import raises
from fatiando.seismic import conv
def test_impulse_response():
"""
    conv.convolutional_model returns the source wavelet as result when the model
    is a centred spike, assuming the length of the model equals that of the
    source wavelet
"""
w = conv.rickerwave(30., 2.e-3)
rc_test = np.zeros((w.shape[0], 20))
rc_test[w.shape[0]//2, :] = 1.
spike = conv.convolutional_model(rc_test, 30., conv.rickerwave, dt=2.e-3)
for j in range(0, rc_test.shape[1]):
assert_array_almost_equal(spike[:, j], w, 9)
def test_rc_shorter_than_wavelet():
"""
    When the reflectivity series is shorter than the wavelet, the spike
    response is observed as in the opposite case. The difference is that the
    ricker wavelet (or other symmetric wavelet) is shorter in the result.
"""
w = conv.rickerwave(30., 2.e-3)
rc_test = np.zeros((21, 20))
rc_test[rc_test.shape[0]//2, :] = 1
spike = conv.convolutional_model(rc_test, 30., conv.rickerwave, dt=2.e-3)
for j in range(0, rc_test.shape[1]):
wmin = (w.shape[0] - rc_test.shape[0])//2
wmax = -(w.shape[0] - rc_test.shape[0])//2
assert_array_almost_equal(spike[:, j], w[wmin:wmax], 9)
def test_reflectivity_wrong_dimensions():
"""
Velocity and density are provided as matrix or vector to reflectivity
calculation, so they must have the same dimension.
"""
vel = np.ones((10, 10))
dens = np.ones((11, 11))
raises(AssertionError, conv.reflectivity, vel, dens)
vel = np.ones((10))
dens = np.ones((11))
raises(AssertionError, conv.reflectivity, vel, dens)
def test_depth_2_time_wrong_dimensions():
"""
    Velocity and property are provided as matrices to the depth-to-time conversion,
so they must have the same dimension.
"""
vel = np.ones((10, 10))
dens = np.ones((11, 11))
dt = 2.e-3
dz = 1.
raises(AssertionError, conv.depth_2_time, vel, dens, dt, dz)
def test_ricker():
"""
conv.rickerwave inputs must satisfy the condition for sampling and
    stability, otherwise an error is raised.
"""
f = 50.
dt = 2.e-3
raises(AssertionError, conv.rickerwave, f, dt)
|
[
"numpy.zeros",
"numpy.ones",
"fatiando.seismic.conv.rickerwave",
"pytest.raises",
"fatiando.seismic.conv.convolutional_model",
"numpy.testing.assert_array_almost_equal"
] |
[((428, 456), 'fatiando.seismic.conv.rickerwave', 'conv.rickerwave', (['(30.0)', '(0.002)'], {}), '(30.0, 0.002)\n', (443, 456), False, 'from fatiando.seismic import conv\n'), ((470, 496), 'numpy.zeros', 'np.zeros', (['(w.shape[0], 20)'], {}), '((w.shape[0], 20))\n', (478, 496), True, 'import numpy as np\n'), ((544, 610), 'fatiando.seismic.conv.convolutional_model', 'conv.convolutional_model', (['rc_test', '(30.0)', 'conv.rickerwave'], {'dt': '(0.002)'}), '(rc_test, 30.0, conv.rickerwave, dt=0.002)\n', (568, 610), False, 'from fatiando.seismic import conv\n'), ((998, 1026), 'fatiando.seismic.conv.rickerwave', 'conv.rickerwave', (['(30.0)', '(0.002)'], {}), '(30.0, 0.002)\n', (1013, 1026), False, 'from fatiando.seismic import conv\n'), ((1040, 1058), 'numpy.zeros', 'np.zeros', (['(21, 20)'], {}), '((21, 20))\n', (1048, 1058), True, 'import numpy as np\n'), ((1111, 1177), 'fatiando.seismic.conv.convolutional_model', 'conv.convolutional_model', (['rc_test', '(30.0)', 'conv.rickerwave'], {'dt': '(0.002)'}), '(rc_test, 30.0, conv.rickerwave, dt=0.002)\n', (1135, 1177), False, 'from fatiando.seismic import conv\n'), ((1582, 1599), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1589, 1599), True, 'import numpy as np\n'), ((1611, 1628), 'numpy.ones', 'np.ones', (['(11, 11)'], {}), '((11, 11))\n', (1618, 1628), True, 'import numpy as np\n'), ((1633, 1685), 'pytest.raises', 'raises', (['AssertionError', 'conv.reflectivity', 'vel', 'dens'], {}), '(AssertionError, conv.reflectivity, vel, dens)\n', (1639, 1685), False, 'from pytest import raises\n'), ((1696, 1707), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1703, 1707), True, 'import numpy as np\n'), ((1721, 1732), 'numpy.ones', 'np.ones', (['(11)'], {}), '(11)\n', (1728, 1732), True, 'import numpy as np\n'), ((1739, 1791), 'pytest.raises', 'raises', (['AssertionError', 'conv.reflectivity', 'vel', 'dens'], {}), '(AssertionError, conv.reflectivity, vel, dens)\n', (1745, 1791), False, 'from pytest import raises\n'), ((1983, 2000), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1990, 2000), True, 'import numpy as np\n'), ((2012, 2029), 'numpy.ones', 'np.ones', (['(11, 11)'], {}), '((11, 11))\n', (2019, 2029), True, 'import numpy as np\n'), ((2061, 2121), 'pytest.raises', 'raises', (['AssertionError', 'conv.depth_2_time', 'vel', 'dens', 'dt', 'dz'], {}), '(AssertionError, conv.depth_2_time, vel, dens, dt, dz)\n', (2067, 2121), False, 'from pytest import raises\n'), ((2311, 2357), 'pytest.raises', 'raises', (['AssertionError', 'conv.rickerwave', 'f', 'dt'], {}), '(AssertionError, conv.rickerwave, f, dt)\n', (2317, 2357), False, 'from pytest import raises\n'), ((659, 703), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['spike[:, j]', 'w', '(9)'], {}), '(spike[:, j], w, 9)\n', (684, 703), False, 'from numpy.testing import assert_array_almost_equal, assert_allclose\n'), ((1327, 1382), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['spike[:, j]', 'w[wmin:wmax]', '(9)'], {}), '(spike[:, j], w[wmin:wmax], 9)\n', (1352, 1382), False, 'from numpy.testing import assert_array_almost_equal, assert_allclose\n')]
|
import numpy as np
def L2Loss(y_predicted, y_ground_truth, reduction="None"):
"""returns l2 loss between two arrays
:param y_predicted: array of predicted values
:type y_predicted: ndarray
:param y_ground_truth: array of ground truth values
:type y_ground_truth: ndarray
    :param reduction: reduction mode ("sum", "mean" or "None"), defaults to "None"
:type reduction: str, optional
:return: l2-loss
:rtype: scalar if reduction is sum or mean, else ndarray
"""
# Calculate the difference array
difference = y_predicted - y_ground_truth
# Raise every difference value to the power of 2
squared_difference = np.multiply(difference, difference)
# L2 distance is the reduced form of the squared difference array
if reduction == "sum":
# Reduction can be done by summing up all the values in the difference array (this is known as "L2-Loss")
l2_distance = np.sum(squared_difference)
return l2_distance
elif reduction == "mean":
# Reduction can also be done by taking the mean (this is known as "Mean Squared Error")
mean_squared_error = np.mean(squared_difference)
return mean_squared_error
elif reduction == "None":
return squared_difference
else:
        raise ValueError('reduction should be "sum" / "mean" / "None"')
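# Minimal usage sketch (illustrative values, not part of the original script):
#   L2Loss(np.array([1.0, 2.0]), np.array([0.0, 0.0]), reduction="sum")   # -> 5.0
#   L2Loss(np.array([1.0, 2.0]), np.array([0.0, 0.0]), reduction="mean")  # -> 2.5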
def main():
print("Initializing predicted and ground truth arrays:\n")
print('(NOTE: Enter the values in a space-separated format. Ex: "5.36 1.02 2.03")')
y_predicted = [
float(item) for item in input("Enter the predicted values: ").split()
]
y_ground_truth = [
float(item)
for item in input("Enter the corresponding ground truth values: ").split()
]
assert len(y_predicted) == len(
y_ground_truth
), "Number of predicted values {} and ground truth {} values should match".format(
len(y_predicted), len(y_ground_truth)
)
y_predicted = np.array(y_predicted)
y_ground_truth = np.array(y_ground_truth)
reduction = str(input('Enter the reduction mode: "sum" / "mean" / "None": '))
loss = L2Loss(y_predicted, y_ground_truth, reduction=reduction)
print("L2-Loss with {}-reduction: {}".format(reduction, loss))
if __name__ == "__main__":
main()
|
[
"numpy.array",
"numpy.mean",
"numpy.multiply",
"numpy.sum"
] |
[((637, 672), 'numpy.multiply', 'np.multiply', (['difference', 'difference'], {}), '(difference, difference)\n', (648, 672), True, 'import numpy as np\n'), ((1941, 1962), 'numpy.array', 'np.array', (['y_predicted'], {}), '(y_predicted)\n', (1949, 1962), True, 'import numpy as np\n'), ((1984, 2008), 'numpy.array', 'np.array', (['y_ground_truth'], {}), '(y_ground_truth)\n', (1992, 2008), True, 'import numpy as np\n'), ((906, 932), 'numpy.sum', 'np.sum', (['squared_difference'], {}), '(squared_difference)\n', (912, 932), True, 'import numpy as np\n'), ((1115, 1142), 'numpy.mean', 'np.mean', (['squared_difference'], {}), '(squared_difference)\n', (1122, 1142), True, 'import numpy as np\n')]
|
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opendr.engine.datasets import Dataset
from opendr.engine.data import Image
from opendr.perception.object_detection_2d.datasets.transforms import BoundingBoxListToNumpyArray
from opendr.engine.constants import OPENDR_SERVER_URL
from pycocotools.coco import COCO
import os
from urllib.request import urlretrieve
import ssl
import time
from zipfile import ZipFile
import tarfile
import pickle
import numpy as np
import math
from tqdm import tqdm
import gc
class Dataset_NMS(Dataset):
def __init__(self, path=None, dataset_name=None, split=None, use_ssd=True, device='cuda'):
super().__init__()
available_dataset = ['COCO', 'PETS', 'TEST_MODULE']
self.dataset_sets = {'train': None,
'val': None,
'test': None}
if dataset_name not in available_dataset:
except_str = 'Unsupported dataset: ' + dataset_name + '. Currently available are:'
for j in range(len(available_dataset)):
except_str = except_str + ' \'' + available_dataset[j] + '\''
if j < len(available_dataset) - 1:
except_str = except_str + ','
except_str = except_str + '.'
raise ValueError(except_str)
ssl._create_default_https_context = ssl._create_unverified_context
self.dataset_name = dataset_name
self.split = split
# self.__prepare_dataset()
self.path = os.path.join(path, dataset_name)
self.src_data = []
if self.dataset_name == "PETS":
self.detector = 'JPD'
self.detector_type = 'default'
if use_ssd:
self.detector = 'SSD'
self.detector_type = 'custom'
self.dataset_sets['train'] = 'train'
self.dataset_sets['val'] = 'val'
self.dataset_sets['test'] = 'test'
if self.dataset_sets[self.split] is None:
raise ValueError(self.split + ' split is not available...')
if not os.path.exists(os.path.join(self.path, 'images/S1/L1')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S1_L1.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S1/L2')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S1_L2.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S2/L1')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S2_L1.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S2/L2')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S2_L2.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S2/L3')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S2_L3.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S3/Multiple_Flow')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S3_MF.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(
os.path.join(self.path, 'annotations', 'pets_' + self.dataset_sets[self.split] + '.json')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/PETS_annotations_json.zip',
download_path=os.path.join(self.path, 'annotations'), file_format="zip",
create_dir=True)
pkl_filename = os.path.join(self.path,
'data_' + self.detector + '_' + self.dataset_sets[self.split] + '_pets.pkl')
if not os.path.exists(pkl_filename):
ssd = None
if use_ssd:
from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner
ssd = SingleShotDetectorLearner(device=device)
ssd.download(".", mode="pretrained")
ssd.load("./ssd_default_person", verbose=True)
if not os.path.exists(
os.path.join(self.path, 'detections',
'PETS-' + self.dataset_sets[self.split] + '_siyudpm_dets.idl')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/PETS_detections.zip',
download_path=os.path.join(self.path, 'detections'), file_format="zip",
create_dir=True)
if not os.path.exists(
os.path.join(self.path, 'annotations', 'PETS-' + self.dataset_sets[self.split] + '.idl')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/PETS_annotations.zip',
download_path=os.path.join(self.path, 'annotations'), file_format="zip",
create_dir=True)
with open(os.path.join(self.path, 'annotations',
'PETS-' + self.dataset_sets[self.split] + '.idl')) as fp_gt:
fp_dt = None
if self.detector_type == 'default':
fp_dt = open(os.path.join(self.path, 'detections',
'PETS-' + self.dataset_sets[self.split] + '_siyudpm_dets.idl'))
print('Preparing PETS ' + self.dataset_sets[self.split] + ' set...')
current_id = 0
number_samples = 1696
if self.split == 'val':
current_id = 1696
number_samples = 240
elif self.split == 'test':
current_id = 1936
number_samples = 436
pbarDesc = "Overall progress"
pbar = tqdm(desc=pbarDesc, total=number_samples)
if self.detector_type == 'default':
line_dt = fp_dt.readline()
line_gt = fp_gt.readline()
while line_gt:
remove_strings = ['PETS09-', '\"', ':', '(', ')', ',', '', ';']
data_gt = line_gt.replace(':', ' ')
for j in range(len(remove_strings)):
data_gt = data_gt.replace(remove_strings[j], '')
data_gt = data_gt.split()
filename_gt = data_gt[0][0:2] + '/' + data_gt[0][2:]
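                        # The ground-truth file references frames with a compact PETS naming
                        # scheme; the blocks below remap those names to the extracted folder
                        # layout (e.g. Time_*/View_001/frame_xxxx.jpg) and shift the frame
                        # index by one.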
if filename_gt[0:6] == 'S2/L1/':
filename_gt = filename_gt.replace('img/00', 'Time_12-34/View_001/frame_')
num = int(filename_gt[-8:-4]) - 1
filename_gt = filename_gt[:-8] + str(num).zfill(4) + '.jpg'
if filename_gt[0:6] == 'S2/L2/':
filename_gt = filename_gt.replace('img/00', 'Time_14-55/View_001/frame_')
num = int(filename_gt[-8:-4]) - 1
filename_gt = filename_gt[:-8] + str(num).zfill(4) + '.jpg'
if filename_gt[0:2] == 'S3':
filename_gt = filename_gt.replace('_MF', 'Multiple_Flow')
if self.detector_type == 'default':
data_dt = line_dt.replace(':', ' ')
for j in range(len(remove_strings)):
data_dt = data_dt.replace(remove_strings[j], '')
data_dt = data_dt.split()
filename_dt = data_dt[0][0:2] + '/' + data_dt[0][2:]
if filename_dt[0:6] == 'S2/L1/':
filename_dt = filename_dt.replace('img/00', 'Time_12-34/View_001/frame_')
num = int(filename_dt[-8:-4]) - 1
filename_dt = filename_dt[:-8] + str(num).zfill(4) + '.jpg'
if filename_dt[0:6] == 'S2/L2/':
filename_dt = filename_dt.replace('img/00', 'Time_14-55/View_001/frame_')
num = int(filename_dt[-8:-4]) - 1
filename_dt = filename_dt[:-8] + str(num).zfill(4) + '.jpg'
if filename_dt[0:2] == 'S3':
filename_dt = filename_dt.replace('_MF', 'Multiple_Flow')
if filename_gt != filename_dt:
raise ValueError('Errors in files...')
img = Image.open(os.path.join(self.path, 'images/', filename_gt))
dt_boxes = []
if self.detector_type == 'default':
for i in range(1, (len(data_dt)), 5):
dt_box = np.array((float(data_dt[i]), float(data_dt[i + 1]), float(data_dt[i + 2]),
float(data_dt[i + 3]), 1 / (1 + math.exp(- float(data_dt[i + 4])))))
dt_boxes.append(dt_box)
else:
bboxes_list = ssd.infer(img, threshold=0.0, custom_nms=None, nms_thresh=0.975,
nms_topk=6000, post_nms=6000)
bboxes_list = BoundingBoxListToNumpyArray()(bboxes_list)
bboxes_list = bboxes_list[bboxes_list[:, 4] > 0.015]
bboxes_list = bboxes_list[np.argsort(bboxes_list[:, 4]), :][::-1]
bboxes_list = bboxes_list[:5000, :]
for b in range(len(bboxes_list)):
dt_boxes.append(np.array([bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2],
bboxes_list[b, 3], bboxes_list[b, 4][0]]))
gt_boxes = []
for i in range(1, (len(data_gt)), 5):
gt_box = np.array((float(data_gt[i]), float(data_gt[i + 1]), float(data_gt[i + 2]),
float(data_gt[i + 3])))
gt_boxes.append(gt_box)
self.src_data.append({
'id': current_id,
'filename': os.path.join('images', filename_gt),
'resolution': img.opencv().shape[0:2][::-1],
'gt_boxes': [np.asarray([]), np.asarray(gt_boxes)],
'dt_boxes': [np.asarray([]), np.asarray(dt_boxes)]
})
current_id = current_id + 1
pbar.update(1)
if self.detector_type == 'default':
line_dt = fp_dt.readline()
line_gt = fp_gt.readline()
pbar.close()
if self.detector_type == 'default':
fp_dt.close()
elif self.detector == 'SSD':
del ssd
gc.collect()
with open(pkl_filename, 'wb') as handle:
pickle.dump(self.src_data, handle, protocol=pickle.DEFAULT_PROTOCOL)
else:
with open(pkl_filename, 'rb') as fp_pkl:
self.src_data = pickle.load(fp_pkl)
self.classes = ['background', 'human']
self.class_ids = [-1, 1]
self.annotation_file = 'pets_' + self.dataset_sets[self.split] + '.json'
elif self.dataset_name == "COCO":
self.dataset_sets['train'] = 'train'
self.dataset_sets['val'] = 'minival'
self.dataset_sets['test'] = 'valminusminival'
if self.dataset_sets[self.split] is None:
raise ValueError(self.split + ' split is not available...')
elif self.dataset_sets[self.split] == 'train':
imgs_split = 'train2014'
else:
imgs_split = 'val2014'
self.detector = 'FRCN'
self.detector_type = 'default'
ssd = None
if use_ssd:
self.detector = 'SSD'
self.detector_type = 'custom'
from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner
ssd = SingleShotDetectorLearner(device=device)
ssd.download(".", mode="pretrained")
ssd.load("./ssd_default_person", verbose=True)
if not os.path.exists(os.path.join(self.path, imgs_split)):
self.download('http://images.cocodataset.org/zips/' + imgs_split + '.zip',
download_path=os.path.join(self.path), file_format="zip",
create_dir=True)
pkl_filename = os.path.join(self.path, 'data_' + self.detector + '_' +
self.dataset_sets[self.split] + '_coco.pkl')
if not os.path.exists(pkl_filename):
if not os.path.exists(os.path.join(self.path, 'annotations', 'instances_' +
self.dataset_sets[self.split] +
'2014.json')):
if self.dataset_sets[self.split] == 'train':
ann_url = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip'
self.download(ann_url, download_path=os.path.join(self.path), file_format="zip",
create_dir=True)
else:
if self.dataset_sets[self.split] == 'minival':
ann_url = 'https://dl.dropboxusercontent.com/s/o43o90bna78omob/' \
'instances_minival2014.json.zip?dl=0'
else:
ann_url = 'https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/' \
'instances_valminusminival2014.json.zip?dl=0'
self.download(ann_url, download_path=os.path.join(self.path, 'annotations'), file_format="zip",
create_dir=True)
if not os.path.exists(os.path.join(self.path, 'detections', 'coco_2014_' +
self.dataset_sets[self.split] +
'_FRCN_train.pkl')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/coco_2014_FRCN.tar.gz',
download_path=os.path.join(self.path, 'detections'), file_format='tar.gz',
create_dir=True)
with open(os.path.join(self.path, 'detections',
'coco_2014_' + self.dataset_sets[self.split] + '_FRCN_train.pkl'), 'rb') as f:
dets_default = pickle.load(f, encoding='latin1')
annots = COCO(annotation_file=os.path.join(self.path, 'annotations', 'instances_' +
self.dataset_sets[self.split] + '2014.json'))
pbarDesc = "Overall progress"
pbar = tqdm(desc=pbarDesc, total=len(dets_default[1]))
for i in range(len(dets_default[1])):
dt_boxes = []
img_info = annots.loadImgs([dets_default[1][i]])[0]
img = Image.open(os.path.join(self.path, imgs_split, img_info["file_name"]))
if self.detector_type == 'default':
dt_boxes = dets_default[0][1][i]
elif self.detector == 'SSD':
bboxes_list = ssd.infer(img, threshold=0.0, custom_nms=None, nms_thresh=0.975,
nms_topk=6000, post_nms=6000)
bboxes_list = BoundingBoxListToNumpyArray()(bboxes_list)
if bboxes_list.shape[0] > 0:
bboxes_list = bboxes_list[bboxes_list[:, 4] > 0.015]
if bboxes_list.shape[0] > 0:
bboxes_list = bboxes_list[np.argsort(bboxes_list[:, 4]), :][::-1]
bboxes_list = bboxes_list[:5000, :]
for b in range(len(bboxes_list)):
dt_boxes.append(np.array([bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2],
bboxes_list[b, 3], bboxes_list[b, 4][0]]))
dt_boxes = np.asarray(dt_boxes)
annots_in_frame = annots.loadAnns(
annots.getAnnIds(imgIds=[dets_default[1][i]], catIds=[1], iscrowd=False))
gt_boxes = []
for j in range(len(annots_in_frame)):
gt_boxes.append(annots_in_frame[j]['bbox'])
gt_boxes = np.asarray(np.asarray(gt_boxes))
if gt_boxes.shape[0] > 0:
gt_boxes[:, 2] = gt_boxes[:, 0] + gt_boxes[:, 2]
gt_boxes[:, 3] = gt_boxes[:, 1] + gt_boxes[:, 3]
self.src_data.append({
'id': dets_default[1][i],
'filename': os.path.join(imgs_split, img_info["file_name"]),
'resolution': [img_info['width'], img_info['height']],
'gt_boxes': [np.asarray([]), gt_boxes],
'dt_boxes': [np.asarray([]), dt_boxes]
})
pbar.update(1)
pbar.close()
if self.detector == 'SSD':
del ssd
gc.collect()
with open(pkl_filename, 'wb') as handle:
pickle.dump(self.src_data, handle, protocol=pickle.DEFAULT_PROTOCOL)
else:
with open(pkl_filename, 'rb') as fp_pkl:
self.src_data = pickle.load(fp_pkl)
self.classes = ['background', 'person']
self.class_ids = [-1, 1]
self.annotation_file = 'instances_' + self.dataset_sets[self.split] + '2014.json'
elif self.dataset_name == "TEST_MODULE":
self.dataset_sets['train'] = 'test'
self.dataset_sets['val'] = 'test'
self.dataset_sets['test'] = 'test'
if self.dataset_sets[self.split] is None:
raise ValueError(self.split + ' split is not available...')
pkl_filename = os.path.join(self.path, 'test_module.pkl')
if not os.path.exists(pkl_filename):
data_url = OPENDR_SERVER_URL + '/perception/object_detection_2d/nms/datasets/test_module.zip'
self.download(data_url, download_path=os.path.join(self.path).replace("TEST_MODULE", ""), file_format="zip",
create_dir=True)
with open(pkl_filename, 'rb') as fp_pkl:
self.src_data = pickle.load(fp_pkl)
self.classes = ['background', 'person']
self.class_ids = [-1, 1]
self.annotation_file = 'test_module_anns.json'
@staticmethod
def download(
url, download_path, dataset_sub_path=".", file_format="zip", create_dir=False):
if create_dir:
os.makedirs(download_path, exist_ok=True)
print("Downloading dataset from", url, "to", download_path)
start_time = 0
last_print = 0
def reporthook(count, block_size, total_size):
nonlocal start_time
nonlocal last_print
if count == 0:
start_time = time.time()
last_print = start_time
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
if time.time() - last_print >= 1:
last_print = time.time()
print(
"\r%d MB, %d KB/s, %d seconds passed" %
(progress_size / (1024 * 1024), speed, duration),
end=''
)
if file_format == "zip":
zip_path = os.path.join(download_path, "dataset.zip")
urlretrieve(url, zip_path, reporthook=reporthook)
print()
print("Extracting data from zip file")
with ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(download_path)
os.remove(zip_path)
elif file_format == "tar.bz2" or file_format == "tar.gz":
tar_path = os.path.join(download_path, "dataset." + file_format)
urlretrieve(url, tar_path, reporthook=reporthook)
print()
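            # Helper generator: strip the leading 'Crowd_PETS09/' directory from
            # archive members so that files extract directly under download_path.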
def members(tf):
                prefix_len = len("Crowd_PETS09/")
                for member in tf.getmembers():
                    if member.path.startswith("Crowd_PETS09/"):
                        member.path = member.path[prefix_len:]
yield member
with tarfile.open(tar_path, "r:" + file_format.split('.')[1]) as tar:
if file_format == "tar.bz2":
tar.extractall(path=download_path, members=members(tar))
else:
tar.extractall(path=download_path)
tar.close()
os.remove(tar_path)
else:
raise ValueError("Unsupported file_format: " + file_format)
|
[
"os.remove",
"tqdm.tqdm",
"zipfile.ZipFile",
"os.makedirs",
"opendr.perception.object_detection_2d.ssd.ssd_learner.SingleShotDetectorLearner",
"pickle.dump",
"numpy.asarray",
"os.path.exists",
"time.time",
"opendr.perception.object_detection_2d.datasets.transforms.BoundingBoxListToNumpyArray",
"urllib.request.urlretrieve",
"gc.collect",
"pickle.load",
"numpy.array",
"numpy.argsort",
"os.path.join"
] |
[((2050, 2082), 'os.path.join', 'os.path.join', (['path', 'dataset_name'], {}), '(path, dataset_name)\n', (2062, 2082), False, 'import os\n'), ((5035, 5140), 'os.path.join', 'os.path.join', (['self.path', "('data_' + self.detector + '_' + self.dataset_sets[self.split] + '_pets.pkl')"], {}), "(self.path, 'data_' + self.detector + '_' + self.dataset_sets[\n self.split] + '_pets.pkl')\n", (5047, 5140), False, 'import os\n'), ((21238, 21279), 'os.makedirs', 'os.makedirs', (['download_path'], {'exist_ok': '(True)'}), '(download_path, exist_ok=True)\n', (21249, 21279), False, 'import os\n'), ((22149, 22191), 'os.path.join', 'os.path.join', (['download_path', '"""dataset.zip"""'], {}), "(download_path, 'dataset.zip')\n", (22161, 22191), False, 'import os\n'), ((22204, 22253), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'zip_path'], {'reporthook': 'reporthook'}), '(url, zip_path, reporthook=reporthook)\n', (22215, 22253), False, 'from urllib.request import urlretrieve\n'), ((22439, 22458), 'os.remove', 'os.remove', (['zip_path'], {}), '(zip_path)\n', (22448, 22458), False, 'import os\n'), ((5195, 5223), 'os.path.exists', 'os.path.exists', (['pkl_filename'], {}), '(pkl_filename)\n', (5209, 5223), False, 'import os\n'), ((14549, 14654), 'os.path.join', 'os.path.join', (['self.path', "('data_' + self.detector + '_' + self.dataset_sets[self.split] + '_coco.pkl')"], {}), "(self.path, 'data_' + self.detector + '_' + self.dataset_sets[\n self.split] + '_coco.pkl')\n", (14561, 14654), False, 'import os\n'), ((21572, 21583), 'time.time', 'time.time', ([], {}), '()\n', (21581, 21583), False, 'import time\n'), ((21671, 21682), 'time.time', 'time.time', ([], {}), '()\n', (21680, 21682), False, 'import time\n'), ((21882, 21893), 'time.time', 'time.time', ([], {}), '()\n', (21891, 21893), False, 'import time\n'), ((22342, 22364), 'zipfile.ZipFile', 'ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (22349, 22364), False, 'from zipfile import ZipFile\n'), ((22548, 22601), 'os.path.join', 'os.path.join', (['download_path', "('dataset.' + file_format)"], {}), "(download_path, 'dataset.' 
+ file_format)\n", (22560, 22601), False, 'import os\n'), ((22614, 22663), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'tar_path'], {'reporthook': 'reporthook'}), '(url, tar_path, reporthook=reporthook)\n', (22625, 22663), False, 'from urllib.request import urlretrieve\n'), ((23275, 23294), 'os.remove', 'os.remove', (['tar_path'], {}), '(tar_path)\n', (23284, 23294), False, 'import os\n'), ((2642, 2681), 'os.path.join', 'os.path.join', (['self.path', '"""images/S1/L1"""'], {}), "(self.path, 'images/S1/L1')\n", (2654, 2681), False, 'import os\n'), ((2973, 3012), 'os.path.join', 'os.path.join', (['self.path', '"""images/S1/L2"""'], {}), "(self.path, 'images/S1/L2')\n", (2985, 3012), False, 'import os\n'), ((3304, 3343), 'os.path.join', 'os.path.join', (['self.path', '"""images/S2/L1"""'], {}), "(self.path, 'images/S2/L1')\n", (3316, 3343), False, 'import os\n'), ((3635, 3674), 'os.path.join', 'os.path.join', (['self.path', '"""images/S2/L2"""'], {}), "(self.path, 'images/S2/L2')\n", (3647, 3674), False, 'import os\n'), ((3966, 4005), 'os.path.join', 'os.path.join', (['self.path', '"""images/S2/L3"""'], {}), "(self.path, 'images/S2/L3')\n", (3978, 4005), False, 'import os\n'), ((4297, 4347), 'os.path.join', 'os.path.join', (['self.path', '"""images/S3/Multiple_Flow"""'], {}), "(self.path, 'images/S3/Multiple_Flow')\n", (4309, 4347), False, 'import os\n'), ((4660, 4754), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""', "('pets_' + self.dataset_sets[self.split] + '.json')"], {}), "(self.path, 'annotations', 'pets_' + self.dataset_sets[self.\n split] + '.json')\n", (4672, 4754), False, 'import os\n'), ((5418, 5458), 'opendr.perception.object_detection_2d.ssd.ssd_learner.SingleShotDetectorLearner', 'SingleShotDetectorLearner', ([], {'device': 'device'}), '(device=device)\n', (5443, 5458), False, 'from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner\n'), ((7415, 7456), 'tqdm.tqdm', 'tqdm', ([], {'desc': 'pbarDesc', 'total': 'number_samples'}), '(desc=pbarDesc, total=number_samples)\n', (7419, 7456), False, 'from tqdm import tqdm\n'), ((13049, 13068), 'pickle.load', 'pickle.load', (['fp_pkl'], {}), '(fp_pkl)\n', (13060, 13068), False, 'import pickle\n'), ((14067, 14107), 'opendr.perception.object_detection_2d.ssd.ssd_learner.SingleShotDetectorLearner', 'SingleShotDetectorLearner', ([], {'device': 'device'}), '(device=device)\n', (14092, 14107), False, 'from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner\n'), ((14709, 14737), 'os.path.exists', 'os.path.exists', (['pkl_filename'], {}), '(pkl_filename)\n', (14723, 14737), False, 'import os\n'), ((20446, 20488), 'os.path.join', 'os.path.join', (['self.path', '"""test_module.pkl"""'], {}), "(self.path, 'test_module.pkl')\n", (20458, 20488), False, 'import os\n'), ((21822, 21833), 'time.time', 'time.time', ([], {}), '()\n', (21831, 21833), False, 'import time\n'), ((2864, 2897), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (2876, 2897), False, 'import os\n'), ((3195, 3228), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (3207, 3228), False, 'import os\n'), ((3526, 3559), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (3538, 3559), False, 'import os\n'), ((3857, 3890), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (3869, 3890), False, 'import os\n'), 
((4188, 4221), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (4200, 4221), False, 'import os\n'), ((4530, 4563), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (4542, 4563), False, 'import os\n'), ((4902, 4940), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""'], {}), "(self.path, 'annotations')\n", (4914, 4940), False, 'import os\n'), ((5646, 5751), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""', "('PETS-' + self.dataset_sets[self.split] + '_siyudpm_dets.idl')"], {}), "(self.path, 'detections', 'PETS-' + self.dataset_sets[self.\n split] + '_siyudpm_dets.idl')\n", (5658, 5751), False, 'import os\n'), ((6110, 6203), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""', "('PETS-' + self.dataset_sets[self.split] + '.idl')"], {}), "(self.path, 'annotations', 'PETS-' + self.dataset_sets[self.\n split] + '.idl')\n", (6122, 6203), False, 'import os\n'), ((6490, 6583), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""', "('PETS-' + self.dataset_sets[self.split] + '.idl')"], {}), "(self.path, 'annotations', 'PETS-' + self.dataset_sets[self.\n split] + '.idl')\n", (6502, 6583), False, 'import os\n'), ((12869, 12937), 'pickle.dump', 'pickle.dump', (['self.src_data', 'handle'], {'protocol': 'pickle.DEFAULT_PROTOCOL'}), '(self.src_data, handle, protocol=pickle.DEFAULT_PROTOCOL)\n', (12880, 12937), False, 'import pickle\n'), ((14258, 14293), 'os.path.join', 'os.path.join', (['self.path', 'imgs_split'], {}), '(self.path, imgs_split)\n', (14270, 14293), False, 'import os\n'), ((16795, 16828), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (16806, 16828), False, 'import pickle\n'), ((18474, 18494), 'numpy.asarray', 'np.asarray', (['dt_boxes'], {}), '(dt_boxes)\n', (18484, 18494), True, 'import numpy as np\n'), ((19626, 19638), 'gc.collect', 'gc.collect', ([], {}), '()\n', (19636, 19638), False, 'import gc\n'), ((19716, 19784), 'pickle.dump', 'pickle.dump', (['self.src_data', 'handle'], {'protocol': 'pickle.DEFAULT_PROTOCOL'}), '(self.src_data, handle, protocol=pickle.DEFAULT_PROTOCOL)\n', (19727, 19784), False, 'import pickle\n'), ((19896, 19915), 'pickle.load', 'pickle.load', (['fp_pkl'], {}), '(fp_pkl)\n', (19907, 19915), False, 'import pickle\n'), ((20508, 20536), 'os.path.exists', 'os.path.exists', (['pkl_filename'], {}), '(pkl_filename)\n', (20522, 20536), False, 'import os\n'), ((20905, 20924), 'pickle.load', 'pickle.load', (['fp_pkl'], {}), '(fp_pkl)\n', (20916, 20924), False, 'import pickle\n'), ((5938, 5975), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""'], {}), "(self.path, 'detections')\n", (5950, 5975), False, 'import os\n'), ((6354, 6392), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""'], {}), "(self.path, 'annotations')\n", (6366, 6392), False, 'import os\n'), ((6755, 6860), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""', "('PETS-' + self.dataset_sets[self.split] + '_siyudpm_dets.idl')"], {}), "(self.path, 'detections', 'PETS-' + self.dataset_sets[self.\n split] + '_siyudpm_dets.idl')\n", (6767, 6860), False, 'import os\n'), ((10191, 10238), 'os.path.join', 'os.path.join', (['self.path', '"""images/"""', 'filename_gt'], {}), "(self.path, 'images/', filename_gt)\n", (10203, 10238), False, 'import os\n'), ((12771, 12783), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12781, 12783), False, 'import gc\n'), ((14431, 14454), 
'os.path.join', 'os.path.join', (['self.path'], {}), '(self.path)\n', (14443, 14454), False, 'import os\n'), ((14777, 14880), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""', "('instances_' + self.dataset_sets[self.split] + '2014.json')"], {}), "(self.path, 'annotations', 'instances_' + self.dataset_sets[\n self.split] + '2014.json')\n", (14789, 14880), False, 'import os\n'), ((16054, 16162), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""', "('coco_2014_' + self.dataset_sets[self.split] + '_FRCN_train.pkl')"], {}), "(self.path, 'detections', 'coco_2014_' + self.dataset_sets[self\n .split] + '_FRCN_train.pkl')\n", (16066, 16162), False, 'import os\n'), ((16604, 16712), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""', "('coco_2014_' + self.dataset_sets[self.split] + '_FRCN_train.pkl')"], {}), "(self.path, 'detections', 'coco_2014_' + self.dataset_sets[self\n .split] + '_FRCN_train.pkl')\n", (16616, 16712), False, 'import os\n'), ((16875, 16978), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""', "('instances_' + self.dataset_sets[self.split] + '2014.json')"], {}), "(self.path, 'annotations', 'instances_' + self.dataset_sets[\n self.split] + '2014.json')\n", (16887, 16978), False, 'import os\n'), ((17348, 17406), 'os.path.join', 'os.path.join', (['self.path', 'imgs_split', "img_info['file_name']"], {}), "(self.path, imgs_split, img_info['file_name'])\n", (17360, 17406), False, 'import os\n'), ((18850, 18870), 'numpy.asarray', 'np.asarray', (['gt_boxes'], {}), '(gt_boxes)\n', (18860, 18870), True, 'import numpy as np\n'), ((10958, 10987), 'opendr.perception.object_detection_2d.datasets.transforms.BoundingBoxListToNumpyArray', 'BoundingBoxListToNumpyArray', ([], {}), '()\n', (10985, 10987), False, 'from opendr.perception.object_detection_2d.datasets.transforms import BoundingBoxListToNumpyArray\n'), ((11986, 12021), 'os.path.join', 'os.path.join', (['"""images"""', 'filename_gt'], {}), "('images', filename_gt)\n", (11998, 12021), False, 'import os\n'), ((16466, 16503), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""'], {}), "(self.path, 'detections')\n", (16478, 16503), False, 'import os\n'), ((19193, 19240), 'os.path.join', 'os.path.join', (['imgs_split', "img_info['file_name']"], {}), "(imgs_split, img_info['file_name'])\n", (19205, 19240), False, 'import os\n'), ((11350, 11462), 'numpy.array', 'np.array', (['[bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2], bboxes_list[b, 3],\n bboxes_list[b, 4][0]]'], {}), '([bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2],\n bboxes_list[b, 3], bboxes_list[b, 4][0]])\n', (11358, 11462), True, 'import numpy as np\n'), ((12137, 12151), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (12147, 12151), True, 'import numpy as np\n'), ((12153, 12173), 'numpy.asarray', 'np.asarray', (['gt_boxes'], {}), '(gt_boxes)\n', (12163, 12173), True, 'import numpy as np\n'), ((12217, 12231), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (12227, 12231), True, 'import numpy as np\n'), ((12233, 12253), 'numpy.asarray', 'np.asarray', (['dt_boxes'], {}), '(dt_boxes)\n', (12243, 12253), True, 'import numpy as np\n'), ((15265, 15288), 'os.path.join', 'os.path.join', (['self.path'], {}), '(self.path)\n', (15277, 15288), False, 'import os\n'), ((15902, 15940), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""'], {}), "(self.path, 'annotations')\n", (15914, 15940), False, 'import os\n'), ((17789, 17818), 
'opendr.perception.object_detection_2d.datasets.transforms.BoundingBoxListToNumpyArray', 'BoundingBoxListToNumpyArray', ([], {}), '()\n', (17816, 17818), False, 'from opendr.perception.object_detection_2d.datasets.transforms import BoundingBoxListToNumpyArray\n'), ((19358, 19372), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (19368, 19372), True, 'import numpy as np\n'), ((19422, 19436), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (19432, 19436), True, 'import numpy as np\n'), ((11136, 11165), 'numpy.argsort', 'np.argsort', (['bboxes_list[:, 4]'], {}), '(bboxes_list[:, 4])\n', (11146, 11165), True, 'import numpy as np\n'), ((18279, 18391), 'numpy.array', 'np.array', (['[bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2], bboxes_list[b, 3],\n bboxes_list[b, 4][0]]'], {}), '([bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2],\n bboxes_list[b, 3], bboxes_list[b, 4][0]])\n', (18287, 18391), True, 'import numpy as np\n'), ((20702, 20725), 'os.path.join', 'os.path.join', (['self.path'], {}), '(self.path)\n', (20714, 20725), False, 'import os\n'), ((18073, 18102), 'numpy.argsort', 'np.argsort', (['bboxes_list[:, 4]'], {}), '(bboxes_list[:, 4])\n', (18083, 18102), True, 'import numpy as np\n')]
|
import flask
import random
import sys
import os
import glob
import re
from pathlib import Path
import pickle
import numpy as np
# Import fast.ai Library
from fastai import *
from fastai.vision import *
# Flask utils
from flask import Flask, redirect, url_for, request, render_template,jsonify
from werkzeug.utils import secure_filename
app = flask.Flask(__name__)
UPLOAD_FOLDER = './UPLOAD_FOLDER/'
path=Path("path")
classes = ['stress', 'non-stress']
learn=load_learner(path,'a.pkl')
with open('classifier_pickle','rb') as f:
cls=pickle.load(f)
label_dictionary = {0: 'Healthy Plant', 1: 'Stress but recoverable',2:'Cannot Recover'}
def model_predict(img_path):
"""model_predict will return the preprocessed image
"""
img = open_image(img_path)
pred_class,pred_idx,outputs = learn.predict(img)
return pred_class
@app.route('/upload', methods = ['GET', 'POST'])
def handle_request():
print("hello");
imagefile = flask.request.files['image']
print("hello", flask.request);
filename = UPLOAD_FOLDER + str(random.randint(0, 5000)) + '.png'
#filename = werkzeug.utils.secure_filename(imagefile.filename)
#filename= "photo.jpg";
print("\nReceived image File name : " + imagefile.filename)
imagefile.save(filename)
preds=model_predict(filename)
print(type(preds))
return str(preds)
@app.route('/calculate', methods = ['GET', 'POST'])
def handle_response():
print("Hello");
    # Get the JSON payload from the request body.
    json = request.get_json()
    # Keys that must be present in the JSON payload.
    transaction_keys = ['tdry', 'twet', 'tcanopy', 'timeDay']
    # Return an error message if a key is not included in the payload.
#stringValues= flask.request.values.get['dry', 'wet', 'canopy', 'time']
#print("Hello", flask.request);
a=json[transaction_keys[0]]
print(a)
b=json[transaction_keys[1]]
print(b)
c=json[transaction_keys[2]]
print(c)
d=json[transaction_keys[3]]
print(d)
pred=np.array([[a,b,c,d]])
pr=cls.predict(pred)
print(pr)
return jsonify(label_dictionary[int(pr)])
#ans=label_dictionary[int(pr)]
#print(ans)
#return ans
app.run(host="127.0.0.1",port=5000, debug=True)
|
[
"random.randint",
"flask.Flask",
"pathlib.Path",
"pickle.load",
"numpy.array",
"flask.request.get_json"
] |
[((346, 367), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (357, 367), False, 'import flask\n'), ((409, 421), 'pathlib.Path', 'Path', (['"""path"""'], {}), "('path')\n", (413, 421), False, 'from pathlib import Path\n'), ((544, 558), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (555, 558), False, 'import pickle\n'), ((1508, 1526), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1524, 1526), False, 'from flask import Flask, redirect, url_for, request, render_template, jsonify\n'), ((1990, 2014), 'numpy.array', 'np.array', (['[[a, b, c, d]]'], {}), '([[a, b, c, d]])\n', (1998, 2014), True, 'import numpy as np\n'), ((1054, 1077), 'random.randint', 'random.randint', (['(0)', '(5000)'], {}), '(0, 5000)\n', (1068, 1077), False, 'import random\n')]
|
import time
from collections import defaultdict
from datetime import timedelta
import cvxpy as cp
import empiricalutilities as eu
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from transfer_entropy import TransferEntropy
plt.style.use('fivethirtyeight')
# %%
eqs = 'SPY DIA XLK XLV XLF IYZ XLY XLP XLI XLE XLU XME IYR XLB XPH IWM PHO ' \
'SOXX WOOD FDN GNR IBB ILF ITA IYT KIE PBW ' \
'AFK EZA ECH EWW EWC EWZ EEM EIDO EPOL EPP EWA EWD EWG EWH EWJ EWI EWK ' \
'EWL EWM EWP EWQ EWS EWT EWU EWY GXC HAO EZU RSX TUR'.split()
fi = 'AGG SHY IEI IEF TLT TIP LQD HYG MBB'.split()
cmdtys = 'GLD SLV DBA DBC USO UNG'.split()
fx = 'FXA FXB FXC FXE FXF FXY'.split()
assets = eqs + fi + cmdtys + fx
def cum_rets(rets):
cum_rets = []
cum_rets.append(1)
for i, ret in enumerate(rets):
cum_rets.append(cum_rets[i]*(1+ret))
return cum_rets
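# Example (illustrative): cum_rets([0.1, -0.5]) returns [1, 1.1, 0.55],
# i.e. a cumulative growth series that starts at 1.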
# %%
ete_mats = {}
mod = TransferEntropy(assets=assets)
period = 'Q'
months = mod.prices.index.to_period(period).unique().to_timestamp()
iters = len(months)-24
with tqdm(total=iters) as pbar:
for start, end in zip(months[:-1], months[1:]):
end -= timedelta(1)
mod.set_timeperiod(start, end)
mod.compute_effective_transfer_entropy(sims=30, bins=6,
std_threshold=1)
ete = mod.ete.copy()
ete_mats[start] = ete
pbar.update(1)
ete_df = pd.concat(ete_mats)
ete_df.to_csv(f'../ete_{period}.csv')
# %%
q = 4
res = defaultdict(dict)
mod = TransferEntropy(assets=assets)
iters = len(months)-1
for start, end in zip(months[:-1], months[1:]):
ete = ete_mats[start]
ete_out = ete.sum(axis=0)
ete_in = ete.sum(axis=1)
end -= timedelta(1)
mod.set_timeperiod(start, end)
returns = mod.prices.iloc[-1]/mod.prices.iloc[0]-1
vols = mod.data.std()
names = 'eteout etein etenetout etetotal'.split()
for name, ETE in zip(names, [ete_out, ete_in,
ete_out-ete_in, ete_in+ete_out]):
df = pd.DataFrame({'returns': returns, 'vol': vols, name: ETE})
df['q'] = pd.qcut(ETE, q=q, labels=False)
res[name][start] = df.groupby('q').agg('mean').reset_index().copy()
# %%
q_rets = {}
for name in names:
resdf = res[name]
resdf = pd.concat(resdf)
resdf.index = resdf.index.droplevel(1)
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
cmap = sns.color_palette('Blues_r', n_colors=4)
for c, qtile in zip(cmap, range(q)):
q_rets[qtile] = resdf[resdf['q']==qtile]['returns'].values
ax.plot(months, cum_rets(q_rets[qtile]), c=c,
lw=2, alpha=1, label=f'Quartile {qtile+1}')
fig.autofmt_xdate()
plt.ylabel('Cumulative Return')
plt.xlabel('Time')
plt.legend()
plt.tight_layout()
plt.savefig(f'../plots/{name}_quartile_returns.png', dpi=300)
eu.latex_figure(f'../data/plots/{name}_quartile_returns.png')
# %%
for name in names:
table = defaultdict(dict)
resdf = res[name]
resdf = pd.concat(resdf)
resdf.index = resdf.index.droplevel(1)
for qtile in range(q):
table[qtile]['r'] = resdf[resdf['q']==qtile]['returns'].mean()*12
table[qtile]['v'] = resdf[resdf['q']==qtile]['returns'].std()*np.sqrt(12)
table[qtile][name] = resdf[resdf['q']==qtile][name].mean()
table = pd.DataFrame.from_dict(table, orient='index')
table['sr'] = table['r']/table['v']
table = table.reset_index()
table = table[['index', 'r', 'v', 'sr', name]]
cols = 'Quartile Return Volatility Sharpe'.split()
cols += [name]
table.columns = cols
table['Quartile'] += 1
table[['Return', 'Volatility']] *= 100
eu.latex_print(table, prec=2, hide_index=True)
# %%
def get_CAPM_weights(er, cov, gamma):
n = cov.shape[0]
w = cp.Variable((n, 1))
gamma = cp.Parameter(nonneg=True, value=gamma)
ret = w.T @ er
risk = cp.quad_form(w, cov)
constraints = [
cp.sum(w) == 1,
w <= 0.1,
w >= 0,
ret >= 0.02,
]
obj = cp.Maximize(ret - gamma*risk)
prob = cp.Problem(obj, constraints)
prob.solve()
return w.value
def get_MV_weights(er, cov):
n = cov.shape[0]
w = cp.Variable((n, 1))
ret = w.T @ er
risk = cp.quad_form(w, cov)
constraints = [
cp.sum(w) == 1,
w <= 0.1,
w >= 0,
ret >= 0.02,
]
obj = cp.Minimize(risk)
prob = cp.Problem(obj, constraints)
prob.solve()
return w.value
def get_weights(er, start, ete):
n = len(ete)
w = cp.Variable((n, 1))
ret = w.T @ er
obj = cp.Minimize(w.T @ (ete))
constraints = [
cp.sum(w) == 1,
w <= 0.1,
w >= 0,
ret >= 0.02,
]
prob = cp.Problem(obj, constraints)
prob.solve()
return w.value
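# Sketch of how the optimizers above can be exercised on synthetic inputs
# (illustrative only; the expected returns, covariance and ETE vector are made up):
#   n = len(assets)
#   er = np.full((n, 1), 0.02)
#   cov = np.eye(n) * 0.01
#   w_mv = get_MV_weights(er, cov)                   # minimum-variance portfolio
#   w_ete = get_weights(er, None, np.zeros((n, 1)))  # minimum net-ETE portfolio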
# %%
ete_mats = pd.read_csv('../ete_Q.csv', index_col=[0, 1], parse_dates=True,
infer_datetime_format=True)
ete_mats = ete_mats[assets].copy()
mod = TransferEntropy(assets=assets)
mo_df = mod.prices.resample('Q').last()
mo_ret_df = (mo_df/mo_df.shift(1).values-1).dropna()
EXP_RETS = mo_ret_df.mean().values.reshape(-1, 1)
e_perf = []
e_perf_ete = []
mv_perf = []
mv_perf_ete = []
capm = defaultdict(list)
capm_ete = defaultdict(list)
gammas = [0.1, 1, 10]
with tqdm(total=iters) as pbar:
for start, end in zip(months[:-1], months[1:]):
end -= timedelta(1)
mod.set_timeperiod(start, end)
# get month's returns, cov, and ete matrices
cov = np.cov(mod.data.values, rowvar=False)
ete_mat = ete_mats.loc[start]
ete_mat = ete_mat.T[assets].T.values.copy()
ete_out = ete_mat.sum(axis=0).reshape(-1, 1)
ete_in = ete_mat.sum(axis=1).reshape(-1, 1)
net_out = ete_out - ete_in
r = (mod.prices.iloc[-1]/mod.prices.iloc[0]-1).values
# get strategy weights
we = get_weights(EXP_RETS, start, net_out)
wmv = get_MV_weights(EXP_RETS, cov)
e_perf.append(np.squeeze(we.T @ r))
e_perf_ete.append(np.squeeze(we.T @ net_out))
mv_perf.append(np.squeeze(wmv.T @ r))
mv_perf_ete.append(np.squeeze(wmv.T @ net_out))
for gamma in gammas:
w_capm = get_CAPM_weights(EXP_RETS, cov, gamma)
capm[gamma].append(np.squeeze(w_capm.T @ r))
capm_ete[gamma].append(np.squeeze(w_capm.T @ net_out))
pbar.update(1)
# %%
alpha=0.75
lw=2
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
cmap2 = sns.color_palette('Reds_r', n_colors=len(gammas)*2)
ax.plot(months, cum_rets(e_perf), alpha=alpha,
label='ETE', lw=lw, c='steelblue')
ax.plot(months, cum_rets(mv_perf), alpha=alpha,
label='MV', lw=lw, c='forestgreen')
for i, gamma in enumerate(reversed(gammas[1:])):
ax.plot(months, cum_rets(capm[gamma]), alpha=alpha,
label=f'CAPM $\\gamma={gamma}$', lw=lw, c=cmap2[i])
fig.autofmt_xdate()
plt.ylabel('Cumulative Return')
plt.xlabel('Time')
plt.legend()
plt.tight_layout()
eu.save_fig(f'../plots/portfolio_comparison', dpi=300)
plt.show()
eu.latex_figure(f'../plots/portfolio_comparison')
# %%
tbl = pd.DataFrame({
'ETE': e_perf,
'MV': mv_perf,
'CAPM 1': capm[1],
'CAPM 10': capm[10],
}, index=months[1:])
tbl = (tbl.mean()*4).to_frame().join((tbl.std()*np.sqrt(4)).to_frame(),
rsuffix='vol')
tbl.columns = 'Return Volatility'.split()
tbl['Sharpe'] = tbl['Return']/tbl['Volatility']
tbl['Return'] *= 100
tbl['Volatility'] *= 100
tbl2 = pd.DataFrame({
'ETE': e_perf_ete,
'MV': mv_perf_ete,
'CAPM 1': capm_ete[1],
'CAPM 10': capm_ete[10],
}, index=months[1:])
tbl = tbl.join(tbl2.mean().to_frame())
tbl.columns = 'Return Volatility Sharpe ETE'.split()
eu.latex_print(tbl, prec=2)
|
[
"pandas.read_csv",
"collections.defaultdict",
"cvxpy.sum",
"matplotlib.pyplot.style.use",
"empiricalutilities.save_fig",
"matplotlib.pyplot.tight_layout",
"cvxpy.Maximize",
"cvxpy.quad_form",
"pandas.DataFrame",
"datetime.timedelta",
"cvxpy.Problem",
"empiricalutilities.latex_figure",
"pandas.qcut",
"numpy.cov",
"matplotlib.pyplot.subplots",
"pandas.concat",
"tqdm.tqdm",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.legend",
"cvxpy.Variable",
"numpy.squeeze",
"matplotlib.pyplot.ylabel",
"transfer_entropy.TransferEntropy",
"cvxpy.Minimize",
"cvxpy.Parameter",
"empiricalutilities.latex_print",
"seaborn.color_palette",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((293, 325), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (306, 325), True, 'import matplotlib.pyplot as plt\n'), ((959, 989), 'transfer_entropy.TransferEntropy', 'TransferEntropy', ([], {'assets': 'assets'}), '(assets=assets)\n', (974, 989), False, 'from transfer_entropy import TransferEntropy\n'), ((1466, 1485), 'pandas.concat', 'pd.concat', (['ete_mats'], {}), '(ete_mats)\n', (1475, 1485), True, 'import pandas as pd\n'), ((1541, 1558), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1552, 1558), False, 'from collections import defaultdict\n'), ((1566, 1596), 'transfer_entropy.TransferEntropy', 'TransferEntropy', ([], {'assets': 'assets'}), '(assets=assets)\n', (1581, 1596), False, 'from transfer_entropy import TransferEntropy\n'), ((4887, 4982), 'pandas.read_csv', 'pd.read_csv', (['"""../ete_Q.csv"""'], {'index_col': '[0, 1]', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('../ete_Q.csv', index_col=[0, 1], parse_dates=True,\n infer_datetime_format=True)\n", (4898, 4982), True, 'import pandas as pd\n'), ((5025, 5055), 'transfer_entropy.TransferEntropy', 'TransferEntropy', ([], {'assets': 'assets'}), '(assets=assets)\n', (5040, 5055), False, 'from transfer_entropy import TransferEntropy\n'), ((5265, 5282), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5276, 5282), False, 'from collections import defaultdict\n'), ((5294, 5311), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5305, 5311), False, 'from collections import defaultdict\n'), ((6485, 6520), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 6)'}), '(1, 1, figsize=(10, 6))\n', (6497, 6520), True, 'import matplotlib.pyplot as plt\n'), ((6942, 6973), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Return"""'], {}), "('Cumulative Return')\n", (6952, 6973), True, 'import matplotlib.pyplot as plt\n'), ((6974, 6992), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (6984, 6992), True, 'import matplotlib.pyplot as plt\n'), ((6993, 7005), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7003, 7005), True, 'import matplotlib.pyplot as plt\n'), ((7006, 7024), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7022, 7024), True, 'import matplotlib.pyplot as plt\n'), ((7025, 7079), 'empiricalutilities.save_fig', 'eu.save_fig', (['f"""../plots/portfolio_comparison"""'], {'dpi': '(300)'}), "(f'../plots/portfolio_comparison', dpi=300)\n", (7036, 7079), True, 'import empiricalutilities as eu\n'), ((7080, 7090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7088, 7090), True, 'import matplotlib.pyplot as plt\n'), ((7091, 7140), 'empiricalutilities.latex_figure', 'eu.latex_figure', (['f"""../plots/portfolio_comparison"""'], {}), "(f'../plots/portfolio_comparison')\n", (7106, 7140), True, 'import empiricalutilities as eu\n'), ((7152, 7258), 'pandas.DataFrame', 'pd.DataFrame', (["{'ETE': e_perf, 'MV': mv_perf, 'CAPM 1': capm[1], 'CAPM 10': capm[10]}"], {'index': 'months[1:]'}), "({'ETE': e_perf, 'MV': mv_perf, 'CAPM 1': capm[1], 'CAPM 10':\n capm[10]}, index=months[1:])\n", (7164, 7258), True, 'import pandas as pd\n'), ((7539, 7661), 'pandas.DataFrame', 'pd.DataFrame', (["{'ETE': e_perf_ete, 'MV': mv_perf_ete, 'CAPM 1': capm_ete[1], 'CAPM 10':\n capm_ete[10]}"], {'index': 'months[1:]'}), "({'ETE': e_perf_ete, 'MV': mv_perf_ete, 'CAPM 1': capm_ete[1],\n 'CAPM 10': capm_ete[10]}, 
index=months[1:])\n", (7551, 7661), True, 'import pandas as pd\n'), ((7793, 7820), 'empiricalutilities.latex_print', 'eu.latex_print', (['tbl'], {'prec': '(2)'}), '(tbl, prec=2)\n', (7807, 7820), True, 'import empiricalutilities as eu\n'), ((1100, 1117), 'tqdm.tqdm', 'tqdm', ([], {'total': 'iters'}), '(total=iters)\n', (1104, 1117), False, 'from tqdm import tqdm\n'), ((1766, 1778), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (1775, 1778), False, 'from datetime import timedelta\n'), ((2340, 2356), 'pandas.concat', 'pd.concat', (['resdf'], {}), '(resdf)\n', (2349, 2356), True, 'import pandas as pd\n'), ((2415, 2450), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 6)'}), '(1, 1, figsize=(10, 6))\n', (2427, 2450), True, 'import matplotlib.pyplot as plt\n'), ((2462, 2502), 'seaborn.color_palette', 'sns.color_palette', (['"""Blues_r"""'], {'n_colors': '(4)'}), "('Blues_r', n_colors=4)\n", (2479, 2502), True, 'import seaborn as sns\n'), ((2750, 2781), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Return"""'], {}), "('Cumulative Return')\n", (2760, 2781), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2804), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2796, 2804), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2821), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2819, 2821), True, 'import matplotlib.pyplot as plt\n'), ((2826, 2844), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2842, 2844), True, 'import matplotlib.pyplot as plt\n'), ((2849, 2910), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../plots/{name}_quartile_returns.png"""'], {'dpi': '(300)'}), "(f'../plots/{name}_quartile_returns.png', dpi=300)\n", (2860, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2915, 2976), 'empiricalutilities.latex_figure', 'eu.latex_figure', (['f"""../data/plots/{name}_quartile_returns.png"""'], {}), "(f'../data/plots/{name}_quartile_returns.png')\n", (2930, 2976), True, 'import empiricalutilities as eu\n'), ((3015, 3032), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3026, 3032), False, 'from collections import defaultdict\n'), ((3067, 3083), 'pandas.concat', 'pd.concat', (['resdf'], {}), '(resdf)\n', (3076, 3083), True, 'import pandas as pd\n'), ((3400, 3445), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['table'], {'orient': '"""index"""'}), "(table, orient='index')\n", (3422, 3445), True, 'import pandas as pd\n'), ((3742, 3788), 'empiricalutilities.latex_print', 'eu.latex_print', (['table'], {'prec': '(2)', 'hide_index': '(True)'}), '(table, prec=2, hide_index=True)\n', (3756, 3788), True, 'import empiricalutilities as eu\n'), ((3861, 3880), 'cvxpy.Variable', 'cp.Variable', (['(n, 1)'], {}), '((n, 1))\n', (3872, 3880), True, 'import cvxpy as cp\n'), ((3893, 3931), 'cvxpy.Parameter', 'cp.Parameter', ([], {'nonneg': '(True)', 'value': 'gamma'}), '(nonneg=True, value=gamma)\n', (3905, 3931), True, 'import cvxpy as cp\n'), ((3962, 3982), 'cvxpy.quad_form', 'cp.quad_form', (['w', 'cov'], {}), '(w, cov)\n', (3974, 3982), True, 'import cvxpy as cp\n'), ((4102, 4133), 'cvxpy.Maximize', 'cp.Maximize', (['(ret - gamma * risk)'], {}), '(ret - gamma * risk)\n', (4113, 4133), True, 'import cvxpy as cp\n'), ((4143, 4171), 'cvxpy.Problem', 'cp.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (4153, 4171), True, 'import cvxpy as cp\n'), ((4268, 4287), 'cvxpy.Variable', 'cp.Variable', (['(n, 1)'], {}), '((n, 1))\n', 
(4279, 4287), True, 'import cvxpy as cp\n'), ((4318, 4338), 'cvxpy.quad_form', 'cp.quad_form', (['w', 'cov'], {}), '(w, cov)\n', (4330, 4338), True, 'import cvxpy as cp\n'), ((4458, 4475), 'cvxpy.Minimize', 'cp.Minimize', (['risk'], {}), '(risk)\n', (4469, 4475), True, 'import cvxpy as cp\n'), ((4487, 4515), 'cvxpy.Problem', 'cp.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (4497, 4515), True, 'import cvxpy as cp\n'), ((4611, 4630), 'cvxpy.Variable', 'cp.Variable', (['(n, 1)'], {}), '((n, 1))\n', (4622, 4630), True, 'import cvxpy as cp\n'), ((4660, 4682), 'cvxpy.Minimize', 'cp.Minimize', (['(w.T @ ete)'], {}), '(w.T @ ete)\n', (4671, 4682), True, 'import cvxpy as cp\n'), ((4805, 4833), 'cvxpy.Problem', 'cp.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (4815, 4833), True, 'import cvxpy as cp\n'), ((5340, 5357), 'tqdm.tqdm', 'tqdm', ([], {'total': 'iters'}), '(total=iters)\n', (5344, 5357), False, 'from tqdm import tqdm\n'), ((1194, 1206), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (1203, 1206), False, 'from datetime import timedelta\n'), ((2082, 2140), 'pandas.DataFrame', 'pd.DataFrame', (["{'returns': returns, 'vol': vols, name: ETE}"], {}), "({'returns': returns, 'vol': vols, name: ETE})\n", (2094, 2140), True, 'import pandas as pd\n'), ((2159, 2190), 'pandas.qcut', 'pd.qcut', (['ETE'], {'q': 'q', 'labels': '(False)'}), '(ETE, q=q, labels=False)\n', (2166, 2190), True, 'import pandas as pd\n'), ((5434, 5446), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (5443, 5446), False, 'from datetime import timedelta\n'), ((5554, 5591), 'numpy.cov', 'np.cov', (['mod.data.values'], {'rowvar': '(False)'}), '(mod.data.values, rowvar=False)\n', (5560, 5591), True, 'import numpy as np\n'), ((3308, 3319), 'numpy.sqrt', 'np.sqrt', (['(12)'], {}), '(12)\n', (3315, 3319), True, 'import numpy as np\n'), ((4011, 4020), 'cvxpy.sum', 'cp.sum', (['w'], {}), '(w)\n', (4017, 4020), True, 'import cvxpy as cp\n'), ((4367, 4376), 'cvxpy.sum', 'cp.sum', (['w'], {}), '(w)\n', (4373, 4376), True, 'import cvxpy as cp\n'), ((4713, 4722), 'cvxpy.sum', 'cp.sum', (['w'], {}), '(w)\n', (4719, 4722), True, 'import cvxpy as cp\n'), ((6036, 6056), 'numpy.squeeze', 'np.squeeze', (['(we.T @ r)'], {}), '(we.T @ r)\n', (6046, 6056), True, 'import numpy as np\n'), ((6084, 6110), 'numpy.squeeze', 'np.squeeze', (['(we.T @ net_out)'], {}), '(we.T @ net_out)\n', (6094, 6110), True, 'import numpy as np\n'), ((6136, 6157), 'numpy.squeeze', 'np.squeeze', (['(wmv.T @ r)'], {}), '(wmv.T @ r)\n', (6146, 6157), True, 'import numpy as np\n'), ((6186, 6213), 'numpy.squeeze', 'np.squeeze', (['(wmv.T @ net_out)'], {}), '(wmv.T @ net_out)\n', (6196, 6213), True, 'import numpy as np\n'), ((6336, 6360), 'numpy.squeeze', 'np.squeeze', (['(w_capm.T @ r)'], {}), '(w_capm.T @ r)\n', (6346, 6360), True, 'import numpy as np\n'), ((6397, 6427), 'numpy.squeeze', 'np.squeeze', (['(w_capm.T @ net_out)'], {}), '(w_capm.T @ net_out)\n', (6407, 6427), True, 'import numpy as np\n'), ((7347, 7357), 'numpy.sqrt', 'np.sqrt', (['(4)'], {}), '(4)\n', (7354, 7357), True, 'import numpy as np\n')]
|
"""Experiments and corresponding analysis.
format adapted from https://github.com/gyyang/olfaction_evolution
Each experiment is described by a function that returns a list of configurations
function name is the experiment name
combinatorial mode:
config_ranges should not have repetitive values
sequential mode:
config_ranges values should have equal length,
otherwise this will only loop through the shortest one
control mode:
base_config must contain keys in config_ranges
"""
import os
import copy
from collections import OrderedDict
import logging
import numpy as np
from configs.config_global import ROOT_DIR, LOG_LEVEL
from configs.configs import BaseConfig
from utils.config_utils import vary_config
from analysis.train_analysis import plot_train_log
import evaluate
from analysis import plots
def init_analysis(configs_):
logging.basicConfig(level=LOG_LEVEL)
exp_name = configs_[0].experiment_name
print('Analyzing ' + exp_name)
exp_path = os.path.join(ROOT_DIR, 'experiments', exp_name) + os.sep
plot_train_log([exp_path], exp_name=exp_name)
# -----------------------------------------------------
# experiments
# -----------------------------------------------------
def timescale():
config = BaseConfig()
config.experiment_name = 'timescale'
config.rnn_type = 'plainRNN'
config.t_scale = 1.0
config.augment = None
config.use_velocity = False
config.context = None
config.context_w = 10
config.hidden_size = 64
config.num_ep = 40
config_ranges = OrderedDict()
config_ranges['rnn_type'] = ['plainRNN',
'CTRNN',
'LSTM',
'GRU',
'RNNSTSP']
configs = vary_config(config, config_ranges, mode='combinatorial')
return configs
def timescale_aug():
config = BaseConfig()
config.experiment_name = 'timescale_aug'
config.rnn_type = 'plainRNN'
config.t_scale = 1.0
config.augment = (0.5, 1.5)
config.use_velocity = False
config.context = None
config.context_w = 10
config.hidden_size = 64
config.num_ep = 40
config_ranges = OrderedDict()
config_ranges['rnn_type'] = ['plainRNN',
'CTRNN',
'LSTM',
'GRU',
'RNNSTSP']
configs = vary_config(config, config_ranges, mode='combinatorial')
return configs
def timecode():
config = BaseConfig()
config.experiment_name = 'timecode'
config.rnn_type = 'plainRNN'
config.t_scale = 1.0
config.augment = None
config.use_velocity = False
config.context = 'zero'
config.context_w = 10
config.hidden_size = 64
config.num_ep = 40
config_ranges = OrderedDict()
config_ranges['context'] = ['zero', 'noise', 'scalar', 'ramping',
'clock', 'stairs_end', 'stairs_start']
configs = vary_config(config, config_ranges, mode='combinatorial')
return configs
def timecode_aug():
config = BaseConfig()
config.experiment_name = 'timecode_aug'
config.rnn_type = 'plainRNN'
config.t_scale = 1.0
config.augment = (0.5, 1.5)
config.use_velocity = False
config.context = 'zero'
config.context_w = 10
config.hidden_size = 64
config.num_ep = 40
config_ranges = OrderedDict()
config_ranges['context'] = ['zero', 'noise', 'scalar', 'ramping',
'clock', 'stairs_end', 'stairs_start']
configs = vary_config(config, config_ranges, mode='combinatorial')
return configs
# -----------------------------------------------------
# analysis
# -----------------------------------------------------
def timescale_analysis():
configs = timescale()
init_analysis(configs)
t_scale_list = np.arange(0.1, 2, 0.1)
acc_list = np.zeros_like(t_scale_list)
for cfg in configs:
for i_s, t_scale in enumerate(t_scale_list):
new_cfg = copy.deepcopy(cfg)
new_cfg.t_scale = t_scale
acc_list[i_s] = evaluate.eval_total_acc(new_cfg)
np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), t_scale_list)
np.save(os.path.join(cfg.save_path, 'acclist.npy'), acc_list)
plots.plot_gen(t_scale_list, acc_list, cfg.rnn_type)
plots.plot_group_gen(configs, configs[0].experiment_name, mode='rnn_type')
def timescale_aug_analysis():
configs = timescale_aug()
init_analysis(configs)
t_scale_list = np.arange(0.1, 2, 0.1)
acc_list = np.zeros_like(t_scale_list)
for cfg in configs:
for i_s, t_scale in enumerate(t_scale_list):
new_cfg = copy.deepcopy(cfg)
new_cfg.t_scale = t_scale
acc_list[i_s] = evaluate.eval_total_acc(new_cfg)
np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), t_scale_list)
np.save(os.path.join(cfg.save_path, 'acclist.npy'), acc_list)
plots.plot_gen(t_scale_list, acc_list, cfg.rnn_type)
plots.plot_group_gen(configs, configs[0].experiment_name, mode='rnn_type')
def timecode_analysis():
configs = timecode()
init_analysis(configs)
t_scale_list = np.arange(0.1, 2, 0.1)
acc_list = np.zeros_like(t_scale_list)
for cfg in configs:
for i_s, t_scale in enumerate(t_scale_list):
new_cfg = copy.deepcopy(cfg)
new_cfg.t_scale = t_scale
acc_list[i_s] = evaluate.eval_total_acc(new_cfg)
np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), t_scale_list)
np.save(os.path.join(cfg.save_path, 'acclist.npy'), acc_list)
plots.plot_gen(t_scale_list, acc_list, cfg.context)
plots.plot_group_gen(configs, configs[0].experiment_name, mode='context')
def timecode_aug_analysis():
configs = timecode_aug()
init_analysis(configs)
t_scale_list = np.arange(0.1, 2, 0.1)
acc_list = np.zeros_like(t_scale_list)
for cfg in configs:
for i_s, t_scale in enumerate(t_scale_list):
new_cfg = copy.deepcopy(cfg)
new_cfg.t_scale = t_scale
acc_list[i_s] = evaluate.eval_total_acc(new_cfg)
np.save(os.path.join(cfg.save_path, 'tscalelist.npy'), t_scale_list)
np.save(os.path.join(cfg.save_path, 'acclist.npy'), acc_list)
plots.plot_gen(t_scale_list, acc_list, cfg.context)
plots.plot_group_gen(configs, configs[0].experiment_name, mode='context')
|
[
"utils.config_utils.vary_config",
"copy.deepcopy",
"numpy.zeros_like",
"analysis.plots.plot_gen",
"os.path.join",
"logging.basicConfig",
"evaluate.eval_total_acc",
"numpy.arange",
"analysis.train_analysis.plot_train_log",
"collections.OrderedDict",
"configs.configs.BaseConfig",
"analysis.plots.plot_group_gen"
] |
[((857, 893), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'LOG_LEVEL'}), '(level=LOG_LEVEL)\n', (876, 893), False, 'import logging\n'), ((1048, 1093), 'analysis.train_analysis.plot_train_log', 'plot_train_log', (['[exp_path]'], {'exp_name': 'exp_name'}), '([exp_path], exp_name=exp_name)\n', (1062, 1093), False, 'from analysis.train_analysis import plot_train_log\n'), ((1254, 1266), 'configs.configs.BaseConfig', 'BaseConfig', ([], {}), '()\n', (1264, 1266), False, 'from configs.configs import BaseConfig\n'), ((1549, 1562), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1560, 1562), False, 'from collections import OrderedDict\n'), ((1789, 1845), 'utils.config_utils.vary_config', 'vary_config', (['config', 'config_ranges'], {'mode': '"""combinatorial"""'}), "(config, config_ranges, mode='combinatorial')\n", (1800, 1845), False, 'from utils.config_utils import vary_config\n'), ((1901, 1913), 'configs.configs.BaseConfig', 'BaseConfig', ([], {}), '()\n', (1911, 1913), False, 'from configs.configs import BaseConfig\n'), ((2206, 2219), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2217, 2219), False, 'from collections import OrderedDict\n'), ((2446, 2502), 'utils.config_utils.vary_config', 'vary_config', (['config', 'config_ranges'], {'mode': '"""combinatorial"""'}), "(config, config_ranges, mode='combinatorial')\n", (2457, 2502), False, 'from utils.config_utils import vary_config\n'), ((2553, 2565), 'configs.configs.BaseConfig', 'BaseConfig', ([], {}), '()\n', (2563, 2565), False, 'from configs.configs import BaseConfig\n'), ((2849, 2862), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2860, 2862), False, 'from collections import OrderedDict\n'), ((3018, 3074), 'utils.config_utils.vary_config', 'vary_config', (['config', 'config_ranges'], {'mode': '"""combinatorial"""'}), "(config, config_ranges, mode='combinatorial')\n", (3029, 3074), False, 'from utils.config_utils import vary_config\n'), ((3129, 3141), 'configs.configs.BaseConfig', 'BaseConfig', ([], {}), '()\n', (3139, 3141), False, 'from configs.configs import BaseConfig\n'), ((3435, 3448), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3446, 3448), False, 'from collections import OrderedDict\n'), ((3604, 3660), 'utils.config_utils.vary_config', 'vary_config', (['config', 'config_ranges'], {'mode': '"""combinatorial"""'}), "(config, config_ranges, mode='combinatorial')\n", (3615, 3660), False, 'from utils.config_utils import vary_config\n'), ((3904, 3926), 'numpy.arange', 'np.arange', (['(0.1)', '(2)', '(0.1)'], {}), '(0.1, 2, 0.1)\n', (3913, 3926), True, 'import numpy as np\n'), ((3942, 3969), 'numpy.zeros_like', 'np.zeros_like', (['t_scale_list'], {}), '(t_scale_list)\n', (3955, 3969), True, 'import numpy as np\n'), ((4400, 4474), 'analysis.plots.plot_group_gen', 'plots.plot_group_gen', (['configs', 'configs[0].experiment_name'], {'mode': '"""rnn_type"""'}), "(configs, configs[0].experiment_name, mode='rnn_type')\n", (4420, 4474), False, 'from analysis import plots\n'), ((4583, 4605), 'numpy.arange', 'np.arange', (['(0.1)', '(2)', '(0.1)'], {}), '(0.1, 2, 0.1)\n', (4592, 4605), True, 'import numpy as np\n'), ((4621, 4648), 'numpy.zeros_like', 'np.zeros_like', (['t_scale_list'], {}), '(t_scale_list)\n', (4634, 4648), True, 'import numpy as np\n'), ((5079, 5153), 'analysis.plots.plot_group_gen', 'plots.plot_group_gen', (['configs', 'configs[0].experiment_name'], {'mode': '"""rnn_type"""'}), "(configs, configs[0].experiment_name, mode='rnn_type')\n", (5099, 5153), 
False, 'from analysis import plots\n'), ((5252, 5274), 'numpy.arange', 'np.arange', (['(0.1)', '(2)', '(0.1)'], {}), '(0.1, 2, 0.1)\n', (5261, 5274), True, 'import numpy as np\n'), ((5290, 5317), 'numpy.zeros_like', 'np.zeros_like', (['t_scale_list'], {}), '(t_scale_list)\n', (5303, 5317), True, 'import numpy as np\n'), ((5747, 5820), 'analysis.plots.plot_group_gen', 'plots.plot_group_gen', (['configs', 'configs[0].experiment_name'], {'mode': '"""context"""'}), "(configs, configs[0].experiment_name, mode='context')\n", (5767, 5820), False, 'from analysis import plots\n'), ((5927, 5949), 'numpy.arange', 'np.arange', (['(0.1)', '(2)', '(0.1)'], {}), '(0.1, 2, 0.1)\n', (5936, 5949), True, 'import numpy as np\n'), ((5965, 5992), 'numpy.zeros_like', 'np.zeros_like', (['t_scale_list'], {}), '(t_scale_list)\n', (5978, 5992), True, 'import numpy as np\n'), ((6422, 6495), 'analysis.plots.plot_group_gen', 'plots.plot_group_gen', (['configs', 'configs[0].experiment_name'], {'mode': '"""context"""'}), "(configs, configs[0].experiment_name, mode='context')\n", (6442, 6495), False, 'from analysis import plots\n'), ((987, 1034), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""experiments"""', 'exp_name'], {}), "(ROOT_DIR, 'experiments', exp_name)\n", (999, 1034), False, 'import os\n'), ((4343, 4395), 'analysis.plots.plot_gen', 'plots.plot_gen', (['t_scale_list', 'acc_list', 'cfg.rnn_type'], {}), '(t_scale_list, acc_list, cfg.rnn_type)\n', (4357, 4395), False, 'from analysis import plots\n'), ((5022, 5074), 'analysis.plots.plot_gen', 'plots.plot_gen', (['t_scale_list', 'acc_list', 'cfg.rnn_type'], {}), '(t_scale_list, acc_list, cfg.rnn_type)\n', (5036, 5074), False, 'from analysis import plots\n'), ((5691, 5742), 'analysis.plots.plot_gen', 'plots.plot_gen', (['t_scale_list', 'acc_list', 'cfg.context'], {}), '(t_scale_list, acc_list, cfg.context)\n', (5705, 5742), False, 'from analysis import plots\n'), ((6366, 6417), 'analysis.plots.plot_gen', 'plots.plot_gen', (['t_scale_list', 'acc_list', 'cfg.context'], {}), '(t_scale_list, acc_list, cfg.context)\n', (6380, 6417), False, 'from analysis import plots\n'), ((4069, 4087), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (4082, 4087), False, 'import copy\n'), ((4154, 4186), 'evaluate.eval_total_acc', 'evaluate.eval_total_acc', (['new_cfg'], {}), '(new_cfg)\n', (4177, 4186), False, 'import evaluate\n'), ((4204, 4249), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""tscalelist.npy"""'], {}), "(cfg.save_path, 'tscalelist.npy')\n", (4216, 4249), False, 'import os\n'), ((4281, 4323), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""acclist.npy"""'], {}), "(cfg.save_path, 'acclist.npy')\n", (4293, 4323), False, 'import os\n'), ((4748, 4766), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (4761, 4766), False, 'import copy\n'), ((4833, 4865), 'evaluate.eval_total_acc', 'evaluate.eval_total_acc', (['new_cfg'], {}), '(new_cfg)\n', (4856, 4865), False, 'import evaluate\n'), ((4883, 4928), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""tscalelist.npy"""'], {}), "(cfg.save_path, 'tscalelist.npy')\n", (4895, 4928), False, 'import os\n'), ((4960, 5002), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""acclist.npy"""'], {}), "(cfg.save_path, 'acclist.npy')\n", (4972, 5002), False, 'import os\n'), ((5417, 5435), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (5430, 5435), False, 'import copy\n'), ((5502, 5534), 'evaluate.eval_total_acc', 'evaluate.eval_total_acc', (['new_cfg'], {}), '(new_cfg)\n', (5525, 
5534), False, 'import evaluate\n'), ((5552, 5597), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""tscalelist.npy"""'], {}), "(cfg.save_path, 'tscalelist.npy')\n", (5564, 5597), False, 'import os\n'), ((5629, 5671), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""acclist.npy"""'], {}), "(cfg.save_path, 'acclist.npy')\n", (5641, 5671), False, 'import os\n'), ((6092, 6110), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (6105, 6110), False, 'import copy\n'), ((6177, 6209), 'evaluate.eval_total_acc', 'evaluate.eval_total_acc', (['new_cfg'], {}), '(new_cfg)\n', (6200, 6209), False, 'import evaluate\n'), ((6227, 6272), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""tscalelist.npy"""'], {}), "(cfg.save_path, 'tscalelist.npy')\n", (6239, 6272), False, 'import os\n'), ((6304, 6346), 'os.path.join', 'os.path.join', (['cfg.save_path', '"""acclist.npy"""'], {}), "(cfg.save_path, 'acclist.npy')\n", (6316, 6346), False, 'import os\n')]
|
from pathlib import Path
import numpy as np
from PIL import Image
def load_light_distribution(name="lamp_spectrum.csv"):
sd_light_source = np.loadtxt(name, skiprows=1, dtype="float")
sd_light_source = sd_light_source[np.where(sd_light_source[:, 0] >= 400)]
# rindx = np.where(sd_light_source[:, 0] >= 400) and np.where(sd_light_source[:, 0] <= 600)
sd_light_source = sd_light_source[:, 1:2]
# print("sum", np.sum(sd_light_source))
sd_light_source = sd_light_source[::20]
sd_light_source = sd_light_source[:44]
# print(sd_light_source.shape)
return sd_light_source
def load_illuminantA(name="A.csv"):
sd_light_source = np.loadtxt(name, skiprows=1, dtype="float")
sd_light_source = sd_light_source[np.where(sd_light_source[:, 0] >= 400)]
sd_light_source = sd_light_source[:, 1:2]
# print("sum",np.sum(sd_light_source))
# sd_light_source = sd_light_source / np.max(sd_light_source)
sd_light_source = sd_light_source[::2]
sd_light_source = sd_light_source[:44]
# print(sd_light_source)
return sd_light_source
def hsi_to_ci31931_rgb(himg, dist_name):
pass
def hsi_to_rgb(himg, dist_name):
"""
input: ハイパースペクトル画像 HSI(numpy型)
return: RGB画像(Image objedct)
"""
# 計測時にノイズとして負の値になった値を0にする
np.where(himg < 0, 0, himg)
cmf = np.loadtxt("./csvs/CIE1931-2deg-XYZ.csv", delimiter=",")
    # The HSI starts at 400 nm, so keep only color matching function rows at 400 nm and above
cmf = cmf[np.where(cmf[:, 0] >= 400)]
    # Resample from 5 nm steps to the same 10 nm steps as the HSI
cmf = cmf[::2]
cmf = cmf[:44, :]
stem = Path(dist_name).stem
if stem in ["A"]:
        # Standard illuminant A treats wavelengths up to 780 nm as visible, so restrict the HSI and the color matching function to 780 nm and below as well
nhimg = himg[:, :, :39]
cmf = cmf[:39, :]
sd_light_source = load_illuminantA(name=dist_name)
elif stem in ["D65"]:
nhimg = himg[:, :, :44]
cmf = cmf[:44, :]
sd_light_source = load_illuminantA(name=dist_name)
else:
nhimg = himg[:, :, :44]
sd_light_source = load_light_distribution(name=dist_name)
flag_const_100 = False
ncmf = cmf[:, 1:]
nmf_multi_ld = ncmf * sd_light_source
x = nmf_multi_ld[:, 0]
y = nmf_multi_ld[:, 1]
z = nmf_multi_ld[:, 2]
if flag_const_100:
k = 100 / np.sum(y)
# print(np.sum(y))
else:
k = 1 / np.sum(y)
# print(np.sum(y))
X = np.sum(x * nhimg, axis=2)
Y = np.sum(y * nhimg, axis=2)
Z = np.sum(z * nhimg, axis=2)
XYZ = np.stack([X, Y, Z], 2)
# print(np.max(XYZ), np.min(XYZ))
# print(np.max(Y*k), np.min(Y*k))
XYZ = XYZ * k
xyz_to_r = np.array([3.2406255, -1.537208, -0.4986286])
r = np.dot(XYZ, xyz_to_r)
xyz_to_g = np.array([-0.9689307, 1.8757561, 0.0415175])
g = np.dot(XYZ, xyz_to_g)
xyz_to_b = np.array([0.0557101, -0.2040211, 1.0569959])
b = np.dot(XYZ, xyz_to_b)
rgb_img2 = np.stack([r, g, b], axis=2)
rgb_img2 = np.where(rgb_img2 < 0, 0, rgb_img2)
if flag_const_100:
        # Apply the same gamma correction (gamma = 0.6) as the HSI dataset distributor
# print(np.max(rgb_img2))
rgb_img2 = np.power(rgb_img2/255, 0.6)
else:
        # Gamma correction for rendering XYZ to sRGB
# print(np.max(255*rgb_img2))
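        # Standard sRGB encoding: C' = 12.92*C if C <= 0.0031308,
        # else C' = 1.055 * C^(1/2.4) - 0.055, for linear C in [0, 1]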
rgb_img2 = np.where(rgb_img2 <= 0.0031308, 12.92 * rgb_img2, 1.055 * np.power(rgb_img2, 1/2.4) - 0.055)
rgb_img2 = np.clip(rgb_img2, 0, 1)
if flag_const_100:
img = Image.fromarray(np.uint8(255*rgb_img2))
else:
img = Image.fromarray(np.uint8(255*rgb_img2))
return img
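# Minimal usage sketch (illustrative only; the .npy path below is an assumption,
# while the CSV names match the defaults and checks used above):
#
#   himg = np.load("hyperspectral_cube.npy")      # (H, W, bands), 10 nm steps from 400 nm
#   img = hsi_to_rgb(himg, "lamp_spectrum.csv")   # or "A.csv" / "D65.csv"
#   img.save("preview.png")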
|
[
"numpy.stack",
"numpy.uint8",
"numpy.sum",
"numpy.power",
"numpy.clip",
"pathlib.Path",
"numpy.where",
"numpy.array",
"numpy.loadtxt",
"numpy.dot"
] |
[((146, 189), 'numpy.loadtxt', 'np.loadtxt', (['name'], {'skiprows': '(1)', 'dtype': '"""float"""'}), "(name, skiprows=1, dtype='float')\n", (156, 189), True, 'import numpy as np\n'), ((663, 706), 'numpy.loadtxt', 'np.loadtxt', (['name'], {'skiprows': '(1)', 'dtype': '"""float"""'}), "(name, skiprows=1, dtype='float')\n", (673, 706), True, 'import numpy as np\n'), ((1287, 1314), 'numpy.where', 'np.where', (['(himg < 0)', '(0)', 'himg'], {}), '(himg < 0, 0, himg)\n', (1295, 1314), True, 'import numpy as np\n'), ((1326, 1382), 'numpy.loadtxt', 'np.loadtxt', (['"""./csvs/CIE1931-2deg-XYZ.csv"""'], {'delimiter': '""","""'}), "('./csvs/CIE1931-2deg-XYZ.csv', delimiter=',')\n", (1336, 1382), True, 'import numpy as np\n'), ((2341, 2366), 'numpy.sum', 'np.sum', (['(x * nhimg)'], {'axis': '(2)'}), '(x * nhimg, axis=2)\n', (2347, 2366), True, 'import numpy as np\n'), ((2375, 2400), 'numpy.sum', 'np.sum', (['(y * nhimg)'], {'axis': '(2)'}), '(y * nhimg, axis=2)\n', (2381, 2400), True, 'import numpy as np\n'), ((2409, 2434), 'numpy.sum', 'np.sum', (['(z * nhimg)'], {'axis': '(2)'}), '(z * nhimg, axis=2)\n', (2415, 2434), True, 'import numpy as np\n'), ((2445, 2467), 'numpy.stack', 'np.stack', (['[X, Y, Z]', '(2)'], {}), '([X, Y, Z], 2)\n', (2453, 2467), True, 'import numpy as np\n'), ((2591, 2635), 'numpy.array', 'np.array', (['[3.2406255, -1.537208, -0.4986286]'], {}), '([3.2406255, -1.537208, -0.4986286])\n', (2599, 2635), True, 'import numpy as np\n'), ((2644, 2665), 'numpy.dot', 'np.dot', (['XYZ', 'xyz_to_r'], {}), '(XYZ, xyz_to_r)\n', (2650, 2665), True, 'import numpy as np\n'), ((2681, 2725), 'numpy.array', 'np.array', (['[-0.9689307, 1.8757561, 0.0415175]'], {}), '([-0.9689307, 1.8757561, 0.0415175])\n', (2689, 2725), True, 'import numpy as np\n'), ((2734, 2755), 'numpy.dot', 'np.dot', (['XYZ', 'xyz_to_g'], {}), '(XYZ, xyz_to_g)\n', (2740, 2755), True, 'import numpy as np\n'), ((2771, 2815), 'numpy.array', 'np.array', (['[0.0557101, -0.2040211, 1.0569959]'], {}), '([0.0557101, -0.2040211, 1.0569959])\n', (2779, 2815), True, 'import numpy as np\n'), ((2824, 2845), 'numpy.dot', 'np.dot', (['XYZ', 'xyz_to_b'], {}), '(XYZ, xyz_to_b)\n', (2830, 2845), True, 'import numpy as np\n'), ((2861, 2888), 'numpy.stack', 'np.stack', (['[r, g, b]'], {'axis': '(2)'}), '([r, g, b], axis=2)\n', (2869, 2888), True, 'import numpy as np\n'), ((2905, 2940), 'numpy.where', 'np.where', (['(rgb_img2 < 0)', '(0)', 'rgb_img2'], {}), '(rgb_img2 < 0, 0, rgb_img2)\n', (2913, 2940), True, 'import numpy as np\n'), ((3297, 3320), 'numpy.clip', 'np.clip', (['rgb_img2', '(0)', '(1)'], {}), '(rgb_img2, 0, 1)\n', (3304, 3320), True, 'import numpy as np\n'), ((228, 266), 'numpy.where', 'np.where', (['(sd_light_source[:, 0] >= 400)'], {}), '(sd_light_source[:, 0] >= 400)\n', (236, 266), True, 'import numpy as np\n'), ((745, 783), 'numpy.where', 'np.where', (['(sd_light_source[:, 0] >= 400)'], {}), '(sd_light_source[:, 0] >= 400)\n', (753, 783), True, 'import numpy as np\n'), ((1435, 1461), 'numpy.where', 'np.where', (['(cmf[:, 0] >= 400)'], {}), '(cmf[:, 0] >= 400)\n', (1443, 1461), True, 'import numpy as np\n'), ((1552, 1567), 'pathlib.Path', 'Path', (['dist_name'], {}), '(dist_name)\n', (1556, 1567), False, 'from pathlib import Path\n'), ((3056, 3085), 'numpy.power', 'np.power', (['(rgb_img2 / 255)', '(0.6)'], {}), '(rgb_img2 / 255, 0.6)\n', (3064, 3085), True, 'import numpy as np\n'), ((2233, 2242), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (2239, 2242), True, 'import numpy as np\n'), ((2296, 2305), 'numpy.sum', 'np.sum', 
(['y'], {}), '(y)\n', (2302, 2305), True, 'import numpy as np\n'), ((3374, 3398), 'numpy.uint8', 'np.uint8', (['(255 * rgb_img2)'], {}), '(255 * rgb_img2)\n', (3382, 3398), True, 'import numpy as np\n'), ((3438, 3462), 'numpy.uint8', 'np.uint8', (['(255 * rgb_img2)'], {}), '(255 * rgb_img2)\n', (3446, 3462), True, 'import numpy as np\n'), ((3247, 3274), 'numpy.power', 'np.power', (['rgb_img2', '(1 / 2.4)'], {}), '(rgb_img2, 1 / 2.4)\n', (3255, 3274), True, 'import numpy as np\n')]
|
import pyaudio
import numpy as np
import sys
import time
import asyncio
from aiohttp import web, WSMsgType
import json
import os
import struct
import websocket
HOST = os.getenv('HOST', '0.0.0.0')
PORT = int(os.getenv('PORT', 8080))
SAMPLE_RATE = 44100
CHUNK_SIZE = 4096
AUDIO_FORMAT = pyaudio.paInt16
FORMAT = np.int16
def calculate_levels(data, chunk,sample_rate):
# Apply FFT - real data so rfft used
fourier=np.fft.rfft(data)
# Remove last element in array to make it the same size as chunk
fourier=np.delete(fourier,len(fourier)-1)
#fourier = fourier[0:256]
# Find amplitude
power = np.log10(np.abs(fourier))**2
# Arrange array into 256 rows for the Unicorn HAT HD
power = np.reshape(power,(256,8))
matrix= np.average(power,axis=1)
return list(matrix.astype(int).astype(float))
def calculate_spect(data, chunk):
data_int = struct.unpack(str(2 * chunk) + 'B', data)
yf = np.fft.rfft(data_int)
spect = np.abs(yf[256:512]) / (128 * chunk)
max_v = np.max(spect)
# hist = np.histogram(spect, 256)
return list(spect.astype(float)), max_v.astype(float)
# return list(hist[0].astype(float)), max_v.astype(float)
def audio_analyse(stream):
signal = np.frombuffer(stream.read(CHUNK_SIZE, exception_on_overflow = False), FORMAT)
# levels = calculate_levels(signal, CHUNK_SIZE, SAMPLE_RATE)
levels, max_v = calculate_spect(signal, CHUNK_SIZE)
return json.dumps({'data':levels,'max':max_v})
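# Each reply sent over the websocket is this JSON string: {"data": [...], "max": m},
# where "data" holds the 256 spectrum bins and "max" the peak bin value.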
async def connection_test(request):
return web.Response(text='Connection test')
async def websocket_handler(request):
print('Websocket connection starting')
ws = web.WebSocketResponse()
await ws.prepare(request)
print('Websocket connection ready')
# rgb = audio_analyse(stream)
async for msg in ws:
levels = audio_analyse(stream)
if msg.type == WSMsgType.TEXT:
if msg.data == 'close':
await ws.close()
else:
await ws.send_str(levels)
print('Websocket connection closed')
return ws
def main():
loop = asyncio.get_event_loop()
app = web.Application(loop=loop)
app.router.add_route('GET', '/', connection_test)
app.router.add_route('GET', '/ws', websocket_handler)
web.run_app(app, host=HOST, port=PORT)
if __name__ == '__main__':
p = pyaudio.PyAudio()
stream = p.open(format=AUDIO_FORMAT, channels=1, rate=SAMPLE_RATE, input=True, frames_per_buffer=CHUNK_SIZE)
main()
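# Illustrative client sketch (not part of this module); assumes the server above
# is running locally. Any text message triggers one spectrum frame in reply.
#
#   import asyncio
#   import aiohttp
#
#   async def poll_once():
#       async with aiohttp.ClientSession() as session:
#           async with session.ws_connect('http://localhost:8080/ws') as ws:
#               await ws.send_str('next')
#               frame = await ws.receive_json()
#               print(frame['max'], len(frame['data']))
#
#   asyncio.run(poll_once())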
|
[
"numpy.fft.rfft",
"numpy.average",
"aiohttp.web.Response",
"aiohttp.web.WebSocketResponse",
"asyncio.get_event_loop",
"numpy.abs",
"aiohttp.web.Application",
"json.dumps",
"numpy.max",
"numpy.reshape",
"aiohttp.web.run_app",
"pyaudio.PyAudio",
"os.getenv"
] |
[((169, 197), 'os.getenv', 'os.getenv', (['"""HOST"""', '"""0.0.0.0"""'], {}), "('HOST', '0.0.0.0')\n", (178, 197), False, 'import os\n'), ((209, 232), 'os.getenv', 'os.getenv', (['"""PORT"""', '(8080)'], {}), "('PORT', 8080)\n", (218, 232), False, 'import os\n'), ((424, 441), 'numpy.fft.rfft', 'np.fft.rfft', (['data'], {}), '(data)\n', (435, 441), True, 'import numpy as np\n'), ((718, 745), 'numpy.reshape', 'np.reshape', (['power', '(256, 8)'], {}), '(power, (256, 8))\n', (728, 745), True, 'import numpy as np\n'), ((756, 781), 'numpy.average', 'np.average', (['power'], {'axis': '(1)'}), '(power, axis=1)\n', (766, 781), True, 'import numpy as np\n'), ((932, 953), 'numpy.fft.rfft', 'np.fft.rfft', (['data_int'], {}), '(data_int)\n', (943, 953), True, 'import numpy as np\n'), ((1014, 1027), 'numpy.max', 'np.max', (['spect'], {}), '(spect)\n', (1020, 1027), True, 'import numpy as np\n'), ((1438, 1480), 'json.dumps', 'json.dumps', (["{'data': levels, 'max': max_v}"], {}), "({'data': levels, 'max': max_v})\n", (1448, 1480), False, 'import json\n'), ((1526, 1562), 'aiohttp.web.Response', 'web.Response', ([], {'text': '"""Connection test"""'}), "(text='Connection test')\n", (1538, 1562), False, 'from aiohttp import web, WSMsgType\n'), ((1654, 1677), 'aiohttp.web.WebSocketResponse', 'web.WebSocketResponse', ([], {}), '()\n', (1675, 1677), False, 'from aiohttp import web, WSMsgType\n'), ((2097, 2121), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2119, 2121), False, 'import asyncio\n'), ((2132, 2158), 'aiohttp.web.Application', 'web.Application', ([], {'loop': 'loop'}), '(loop=loop)\n', (2147, 2158), False, 'from aiohttp import web, WSMsgType\n'), ((2275, 2313), 'aiohttp.web.run_app', 'web.run_app', (['app'], {'host': 'HOST', 'port': 'PORT'}), '(app, host=HOST, port=PORT)\n', (2286, 2313), False, 'from aiohttp import web, WSMsgType\n'), ((2351, 2368), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (2366, 2368), False, 'import pyaudio\n'), ((966, 985), 'numpy.abs', 'np.abs', (['yf[256:512]'], {}), '(yf[256:512])\n', (972, 985), True, 'import numpy as np\n'), ((629, 644), 'numpy.abs', 'np.abs', (['fourier'], {}), '(fourier)\n', (635, 644), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torchvision
from omegaconf import OmegaConf
import hydra
import phyre
from phyre_simulator import PhyreSimulator # pylint: disable=unused-import
from losses import * # pylint: disable=wildcard-import,unused-wildcard-import
from preproc import * # pylint: disable=wildcard-import,unused-wildcard-import
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device('cuda:0' if USE_CUDA else 'cpu')
np.random.seed(42)
class ActionNetwork(nn.Module):
def __init__(self, action_size, output_size, hidden_size=256,
num_layers=1):
super().__init__()
self.layers = nn.ModuleList([nn.Linear(action_size, hidden_size)])
for _ in range(1, num_layers):
self.layers.append(nn.Linear(hidden_size, hidden_size))
self.output = nn.Linear(hidden_size, output_size)
def forward(self, tensor):
for layer in self.layers:
tensor = nn.functional.relu(layer(tensor), inplace=True)
return self.output(tensor)
class FilmActionNetwork(nn.Module):
def __init__(self, action_size, output_size, **kwargs):
super().__init__()
self.net = ActionNetwork(action_size, output_size * 2, **kwargs)
def forward(self, actions, image):
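        # FiLM-style feature-wise modulation: the action embedding is mapped to a
        # per-channel scale and shift, broadcast over the spatial dimensions.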
beta, gamma = torch.chunk(
self.net(actions).unsqueeze(-1).unsqueeze(-1), chunks=2, dim=1)
return image * beta + gamma
class SimpleNetWithAction(nn.Module):
def __init__(self, action_size, action_network_kwargs=None):
super().__init__()
action_network_kwargs = action_network_kwargs or {}
self.stem = nn.Sequential(
nn.Conv2d(phyre.NUM_COLORS, 3, kernel_size=1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=True),
nn.Conv2d(3, 64, kernel_size=7, stride=4, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=5, stride=2, padding=2,
bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=5, stride=2, padding=2,
bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
)
self.action_net = ActionNetwork(action_size, 128,
**action_network_kwargs)
@property
def device(self):
if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:
return 'cuda'
else:
return 'cpu'
def preprocess(self, observations):
device = self.device
image = _image_colors_to_onehot(
observations.to(dtype=torch.long, device=device))
return dict(features=self.stem(image).squeeze(-1).squeeze(-1))
def forward(self, observations, actions, preprocessed=None):
if preprocessed is None:
preprocessed = self.preprocess(observations)
return self._forward(actions, **preprocessed)
def _forward(self, actions, features):
actions = self.action_net(actions.to(features.device))
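        # Scaled dot product between the action embedding and the image features,
        # divided by sqrt(d) as in dot-product attention.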
return (actions * features).sum(-1) / (actions.shape[-1]**0.5)
def ce_loss(self, decisions, targets):
targets = torch.ByteTensor(targets).float().to(decisions.device)
return nn.functional.binary_cross_entropy_with_logits(
decisions, targets)
def _get_fusution_points(fusion_place_spec, max_points):
if fusion_place_spec == 'all':
return tuple(range(max_points))
elif fusion_place_spec == 'none':
return tuple()
else:
        return (int(fusion_place_spec), )
class ResNet18FilmAction(nn.Module):
def __init__(self,
action_size,
action_layers=1,
action_hidden_size=256,
fusion_place='last'):
super().__init__()
net = torchvision.models.resnet18(pretrained=False)
conv1 = nn.Conv2d(phyre.NUM_COLORS,
64,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.register_buffer('embed_weights', torch.eye(phyre.NUM_COLORS))
self.stem = nn.Sequential(conv1, net.bn1, net.relu, net.maxpool)
self.stages = nn.ModuleList(
[net.layer1, net.layer2, net.layer3, net.layer4])
def build_film(output_size):
return FilmActionNetwork(action_size,
output_size,
hidden_size=action_hidden_size,
num_layers=action_layers)
assert fusion_place in ('first', 'last', 'all', 'none', 'last_single')
self.last_network = None
if fusion_place == 'all':
self.action_networks = nn.ModuleList(
[build_film(size) for size in (64, 64, 128, 256)])
elif fusion_place == 'last':
# Save module as attribute.
self._action_network = build_film(256)
self.action_networks = [None, None, None, self._action_network]
elif fusion_place == 'first':
# Save module as attribute.
self._action_network = build_film(64)
self.action_networks = [self._action_network, None, None, None]
elif fusion_place == 'last_single':
# Save module as attribute.
self.last_network = build_film(512)
self.action_networks = [None, None, None, None]
elif fusion_place == 'none':
self.action_networks = [None, None, None, None]
else:
raise Exception('Unknown fusion place: %s' % fusion_place)
self.reason = nn.Linear(512, 1)
@property
def device(self):
if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:
return 'cuda'
else:
return 'cpu'
def preprocess(self, observations):
image = self._image_colors_to_onehot(observations)
features = self.stem(image)
for stage, act_layer in zip(self.stages, self.action_networks):
if act_layer is not None:
break
features = stage(features)
else:
features = nn.functional.adaptive_max_pool2d(features, 1)
return dict(features=features)
def forward(self, observations, actions, preprocessed=None):
if preprocessed is None:
preprocessed = self.preprocess(observations)
return self._forward(actions, **preprocessed)
def _forward(self, actions, features):
actions = actions.to(features.device)
skip_compute = True
for stage, film_layer in zip(self.stages, self.action_networks):
if film_layer is not None:
skip_compute = False
features = film_layer(actions, features)
if skip_compute:
continue
features = stage(features)
if not skip_compute:
features = nn.functional.adaptive_max_pool2d(features, 1)
if self.last_network is not None:
features = self.last_network(actions, features)
features = features.flatten(1)
if features.shape[0] == 1 and actions.shape[0] != 1:
# Haven't had a chance to use actions. So will match batch size as
# in actions manually.
features = features.expand(actions.shape[0], -1)
return self.reason(features).squeeze(-1)
def ce_loss(self, decisions, targets):
targets = targets.to(dtype=torch.float, device=decisions.device)
return nn.functional.binary_cross_entropy_with_logits(
decisions, targets)
def _image_colors_to_onehot(self, indices):
onehot = torch.nn.functional.embedding(
indices.to(dtype=torch.long, device=self.embed_weights.device),
self.embed_weights)
onehot = onehot.permute(0, 3, 1, 2).contiguous()
return onehot
def _image_colors_to_onehot(indices):
onehot = torch.nn.functional.embedding(
indices, torch.eye(phyre.NUM_COLORS, device=indices.device))
onehot = onehot.permute(0, 3, 1, 2).contiguous()
return onehot
def gen_dyn_conv(dim_in, dim_out):
# Switched to 1x1 kernels since I might be running it on 1x1 features too.
# Using vector features when using object representation
conv = nn.Conv2d(dim_in,
dim_out,
kernel_size=1,
stride=1,
padding=0,
bias=False)
return conv
class DynConcat(nn.Module):
"""Simple dynamics model, that concats the features and 2 layer MLP."""
def __init__(self, encoder, dim, n, nobj):
super().__init__()
del encoder # This one doesn't need it
self.dyn = nn.Sequential(gen_dyn_conv(dim * n * nobj, dim * nobj),
nn.ReLU(inplace=True),
gen_dyn_conv(dim * nobj, dim * nobj),
nn.ReLU(inplace=True),
gen_dyn_conv(dim * nobj, dim * nobj))
def forward(self, features, pixels):
"""
This dyn model does not use pixels, so will just return the last history
frame
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, Nobj, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
pixels: (B, Nobj, C, H, W)
addl_losses: {}
"""
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
future_feat = torch.reshape(self.dyn(cat_feats),
features.shape[:1] + features.shape[2:])
# Skip connection, add the last frames features, so it stops
# deleting things
pred = features[:, -1, ...] + future_feat
return pred, pixels[:, -1, ...], {}
class MultiSTN(nn.Module):
"""Multi spatial transformer network: predicts multiple transformations
and applies to parts of the input feature, split on the channel dim."""
def __init__(self,
input_dim,
num_tx,
dof='affine',
inp_type='pix',
affine_tx_mode='bilinear',
kernel_size=3,
stochastic=False):
"""
Args:
input_dim (int): Dimension of the features used to predict the STN
parameters
num_tx (int): Number of transformations to predict, will apply to
the tensor, split along some dimension
dof (str): Controls how generic of a affine matrix to predict.
If 'affine', will predict a generic 3x2 matrix
If 'rot-trans-only', it will only predict theta, x, y,
and use those to construct the affine matrix. So it will force
the matrix to not do any shear, scale etc.
Similarly for 'rot-only' and 'trans-only'
inp_type (str): Defines the type of the input. 'pix' is the default,
to directly transform the grid and move the pixels. 'pt' is the
PointNet style format, where the first 2 dimensions of each
split of the channels must correspond to the X, Y location, and
the transforms will just modify those dimensions, and not
touch the pixel values at all.
affine_tx_mode (str): The mode to use for grid_sample
kernel_size (int)
stochastic (bool): If true, predict a distribution over the affine
matrix, instead of deterministically.
"""
super().__init__()
self.num_tx = num_tx
self.dof = dof
self.inp_type = inp_type
self.affine_tx_mode = affine_tx_mode
# Spatial transformer localization-network
self.localization = nn.Sequential(
nn.Conv2d(input_dim,
8 * num_tx,
kernel_size=kernel_size,
padding=kernel_size // 2), nn.ReLU(True),
nn.Conv2d(8 * num_tx,
10 * num_tx,
kernel_size=kernel_size,
padding=kernel_size // 2), nn.ReLU(True))
# Regressor for the affine matrices
# Predicting 3x2 parameters that should be enough for any generic
# affine transformation, though will subselect in case only few
# parameters are needed
self.stochastic = stochastic
if self.stochastic:
self.fc_loc_mean = nn.Linear(10 * num_tx, 10 * num_tx)
self.fc_loc_logvar = nn.Linear(10 * num_tx, 10 * num_tx)
self.fc_loc = nn.Sequential(nn.Linear(10 * num_tx, 32 * num_tx),
nn.ReLU(True),
nn.Linear(32 * num_tx, num_tx * 3 * 2))
# Initialize the weights/bias with identity transformation
self.fc_loc[2].weight.data.zero_()
        if self.dof != 'affine':  # The parameters would be used for rot/trans
            self.fc_loc[2].bias.data.zero_()  # 0 rot/translation by default
        else:
            self.fc_loc[2].bias.data.copy_(
                torch.from_numpy(
                    np.array([1, 0, 0, 0, 1, 0] * num_tx, dtype=np.float64)))
def transform_pix(self, feat, theta, mode='bilinear'):
"""Transform the features using theta."""
grid = nn.functional.affine_grid(theta,
feat.size(),
align_corners=True)
return nn.functional.grid_sample(feat,
grid,
mode=mode,
align_corners=True)
def transform_pt(self, feat, theta):
"""Transform pt-net style feature using theta.
Here, it assumes the first 2 dimensions of the feature are loc.
Args:
feat (B, C, H, W), C >= 2
Returns:
tx feat (B, C, H, W)
"""
assert feat.shape[1] >= 2
feat_pos = feat[:, :2, ...]
feat_pos_ones = torch.ones_like(feat[:, :1, ...])
feat_pos_aug = torch.cat([feat_pos, feat_pos_ones], dim=1)
feat_pos_aug = feat_pos_aug.view(feat.shape[:1] + (3, -1))
feat_pos_aug_end = feat_pos_aug.transpose(1, 2).unsqueeze(-1)
txed = torch.matmul(theta.unsqueeze(1), feat_pos_aug_end)
tx_feat_pos = txed.squeeze(-1).transpose(1, 2).view(feat_pos.shape)
# Attach the features to it
tx_feat = torch.cat([tx_feat_pos, feat[:, 2:, ...]], dim=1)
return tx_feat
def _compute_loc_stochastic(self, feat_hist):
# from https://github.com/pytorch/examples/blob/master/vae/main.py#L53
mean = self.fc_loc_mean(feat_hist)
logvar = self.fc_loc_logvar(feat_hist)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
latent_var_z = mean + eps * std
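        # Reparameterization trick above; KL to a unit Gaussian prior below:
        # KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)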
kl_loss = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())
return self.fc_loc(latent_var_z), kl_loss
def forward(self, feat_for_tx, feat_to_tx, split_dim=1):
"""
Args:
feat_for_tx (B, D, H, W): The features to use to compute the
transformation
feat_to_tx (B, D', H, W): Features to apply the tx onto
split_dim (int): Dimension to split on
"""
feat_hist_embed = self.localization(feat_for_tx)
# Average out the spatial dimension
feat_hist_embed = torch.mean(feat_hist_embed, dim=[-2, -1])
addl_losses = {}
if self.stochastic:
pred, kl_loss = self._compute_loc_stochastic(feat_hist_embed)
addl_losses['kl'] = kl_loss
else:
pred = self.fc_loc(feat_hist_embed)
if self.dof != 'affine':
pred = pred.view(-1, self.num_tx, 3 * 2)
# Say the first number is actual angle, and next 2 are x, y
angle = pred[..., :1]
pos_x = pred[..., 1:2]
pos_y = pred[..., 2:3]
if self.dof == 'rot-only':
pos_x = torch.zeros_like(pos_x)
pos_y = torch.zeros_like(pos_y)
elif self.dof == 'trans-only':
angle = torch.zeros_like(angle)
else:
assert self.dof == 'rot-trans-only', 'The only other option'
cos_angle = torch.cos(angle)
sin_angle = torch.sin(angle)
# create the 2x3 matrix out of this
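            # Laid out row-major as [[cos, sin, tx], [-sin, cos, ty]]: a pure
            # rotation plus translation, i.e. no shear or scale terms.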
theta = torch.cat(
[cos_angle, sin_angle, pos_x, -sin_angle, cos_angle, pos_y],
dim=-1)
theta = theta.view(theta.shape[:-1] + (2, 3))
elif self.dof == 'affine':
theta = pred.view(-1, self.num_tx, 2, 3)
else:
raise NotImplementedError('Unknown {}'.format(self.dof))
# Split the channels of feat_to_tx into num_tx groups, and apply the
# transformations to each of those groups
assert feat_to_tx.shape[split_dim] % self.num_tx == 0, (
'Must be divisible to ensure equal sized chunks')
# Chunk it
feat_to_tx_parts = torch.chunk(feat_to_tx, self.num_tx, split_dim)
# Apply the corresponding transformation to each part
if self.inp_type == 'pix':
tx_fn = partial(self.transform_pix, mode=self.affine_tx_mode)
elif self.inp_type == 'pt':
tx_fn = self.transform_pt
else:
raise NotImplementedError('Unknown type {}'.format(self.inp_type))
feat_to_tx_parts_txed = [
tx_fn(el, theta[:, i, ...])
for i, el in enumerate(feat_to_tx_parts)
]
return torch.cat(feat_to_tx_parts_txed, dim=split_dim), addl_losses
class DynSTN(nn.Module):
"""Spatial Transformer based dynamics model."""
def __init__(self, encoder, dim, n, nobj, num_tx, base_stn):
super().__init__()
del encoder # This one doesn't need it
assert nobj == 1 or nobj == num_tx, (
'Either split the 1 object features and tx, or tx each obj sep')
self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj, num_tx)
def forward(self, features, pixels):
"""
This dyn model does not use pixels, so will just return the last history
frame
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, Nobj, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
pix
addl_losses
"""
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
# For > 1 objs, just flatten Nobj and D channels, and the STN class
# will split it back to do the transformations
feat_obj_flat = torch.flatten(features, 2, 3)
new_feat, addl_loses = self.dyn(cat_feats, feat_obj_flat[:, -1, ...])
future_feat = torch.reshape(new_feat,
features.shape[:1] + features.shape[2:])
return future_feat, pixels[:, -1, ...], addl_loses
class DynSTNPixels_DEPRECATED(nn.Module):
"""Spatial Transformer based dynamics model, applied on pixels.
Use DynSTNPixelChannelsDetBg"""
def __init__(self, encoder, dim, n, nobj, num_tx, base_stn):
super().__init__()
self.enc = encoder
self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj, num_tx)
self.num_tx = num_tx
# A network to predict num_tx attention maps
self.attention = nn.Sequential(
gen_deconv(dim * n * nobj, num_tx),
*([gen_deconv(num_tx, num_tx, upsample_factor=4)] * 2),
nn.Conv2d(num_tx, num_tx, kernel_size=1, padding=0, bias=False),
nn.Softmax(dim=1))
def forward(self, features, pixels):
"""
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
"""
raise NotImplementedError('Deal with objectified pixel input. '
'Also deal with addl losses. ')
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
assert features.shape[2] == 1, 'Not implemented yet for >1 objs'
# Repmat the image channels num_tx times, so STN can predict those many
# transformations
pixels_tiled = pixels.repeat(1, 1, self.num_tx, 1, 1)
future_pixels_tiled = self.dyn(cat_feats, pixels_tiled[:, -1, ...])
# Compute attention maps for compositing
attention_maps = self.attention(cat_feats)
# Do a weighted sum of the channels using the attention maps
attention_maps_split = torch.chunk(attention_maps, self.num_tx, 1)
future_pixels_split = torch.chunk(future_pixels_tiled, self.num_tx, 1)
weighted = [
att * pix
for att, pix in zip(attention_maps_split, future_pixels_split)
]
future_pixels = torch.mean(torch.stack(weighted), dim=0)
# Since this is a new image being generated, need to pass through the
# encoder to get the features for this image
future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
return future_feat, future_pixels
class DynSTNPixelChannels_DEPRECATED(nn.Module):
"""Spatial Transformer based dynamics model, applied on channels of img.
Use DynSTNPixelChannelsDetBg"""
def __init__(self, encoder, dim, n, nobj, base_stn):
super().__init__()
self.enc = encoder
self.num_tx = phyre.NUM_COLORS # One tx per color
self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj,
self.num_tx)
def forward(self, features, pixels):
"""
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
"""
raise NotImplementedError('Deal with objectified pixel input. '
'Also deal with addl losses. ')
assert (pixels.shape[2] == self.num_tx or
pixels.shape[2] == self.num_tx * 3), 'In pix or pt mode so far'
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
assert features.shape[2] == 1, 'Not implemented yet for >1 objs'
future_pixels = self.dyn(cat_feats, pixels[:, -1, ...])
# Since this is a new image being generated, need to pass through the
# encoder to get the features for this image
future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
return future_feat, future_pixels
class DynSTNPixelChannelsGenBg_DEPRECATED(nn.Module):
"""Spatial Transformer based dynamics model, applied on channels of img.
Generates the background.
Use DynSTNPixelChannelsDetBg
"""
def __init__(self, encoder, dim, n, nobj, base_stn):
super().__init__()
self.enc = encoder
# One tx per color, except background that is generated since it's not
# an object that can be moved like others. Just a 1x1 convolution on
# the predicted image to gen the last channel
self.num_tx = phyre.NUM_COLORS - 1
self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj,
self.num_tx)
# Just a couple layer should suffice, over the last frame, and new frame
# feature
self.bg_dec = nn.Sequential(
nn.Conv2d(2 * phyre.NUM_COLORS - 1,
8,
kernel_size=1,
stride=1,
padding=0,
bias=False), nn.ReLU(inplace=True),
nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0, bias=False))
def forward(self, features, pixels):
"""
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
"""
raise NotImplementedError('Deal with objectified pixel input. '
'Also deal with addl losses. ')
assert (pixels.shape[2] - 1) == self.num_tx
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
assert features.shape[2] == 1, 'Not implemented yet for >1 objs'
future_pixels_obj = self.dyn(cat_feats, pixels[:, -1, 1:, ...])
future_pixels_bg = self.bg_dec(
torch.cat([pixels[:, -1, ...], future_pixels_obj], dim=1))
future_pixels = torch.cat([future_pixels_bg, future_pixels_obj], dim=1)
# Since this is a new image being generated, need to pass through the
# encoder to get the features for this image
future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
return future_feat, future_pixels
class DynSTNPixelChannelsDetBg(nn.Module):
"""Spatial Transformer based dynamics model, applied on channels of img.
Generates the background deterministically, using the change.
"""
def __init__(self,
encoder,
dim,
n,
nobj,
base_stn,
movable_ch,
movable_only=False):
super().__init__()
self.enc = encoder
self.movable_only = movable_only
# One tx per color (or movable colors, if that is set),
# except background that is generated since it's not
# an object that can be moved like others.
if self.movable_only:
self.movable_channels = torch.LongTensor(movable_ch)
else:
self.movable_channels = torch.arange(1, phyre.NUM_COLORS)
self.num_tx = len(self.movable_channels)
self.nobj = nobj
self.dyn = hydra.utils.instantiate(base_stn, dim * n * nobj,
self.num_tx * nobj)
def forward(self, features, pixels):
"""
Args:
features: (B, T, Nobj, D, H', W')
pixels: (B, T, Nobj, C, H, W)
Returns:
pred: (B, Nobj, D, H', W')
pix
addl_losses
"""
assert pixels.shape[3] >= self.num_tx
cat_feats = torch.reshape(features, (features.shape[0], -1) +
features.shape[-2:])
pixels_movable = pixels[:, -1, :, self.movable_channels, ...]
# combine all channels of objects and transform
pixels_movable_flat = torch.flatten(pixels_movable, 1, 2)
future_pixels_flat_movable, addl_losses = self.dyn(
cat_feats, pixels_movable_flat)
future_pixels_movable = future_pixels_flat_movable.view(
pixels_movable.shape)
future_pixels = pixels[:, -1, ...] # Copy most of the channels
future_pixels[:, :, self.movable_channels, ...] = future_pixels_movable
# Compute the background deterministically, where all other channels
# are 0s, it has to be 1. So make channels sum to 1.
future_pixels_bg = 1.0 - torch.sum(
future_pixels[:, :, 1:, ...], dim=2, keepdims=True)
future_pixels[:, :, :1, ...] = future_pixels_bg
# Since this is a new image being generated, need to pass through the
# encoder to get the features for this image
future_feat = self.enc(future_pixels.unsqueeze(1))[:, 0, ...]
return future_feat, future_pixels, addl_losses
def gen_deconv(in_dim,
out_dim,
stride=1,
kernel_size=3,
padding=1,
upsample_factor=2,
inst_norm=False,
activation=nn.ReLU(inplace=True)):
return nn.Sequential(
nn.ConvTranspose2d(in_dim,
out_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False),
# nn.Sequential() simulates identity, if no instance norm to be added
nn.InstanceNorm2d(out_dim, affine=True)
if inst_norm else nn.Sequential(),
activation,
nn.Upsample(scale_factor=upsample_factor,
mode='bilinear',
align_corners=True))
class BasicDecoder(nn.Module):
"""Simple decoder, goes from features to frame representation."""
def __init__(self, in_dim, out_dim, nlayers, kernel_size, padding,
upsample_factor, decode_from, backprop_feat_ext, inst_norm,
activation):
super().__init__()
decoder_dim = 256
self.backprop_feat_ext = backprop_feat_ext
self.decode_from = decode_from
assert self.decode_from in ['pixels', 'features']
if self.decode_from == 'pixels':
in_dim = phyre.NUM_COLORS
decoder_dim = 16
activation = hydra.utils.instantiate(activation)
logging.warning('Using %s activation for decoders', activation)
inter_layers = [
gen_deconv(decoder_dim,
decoder_dim,
1,
kernel_size,
padding,
upsample_factor,
inst_norm,
activation=activation) for _ in range(nlayers)
]
self.deconv_net = nn.Sequential(
gen_deconv(in_dim,
decoder_dim,
1,
kernel_size,
padding,
upsample_factor,
activation=activation), *inter_layers,
gen_deconv(
decoder_dim,
out_dim,
1,
kernel_size,
padding,
upsample_factor,
activation=nn.Sequential())) # No activation on the last
def forward(self, features, pixels):
"""
Args:
features (BxNobjxDxH'xW'): Features to be decoded
pixels (BxNobjxCxHxW): Pixels generated by the dynamics model
Returns:
imgs (BxNobjxD_outxHxW): Output frames (per obj, aggregation is
done later in the Fwd class)
"""
if self.decode_from == 'pixels':
decode_feature = pixels
else:
decode_feature = features
if not self.backprop_feat_ext:
# Means train the decoder separately from the rest of the network,
# don't backprop gradients to the feature extractor
decode_feature = decode_feature.detach()
        # Sum the features over all the objects and do a single decode;
        # separate per-object decodes take far too much time
decode_feature = torch.sum(decode_feature, dim=1, keepdims=True)
features_flatten_obj = torch.flatten(decode_feature, 0, 1)
images = self.deconv_net(features_flatten_obj)
# Reshape back into object level
out = torch.reshape(images,
decode_feature.shape[:2] + images.shape[1:])
return out
class TrivialDecoder(nn.Module):
"""Trivial decoder, simply outputs the frames from the dynamics model."""
def __init__(self, in_dim, out_dim):
super().__init__()
del in_dim, out_dim
def forward(self, features, pixels):
"""
Args:
features (BxNobjxDxH'xW'): Features to be decoded
pixels (BxNobjxCxHxW): Pixels generated by the dynamics model
Returns:
imgs (BxNobjxCxHxW): Output frames
"""
del features # assumes the dynamics model will do all decoding
return pixels
def average_losses(all_losses):
"""Average the losses into one dict of losses.
Args:
all_losses: List of dictionary of losses.
Returns:
combined: A dictionary with same keys as individual dicts, with
all losses combined.
"""
if len(all_losses) == 0:
return {}
combined = {}
for key, val in all_losses[0].items():
if not isinstance(val, torch.Tensor):
# If it's none or sth.. eg some loss was not active
combined[key] = val
else:
# Average all the values
stkd = torch.stack([el[key] for el in all_losses])
# Average the losses that are positive, since I set undefined
# losses to -1 (where not enough GT is available, etc)
combined[key] = torch.mean(stkd * (stkd >= 0), dim=0)
return combined
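# E.g., average_losses([{'kl': t1}, {'kl': t2}]) returns {'kl': mean of the stacked
# tensors, with negative (undefined, set to -1) entries zeroed out before averaging}.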
class BasicObjEncoder(nn.Module):
"""Takes objectified representation, and puts it through more layers."""
def __init__(self,
in_dim,
out_dim,
nlayers,
kernel_size=3,
stride=1,
padding=1,
spatial_mean=True):
super().__init__()
if nlayers > 0:
self.out_dim = out_dim
else:
logging.warning('Ignoring the out_dim (%d) for ObjEncoder',
out_dim)
self.out_dim = in_dim
layers_lst = [[
nn.Conv2d(in_dim if i == 0 else out_dim,
out_dim,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=False),
nn.ReLU(inplace=True)
] for i in range(nlayers)]
layers_lst_flat = [item for sublist in layers_lst for item in sublist]
if len(layers_lst_flat) > 0:
layers_lst_flat = layers_lst_flat[:-1] # Remove the last relu
self.encoder = nn.Sequential(*layers_lst_flat)
else:
self.encoder = None
self.spatial_mean = spatial_mean
def forward(self, feat):
"""
Args:
feat: (B, T, Nobj, D, H', W')
"""
if self.encoder:
feat_flat = torch.flatten(feat, 0, 2)
obj_embed_flat = self.encoder(feat_flat)
obj_embed = torch.reshape(
obj_embed_flat, feat.shape[:3] + obj_embed_flat.shape[1:])
else:
obj_embed = feat
if self.spatial_mean:
obj_embed = torch.mean(obj_embed, dim=[-1, -2], keepdims=True)
return obj_embed
class ContextGatingObjectifier(nn.Module):
"""Takes intermediate representation and converts into object-level rep."""
def __init__(self, dim, obj_encoder, nobj=1):
super().__init__()
self.obj_mapper = nn.Sequential(
nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0,
bias=False), nn.ReLU(inplace=True),
nn.Conv2d(dim,
nobj,
kernel_size=1,
stride=1,
padding=0,
bias=False))
self.obj_encoder = hydra.utils.instantiate(obj_encoder, dim)
self.out_dim = self.obj_encoder.out_dim
def forward(self, vid_feat):
"""
Decompose the video features into object level representation.
Args:
vid_feat: (BxTxDxH'xW')
nobj (int): Max number of objects in the scene. The hope is that the
extra channels will just have some degenerate information
Returns:
BxTxNobjxDxH''xW''
"""
raise NotImplementedError('The inp is now objfied, TODO deal with it')
batch_size = vid_feat.shape[0]
# Use context gating: generate a heatmap for each object at each time
# step, and weight using that heatmap to get an object representation
flatten_feat = torch.flatten(vid_feat, 0, 1)
# Unsqueeze to add a channel dimension to the attention maps
obj_map = self.obj_mapper(flatten_feat).unsqueeze(2)
# Add a 1-D object dimension
flatten_feat = flatten_feat.unsqueeze(1)
# Weight the feats with the attention maps to get the object-features
mapped_feat = flatten_feat * obj_map
# Reshape to add the time dimension back
mapped_feat = torch.reshape(mapped_feat,
(batch_size, -1) + mapped_feat.shape[1:])
final_feat = self.obj_encoder(mapped_feat)
return final_feat
class ChannelSplitObjectifier(nn.Module):
"""Splits the channel of image representation to get obj rep."""
def __init__(self, dim, obj_encoder, nobj=1):
super().__init__()
self.nobj = nobj
self.obj_encoder = hydra.utils.instantiate(obj_encoder, dim // nobj)
self.out_dim = self.obj_encoder.out_dim
def forward(self, vid_feat):
"""
Decompose the video features into object level representation.
Args:
vid_feat: (BxTxNobjxDxH'xW')
Returns:
BxTxNobjx(D/Nobj)xH'xW'
"""
assert vid_feat.shape[2] == 1, (
'Channel split can not deal with pre objectified {} input'.format(
vid_feat.shape[2]))
assert vid_feat.shape[3] % self.nobj == 0, 'Must be divisible'
# Reshape the channel dimension to split into an object dimension
objed = vid_feat.view(vid_feat.shape[:2] + (self.nobj, -1) +
vid_feat.shape[-2:])
assert objed.shape[2] == self.nobj
assert objed.shape[3] == vid_feat.shape[3] / self.nobj
# Apply a little network to get a flat feature
obj_encoded = self.obj_encoder(objed)
return obj_encoded
class TrivialObjectifier(nn.Module):
"""Simply returns the feature.
    Earlier versions would unsqueeze here, but the component that splits the
    input already provides at least one object dimension, so no further
    unsqueeze is needed.
"""
def __init__(self, dim, obj_encoder, nobj=1):
super().__init__()
del obj_encoder
self.nobj = nobj
self.out_dim = dim
def forward(self, vid_feat):
assert vid_feat.shape[2] == self.nobj, ('{} != {}'.format(
vid_feat.shape[2], self.nobj))
return vid_feat
class SimpleBaseEncoder(nn.Module):
"""Simple network, simplified from Anton's version."""
def __init__(self, in_dim, width_scale_factor):
"""Simple encoder weights.
For a 256x256 input, it'll give a 4x4 output."""
super().__init__()
self.width_scale_factor = width_scale_factor
_s = self._scale_int
self.stem = nn.Sequential(
nn.Conv2d(in_dim, 3, kernel_size=1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=True),
nn.Conv2d(3,
_s(64),
kernel_size=7,
stride=2,
padding=3,
bias=False),
nn.BatchNorm2d(_s(64)),
nn.ReLU(inplace=True),
nn.Conv2d(_s(64),
_s(64),
kernel_size=5,
stride=2,
padding=2,
bias=False),
nn.BatchNorm2d(_s(64)),
nn.ReLU(inplace=True),
nn.Conv2d(_s(64),
_s(64),
kernel_size=5,
stride=2,
padding=2,
bias=False),
nn.BatchNorm2d(_s(64)),
nn.ReLU(inplace=True),
nn.Conv2d(_s(64),
_s(64),
kernel_size=5,
stride=2,
padding=2,
bias=False),
nn.BatchNorm2d(_s(64)),
nn.ReLU(inplace=True),
nn.Conv2d(_s(64),
_s(128),
kernel_size=5,
stride=2,
padding=2,
bias=False),
nn.BatchNorm2d(_s(128)),
nn.ReLU(inplace=True),
)
self.out_dim = _s(128)
def _scale_int(self, n):
"""Scale the number by a factor. To control width of this network."""
return int(self.width_scale_factor * n)
def forward(self, image):
return self.stem(image)
class ResNetBaseEncoder(nn.Module):
"""ResNet based feature extractor."""
def __init__(self, in_dim, base_model, nlayers):
super().__init__()
net = hydra.utils.instantiate(base_model)
conv1 = nn.Conv2d(in_dim,
64,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.stem = nn.Sequential(conv1, net.bn1, net.relu, net.maxpool)
self.stages = nn.ModuleList(
[getattr(net, 'layer%d' % (i + 1)) for i in range(nlayers)])
last_stage = self.stages[-1][-1]
if hasattr(last_stage, 'bn3'):
self.out_dim = last_stage.bn3.num_features
elif hasattr(last_stage, 'bn2'):
self.out_dim = last_stage.bn2.num_features
else:
raise ValueError('This should not happen')
def forward(self, image):
features = self.stem(image)
for stage in self.stages:
features = stage(features)
return features
class BasicEncoder(nn.Module):
"""Encode pixels to features."""
def __init__(self, in_dim, nobj, feat_ext, objectifier, obj_encoder,
spatial_mean, feat_ext_eval_mode, process_objs_together):
"""
Args:
obj_before_enc: If true, do the objectify in the input (pixel) space
before running the encode (so each object is encoded separately)
spatial_mean: Avg pool the features to 1x1
feat_ext_eval_mode: Set the feature extractor to eval mode for BN,
dropout etc
process_objs_together: If true, it will concatenate all objs on the
channel dimension, extract features, and split the features
in channel dimensions to get features for each obj
"""
super().__init__()
self.nobj = nobj
self.process_objs_together = process_objs_together
# The image embedding model
self.feat_ext = hydra.utils.instantiate(
feat_ext, in_dim * nobj if self.process_objs_together else in_dim)
initial_dim = self.feat_ext.out_dim
# The objects model
self.objectifier = hydra.utils.instantiate(objectifier, initial_dim,
obj_encoder)
self.out_dim = self.objectifier.out_dim
if self.process_objs_together:
assert self.out_dim % nobj == 0
self.out_dim //= nobj
self.spatial_mean = spatial_mean
self.feat_ext_eval_mode = feat_ext_eval_mode
def _forward_vid(self, batch_vid_obs, l2_norm_feats=False):
"""
Convert a video into images to run the forward model.
Args:
batch_vid_obs: BxTxCxHxW or BxTxNobjxCxHxW
Returns:
features: BxTxDxH'xW' or BxTxNobjxDxH'xW'
"""
# Add an object dimension, so the rest of the code doesn't have to
# deal with edge cases
added_obj_dim = False
if len(batch_vid_obs.shape) == 4:
added_obj_dim = True
batch_vid_obs = batch_vid_obs.unsqueeze(2) # BxTxNobjxCxHxW
# Flatten videos into frames to extract out the features
# resulting shape B'xC'xHxW
if self.process_objs_together:
# resulting shape B' = B * T, C' = Nobj * C
flat_obs = batch_vid_obs.reshape((-1, ) + batch_vid_obs.shape[-4:])
flat_obs = torch.flatten(flat_obs, 1, 2)
else:
# resulting shape B' = B * T * Nobj, C' = C
flat_obs = batch_vid_obs.reshape((-1, ) + batch_vid_obs.shape[-3:])
# Extract features
if self.feat_ext_eval_mode:
self.feat_ext.eval()
features = self.feat_ext(flat_obs)
if self.spatial_mean:
# Mean over spatial dimensions
features = torch.mean(features, dim=[-2, -1], keepdims=True)
if l2_norm_feats:
# L2 normalize the features -- MemoryBank, MoCo and PIRL do that
features = nn.functional.normalize(features, p=2, dim=-1)
# Reshape back to original batch dimension
if self.process_objs_together:
features_batched = features.reshape(batch_vid_obs.shape[:2] +
(self.nobj, -1) +
features.shape[-2:])
else:
features_batched = features.reshape(batch_vid_obs.shape[:-3] +
features.shape[1:])
if added_obj_dim:
features_batched = features_batched.squeeze(2)
assert features_batched.shape[-3] == self.out_dim
return features_batched
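    # Shape sketch for _forward_vid (an illustration, assuming
    # process_objs_together=False and spatial_mean=False): an input of
    # B x T x Nobj x C x H x W is flattened to (B*T*Nobj) x C x H x W for the
    # feature extractor, then reshaped back to B x T x Nobj x D x H' x W'.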
def forward(self, vid):
"""
Args:
vid (B, T, Nobj, C, H, W): Input video, in preprocessed form; i.e.
one-hot
Returns:
obj_feat (B, T, Nobj', D, H', W'): Features with objects, if needed
"""
vid_feat = self._forward_vid(vid)
vid_feat = self.objectifier(vid_feat)
return vid_feat
def combine_obj_pixels(obj_pix, obj_dim):
"""Combine obj-split pixels into a single image.
Args:
obj_pix: B, ..., Nobj, ..., C, H, W
obj_dim: The dimension to reduce over -- which corresponds to objs
    Returns:
B, ..., ..., C, H, W
"""
if obj_pix is None:
return None
return torch.max(obj_pix, dim=obj_dim)[0]
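# Illustrative example (shapes assumed): for obj_pix of shape
# (B, T, Nobj, C, H, W) and obj_dim=2, combine_obj_pixels returns a
# (B, T, C, H, W) tensor, i.e. an element-wise max over the object dimension.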
class MLPClassifier(nn.Module):
"""Simple classifier on top of the intermediate features."""
def __init__(self, in_dim, nlayers, match_inp_sz_layer=False):
super().__init__()
self.nlayers = nlayers
if nlayers == 0:
return
        # First linear layer, to project to the in_dim dimension, if it does
        # not already match
self.match_inp_sz_layer = match_inp_sz_layer
if self.match_inp_sz_layer:
            raise NotImplementedError("Doesn't work with multi-GPU yet.")
self.register_parameter('init_linear_wt', None)
self.in_dim = in_dim
layers = [[nn.Linear(in_dim, in_dim),
nn.ReLU(inplace=True)] for _ in range(nlayers - 1)]
layers_flat = [item for sublist in layers for item in sublist]
self.cls = nn.Sequential(*(layers_flat[:-1] + [nn.Linear(in_dim, 1)]))
def reset_parameters(self, inp, in_dim, out_dim):
self.init_linear_wt = nn.Parameter(
inp.new(in_dim, out_dim).normal_(0, 1))
def forward(self, preds, pixs, process_all_frames=False):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
        Returns:
solved: (BxT)
process_all_frames: Set true when used by other classifiers for
                intermediate feature extraction, so as to get features for each
frame.
"""
del pixs # This does not use it
if self.nlayers == 0:
return preds
        # Since this classifier doesn't take context into account and the final
        # _cls only looks at the last frame, we might as well process only
        # that last frame
if not process_all_frames:
preds = preds[:, -1:, ...]
mean_feat = torch.mean(preds, axis=[2, -1, -2])
if self.match_inp_sz_layer:
if self.init_linear_wt is None:
logging.warning(
'Creating a linear layer to map the input '
'dims (%d) to MLP input dim (%d)', mean_feat.shape[-1],
self.in_dim)
self.reset_parameters(preds, self.in_dim,
preds.shape[1] * preds.shape[3])
mean_feat = nn.functional.linear(mean_feat, self.init_linear_wt)
mean_feat = nn.ReLU(inplace=True)(mean_feat)
return self.cls(mean_feat).squeeze(-1)
class ConvNetClassifier(nn.Module):
"""ConvNet classifier on top of the intermediate features."""
def __init__(self, feat_in_dim, num_conv_blocks, num_fc_layers):
super().__init__()
del feat_in_dim
nobj = 1
self.enc = BasicEncoder(
phyre.NUM_COLORS,
nobj,
OmegaConf.create({
'class': 'nets.ResNetBaseEncoder',
'params': {
'base_model': {
'class': 'torchvision.models.resnet18',
'params': {
'pretrained': False,
}
},
'nlayers': num_conv_blocks,
}
}),
OmegaConf.create({
'class': 'nets.TrivialObjectifier',
'params': {
'nobj': nobj, # will sum into 1 obj
}
}),
OmegaConf.create({
'class': 'nets.BasicObjEncoder',
'params': {
'out_dim': 16,
'nlayers': 0,
'spatial_mean': True,
}
}),
spatial_mean=False,
feat_ext_eval_mode=False,
process_objs_together=False, # Doesn't matter, 1 obj
)
self.cls = MLPClassifier(self.enc.out_dim, num_fc_layers)
def forward(self, preds, pixs, process_all_frames=False):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
process_all_frames: Set true when used by other classifiers for
                intermediate feature extraction, so as to get features for each
                frame.
        Returns:
solved: (BxT)
"""
# Not enforcing the assert here if pred is None, since this module
# is usually used by other modules as a way to extract features,
# and it might pass in None for preds. But rest assured, this check
# would have been done on the caller side.
assert preds is None or preds.shape[1] == pixs.shape[1], (
'Must pass in run_decode=True if using a pixel-based classifier!!')
del preds # This does not use it
        # Since this classifier doesn't take context into account and the final
        # _cls only looks at the last frame, we might as well process only
        # that last frame
if not process_all_frames:
pixs = pixs[:, -1:, ...]
obj_feats = self.enc(pixs)
return self.cls(obj_feats, None, process_all_frames=process_all_frames)
class TxClassifier(nn.Module):
"""Transformer on top of the intermediate features over time."""
def __init__(self, in_dim, nheads, nlayers):
super().__init__()
self.tx_enc = TxEncoder(in_dim, nheads, nlayers)
self.cls = nn.Linear(self.tx_enc.out_dim, 1)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
        Returns:
solved: (Bx1)
"""
del pixs # This does not use it
# Spatial mean the features
stacked_mean_feat = torch.flatten(torch.mean(preds, axis=[-1, -2]), 1,
2)
feat_enc_time = self.cls(self.tx_enc(stacked_mean_feat))
# Max pool over time to get the final prediction
# Keepdims since the output format expects a time dimension and does
# a max pool over it at the end
cls_pred = torch.max(feat_enc_time, dim=1,
keepdims=True)[0].squeeze(-1)
return cls_pred
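    # Flow sketch for TxClassifier.forward (shapes are assumptions for
    # illustration): preds B x T x 1 x D x H' x W' -> spatial mean and obj
    # flatten -> B x T x D -> TxEncoder over time -> per-step logits B x T x 1
    # -> max over time -> B x 1.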
class ConvTxClassifier(nn.Module):
"""Transformer on top of the Conv features learned over time."""
def __init__(self, in_dim, nconvblocks, nheads, nlayers):
super().__init__()
self.conv_feat = ConvNetClassifier(in_dim, nconvblocks, 0)
self.tx_cls = TxClassifier(self.conv_feat.enc.out_dim, nheads, nlayers)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
        Returns:
solved: (Bx1)
"""
assert preds.shape[1] == pixs.shape[1], (
'Must pass in run_decode=True if using a pixel-based classifier!!')
del preds
feats = self.conv_feat(None, pixs, process_all_frames=True)
preds = self.tx_cls(feats, None)
return preds
class Conv3dClassifier(nn.Module):
"""3D conv over features learned over time."""
def __init__(self, in_dim, num_3d_layers):
super().__init__()
layers = [[
nn.Conv3d(in_dim, in_dim, 3, stride=2, padding=1, bias=False),
nn.ReLU(inplace=True)
] for _ in range(num_3d_layers - 1)]
layers_flat = [item for sublist in layers for item in sublist]
self.enc = nn.Sequential(*(layers_flat[:-1]))
self.cls = nn.Linear(in_dim, 1)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
        Returns:
solved: (Bx1)
"""
del pixs
enc_preds = self.enc(preds.squeeze(2).transpose(1, 2))
cls_preds = self.cls(torch.mean(enc_preds, [-1, -2, -3]))
# It has 1 extra dim in the end from the fc layer which should be
# removed, but since I need to add a time dimension anyway, just leave
# this there (will end up the same)
return cls_preds
class ConvConv3dClassifier(nn.Module):
"""Conv3D on top of the Conv features learned over time."""
def __init__(self, in_dim, nconvblocks, n3dlayers):
super().__init__()
self.conv_feat = ConvNetClassifier(in_dim, nconvblocks, 0)
self.td_cls = Conv3dClassifier(self.conv_feat.enc.out_dim, n3dlayers)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
        Returns:
solved: (Bx1)
"""
assert preds.shape[1] == pixs.shape[1], (
'Must pass in run_decode=True if using a pixel-based classifier!!')
del preds
feats = self.conv_feat(None, pixs, process_all_frames=True)
preds = self.td_cls(feats, None)
return preds
class ConcatClassifier(nn.Module):
"""Concat the features and classify."""
def __init__(self, in_dim, nlayers):
super().__init__()
self.cls = MLPClassifier(in_dim, nlayers, match_inp_sz_layer=True)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
        Returns:
solved: (Bx1)
"""
del pixs
# Concatenate over the time dimension
preds_flat = preds.view(preds.shape[0], 1, 1, -1, preds.shape[-2],
preds.shape[-1])
return self.cls(preds_flat, None, process_all_frames=True)
class ConvConcatClassifier(nn.Module):
"""Concat the Conv features and classify."""
def __init__(self, in_dim, nconvblocks, nclslayers):
super().__init__()
self.conv_feat = ConvNetClassifier(in_dim, nconvblocks, 0)
self.concat_cls = ConcatClassifier(self.conv_feat.enc.out_dim,
nclslayers)
def forward(self, preds, pixs):
"""
Run the classifier on the predictions.
Args:
preds: (BxTx1xDxH'xW')
pixs: (BxTx1xDxHxW)
        Returns:
solved: (Bx1)
"""
assert preds.shape[1] == pixs.shape[1], (
'Must pass in run_decode=True if using a pixel-based classifier!!')
del preds
feats = self.conv_feat(None, pixs, process_all_frames=True)
preds = self.concat_cls(feats, None)
return preds
class TrivialInteractor(nn.Module):
"""Model interactions btw objects: do nothing."""
def __init__(self, in_dim):
super().__init__()
del in_dim
@classmethod
def forward(cls, feat):
"""
Args:
feat: (B, T, Nobj, C, H', W')
Returns:
feat as is
"""
return feat
class TxEncoder(nn.Module):
"""Transformer based encoder, generates a feature combining the context."""
def __init__(self, in_dim, nheads, nlayers, maintain_dim=False):
"""
Args:
maintain_dim (bool): If true, it maps the final output to the same
dimensionality as the input
"""
super().__init__()
# Very basic position encoding
self.loc_embed = nn.Sequential(nn.Linear(1, 4), nn.ReLU(inplace=True),
nn.Linear(4, 8))
self.nheads = nheads
self.nlayers = nlayers
in_dim_loc = in_dim + 8 * nheads
self.loc_mixer = nn.Linear(in_dim_loc, in_dim_loc)
layer = nn.TransformerEncoderLayer(in_dim_loc, nheads)
self.encoder = nn.TransformerEncoder(layer, nlayers)
if maintain_dim:
self.back_to_orig_dim = nn.Linear(in_dim_loc, in_dim)
self.out_dim = in_dim
else:
self.back_to_orig_dim = lambda x: x # Identity
self.out_dim = in_dim_loc
def forward(self, feat):
"""
Args:
feat: (B, T, C)
Returns:
Same shape as input
"""
# Add a location embedding (over time), since time axis will flatten
loc_embedding = self.loc_embed(
torch.arange(feat.shape[1],
device=feat.device).unsqueeze(-1).float())
# Make into the shape of the feature
loc_embedding = loc_embedding.unsqueeze(0).repeat(
feat.shape[0], 1, self.nheads)
feat = torch.cat([feat, loc_embedding], dim=-1)
# Mix up the location information throughout the features so each head
# would have it
mixed_feat = self.loc_mixer(feat)
        # The transformer encoder expects the time dimension first (dim 0), so
        # permute things around
return self.back_to_orig_dim(
self.encoder(mixed_feat.permute(1, 0, 2)).permute(1, 0, 2))
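# Minimal usage sketch for TxEncoder (all numbers are assumptions for
# illustration, not from the original code):
#   enc = TxEncoder(in_dim=128, nheads=2, nlayers=1, maintain_dim=True)
#   out = enc(torch.zeros(4, 10, 128))   # (B=4, T=10, C=128) -> same shape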
class TxInteractor(nn.Module):
"""Model interactions btw objects: using Transformer."""
def __init__(self, in_dim, nheads, nlayers):
super().__init__()
self.in_dim = in_dim
self.tx_enc = TxEncoder(in_dim, nheads, nlayers, maintain_dim=True)
def forward(self, feat):
"""
Args:
feat: (B, T, Nobj, C, H', W')
Returns:
Same shape as input
"""
# Mean reduce the spatial dimensions for tx, then add it back to the
# original feature as a residual connection
feat_spat_mean = torch.mean(feat, dim=[-1, -2])
feat_flat = feat_spat_mean.flatten(1, 2)
tx_feat = self.tx_enc(feat_flat)
tx_feat = tx_feat.view(
feat_spat_mean.shape).unsqueeze(-1).unsqueeze(-1)
return feat + tx_feat
class TrivialSpatialAttention(nn.Module):
def __init__(self, in_dim):
super().__init__()
del in_dim
def forward(self, feat):
return feat
class TxSpatialAttention(nn.Module):
def __init__(self, in_dim, nheads, nlayers):
super().__init__()
self.tx_enc = TxEncoder(in_dim, nheads, nlayers, maintain_dim=True)
def forward(self, feat):
"""
Args:
feats (B, T, Nobj, D, H', W')
"""
feat_flat = torch.flatten(torch.flatten(feat, 0, 2), -2, -1)
feat_att = self.tx_enc(feat_flat.transpose(1, 2)).transpose(1, 2)
return feat_att.view(feat.shape)
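# Note: TxSpatialAttention flattens each feature map into a sequence of
# H'*W' spatial positions and runs the TxEncoder over that sequence, so the
# attention here is across locations within a frame rather than across time.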
class Fwd(nn.Module):
"""The master class with Forward model."""
def __init__(self, agent_cfg):
"""
Args:
dyn_type: The type of dynamics model to use.
dyn_n: Number of previous features used for prediction.
"""
super().__init__()
# The image embedding model
self.preproc = VideoPreprocessor(agent_cfg)
self.enc = hydra.utils.instantiate(agent_cfg.encoder,
self.preproc.out_dim,
agent_cfg.nobj)
dim = self.enc.out_dim
self.interactor = hydra.utils.instantiate(agent_cfg.interactor, dim)
# The dynamics model
self.dyn = hydra.utils.instantiate(agent_cfg.dyn, self.enc, dim)
# Classifier model
self.nframes_to_cls = agent_cfg.nframes_to_cls
        # An attention over the latent features before passing them through the
        # classifier.
self.spat_att = hydra.utils.instantiate(agent_cfg.spat_att, dim)
self.cls = hydra.utils.instantiate(agent_cfg.cls, dim)
# Decoder model
self.dec = hydra.utils.instantiate(agent_cfg.decoder, dim,
phyre.NUM_COLORS)
# Other loss functions
self.pix_loss = hydra.utils.instantiate(agent_cfg.loss_fn.pix)
self.nce_loss = hydra.utils.instantiate(agent_cfg.loss_fn.nce, dim)
@property
def device(self):
if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:
return 'cuda'
else:
return 'cpu'
def _forward_dyn(self, feats, vids, n_fwd_times, need_intermediate=False):
"""
Args:
feats: (BxT_histxNobjxDxH'xW')
vids: (BxT_histxCxHxW) The video corresponding to the feats, some
dyn models might use them.
n_fwd_times: Number of times to run the fwd model on the last frames
need_intermediate: If true, give all the intermediate features
Returns:
all_preds: The predictions at each time step, in n_fwd_times
            all_pixs: The predictions in pixels. Note that not all dynamics
                models use pixels, so it might just give the last frame as output
all_solved: The classification at each time step, for n_fwd_times
"""
all_preds = []
all_pixs = []
all_addl_losses = []
if n_fwd_times == 0:
return [all_preds, all_pixs, all_addl_losses]
def run_fwd_append(feats, pixs):
pred, pred_pix, addl_losses = self.dyn(feats, pixs)
all_preds.append(pred)
all_pixs.append(pred_pix)
all_addl_losses.append(addl_losses)
run_fwd_append(feats, vids)
n_fwd_times_copy = n_fwd_times
while n_fwd_times - 1 > 0:
feats = torch.cat(
[feats[:, 1:, ...],
torch.unsqueeze(all_preds[-1], axis=1)],
dim=1)
vids = torch.cat(
[vids[:, 1:, ...],
torch.unsqueeze(all_pixs[-1], axis=1)],
dim=1)
run_fwd_append(feats, vids)
n_fwd_times -= 1
assert len(all_preds) == n_fwd_times_copy, (
'%d %d' % (len(all_preds), n_fwd_times_copy))
if not need_intermediate:
all_preds = [all_preds[-1]]
all_pixs = [all_pixs[-1]]
all_addl_losses = [all_addl_losses[-1]]
# Will compute solved or not later, after decode, in case the classifier
# needs that information
return all_preds, all_pixs, all_addl_losses
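    # Note on the rollout above: it is autoregressive -- after the first
    # prediction, the history window is shifted by one frame and the model's
    # own latest feature (and pixel) prediction is appended before the next
    # call to self.dyn.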
def _slice_for_dyn(self, features_batched, n_hist_frames, nslices=-1):
"""
Args:
features_batched: BxTx.... can deal with any following
dimensions, typically it is (BxTxNobjxDxH'xW')
n_hist_frames (int): Number of frames to use as history
nslices (int): If -1, make as many slices of the training data
as possible. If 1, keep only the first one. (1 used when
training classifier on top, which should always see videos
from the start)
Returns:
B'x n_hist_frames x ... (B'x n_hist_frames x Nobj x D x H' x W')
"""
clip_hist = []
assert features_batched.shape[1] >= n_hist_frames
for i in range((features_batched.shape[1] - n_hist_frames + 1)):
if nslices > 0 and i >= nslices:
break
clip_hist.append(features_batched[:, i:i + n_hist_frames, ...])
clip_hist = torch.cat(clip_hist, dim=0)
return clip_hist
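    # Worked example (numbers are only for illustration): with T=10 frames
    # and n_hist_frames=3, nslices=-1 gives 8 overlapping windows (frames
    # 0-2, 1-3, ..., 7-9), concatenated on the batch dimension so B' = 8 * B.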
def _forward_dec(self, feats, pixels):
"""
Args:
feats: List of features (BxD) from the dynamics prediction stage,
one for each time step predicted.
pixels: List of corresponding pixels from the dynamics model. The
dyn model may or may not actually generate new pixels.
"""
return [self.dec(feat, pix) for feat, pix in zip(feats, pixels)]
# Loss functions ###########################################################
def cswm_loss(self, pred, gt, hinge=1.0):
"""
The energy based contrastive loss.
Args:
pred (BxNobjxDxH'xW')
gt (BxNobjxDxH'xW')
From https://github.com/tkipf/c-swm/blob/master/modules.py#L94
"""
pred = pred.view(pred.shape[:2] + (-1, ))
gt = gt.view(gt.shape[:2] + (-1, ))
batch_size = gt.size(0)
perm = np.random.permutation(batch_size)
neg = gt[perm]
def energy(pred, gt, sigma=0.5):
"""Energy function based on normalized squared L2 norm.
Args:
pred (B, Nobj, D')
gt (B, Nobj, D')
"""
norm = 0.5 / (sigma**2)
diff = pred - gt
return norm * diff.pow(2).sum(2).mean(1)
pos_loss = energy(pred, gt)
zeros = torch.zeros_like(pos_loss)
pos_loss = pos_loss.mean()
neg_loss = torch.max(zeros, hinge - energy(pred, neg)).mean()
return pos_loss + neg_loss
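    # In other words (a sketch of the objective above): with
    # E(a, b) = ||a - b||^2 / (2 * sigma^2) averaged over objects, the loss is
    # E(pred, gt) + max(0, hinge - E(pred, neg)), where neg is a batch-shuffled
    # copy of the ground-truth states.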
def ce_loss(self, decisions, targets):
targets = targets.to(dtype=torch.float, device=decisions.device)
return torch.nn.functional.binary_cross_entropy_with_logits(
decisions, targets)
def autoencoder_loss(self, pix, latent, autoenc_loss_ratio):
"""
        Runs a random subset of the actual frames through the decoder to incur
        a loss that encourages the intermediate representation to also act as a
        good autoencoder. A random fraction is used only for compute reasons;
        ideally every frame would be used (ratio = 1).
Args:
pix (B, T, H, W): Actual pixels of the input frames
latent (B, T, Nobj, D, H', W'): Latent representation of the input
frames
autoenc_loss_ratio (float): What percentage of the input frames to
run it on. Only for compute reasons, ideally run it on all.
Returns:
loss {'autoenc': (1,) <float>} for the loss
"""
# Flatten the Batch and time dimension to get all the frames
pix_flat = torch.flatten(pix, 0, 1)
latent_flat = torch.flatten(latent, 0, 1)
# Select a subset of the frames to run the loss on
assert pix_flat.shape[0] == latent_flat.shape[0]
idx = np.arange(pix_flat.shape[0])
np.random.shuffle(idx)
sel_cnt = int(autoenc_loss_ratio * len(idx))
idx_sel = np.sort(idx[:sel_cnt])
pix_flat_sel = pix_flat[idx_sel, ...]
latent_flat_sel = latent_flat[idx_sel, ...]
# Generate the pixels for the latent, and incur loss
pred_flat_sel = combine_obj_pixels(self.dec(latent_flat_sel, None), 1)
loss = self.pix_loss(pred_flat_sel, pix_flat_sel).unsqueeze(0)
return {'autoenc_pix': loss}
def solved_or_not_loss(self, clip_preds_solved, vid_is_solved):
"""
        Repeat is_solved as many times as the batch was repeated, to get
        the class label at each forward prediction
Args:
clip_preds_solved (B',)
vid_is_solved (B,)
            B and B' might be different, but B' must be a multiple of B;
                this happens when num_slices > 1
Returns:
loss {'ce': (1,) <float>} for the loss
"""
assert clip_preds_solved.shape[0] % vid_is_solved.shape[0] == 0
return {
'ce':
self.ce_loss(
clip_preds_solved,
vid_is_solved.repeat((clip_preds_solved.shape[0] //
vid_is_solved.shape[0], ))).unsqueeze(0)
}
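    # Illustrative example (numbers assumed): if slicing produced B' = 4 * B
    # clip predictions, vid_is_solved is tiled 4x so every slice of a video
    # shares that video's solved/unsolved label.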
############################################################################
def _compute_losses(self, clip_pred, clip_pred_pix, vid_feat, vid,
n_hist_frames, n_fwd_times):
"""
Compute all losses possible.
"""
dummy_loss = torch.Tensor([-1]).to(clip_pred.device)
losses = {}
# NCE and pixel loss
        # find the GT for each clip; note that not all predictions may have a
        # GT, since the last n_hist_frames of a video will make a prediction
        # that goes beyond the list of frames that were extracted for that video.
feat_preds = []
feat_gt = []
pix_preds = []
pix_gt = []
batch_size = vid_feat.shape[0]
gt_max_time = vid_feat.shape[1]
# Max slices that could have been made of the data, to use all of the
# training clip
max_slices_with_gt = gt_max_time - n_hist_frames - n_fwd_times + 1
num_slices = clip_pred.shape[0] // batch_size
for i in range(min(max_slices_with_gt, num_slices)):
corr_pred = clip_pred[i * batch_size:(i + 1) * batch_size, ...]
# Get the corresponding GT predictions for this pred
corr_gt = vid_feat[:, i + n_hist_frames + n_fwd_times - 1]
assert corr_gt.shape == corr_pred.shape
feat_preds.append(corr_pred)
feat_gt.append(corr_gt)
# Same thing for pix
if clip_pred_pix is not None:
corr_pix_pred = clip_pred_pix[i * vid_feat.shape[0]:(i + 1) *
vid_feat.shape[0], ...]
corr_pix_gt = vid[:, i + n_hist_frames + n_fwd_times - 1]
pix_preds.append(corr_pix_pred)
pix_gt.append(corr_pix_gt)
if len(feat_gt) > 0:
# Keep a batch dimension to the loss, since it will be run over
# multiple GPUs
feat_preds = torch.cat(feat_preds)
feat_gt = torch.cat(feat_gt)
losses['nce'] = self.nce_loss(feat_preds, feat_gt).unsqueeze(0)
losses['cswm'] = self.cswm_loss(feat_preds, feat_gt).unsqueeze(0)
else:
losses['nce'] = dummy_loss
losses['cswm'] = dummy_loss
# Reconstruction loss
if len(pix_gt) > 0:
losses['pix'] = self.pix_loss(torch.cat(pix_preds),
torch.cat(pix_gt)).unsqueeze(0)
else:
losses['pix'] = dummy_loss
return losses
def _cls(self, feat_hist, pix_hist, feat_preds, pix_preds):
"""
Wrapper around the classifier, collates all the input frames/features
and predicted future frames/features.
        The images and features are already combined over the objects
Args:
feat_hist: (B, T, C, H', W')
pix_hist: (B, T, 7, H, W)
feat_preds [list of (B, C, H', W')] -- len = num predictions
pix_preds [list of (B, 7, H, W)] -- len = num predictions
The elements could be None, since not all models predict pixels
Returns:
(B,) predicted scores for the clips
"""
feats_combined = feat_hist
if feat_preds is not None and len(feat_preds) > 0:
feats_combined = torch.cat([feat_hist] +
[el.unsqueeze(1) for el in feat_preds],
dim=1)
pix_combined = pix_hist
if (pix_preds is not None and len(pix_preds) > 0
and pix_preds[0] is not None):
pix_combined = torch.cat([pix_combined] +
[el.unsqueeze(1) for el in pix_preds],
dim=1)
        # Combine over objs -- we want the classifier model to see everything
        # at the same time
        # The inputs are already combined over objects, but the classifier
        # still expects the obj dimension, so add it back
pix_combined = pix_combined.unsqueeze(2)
feats_combined = feats_combined.unsqueeze(2)
# If need to keep only a subset of the frames
if self.nframes_to_cls > 0:
pix_combined = pix_combined[:, :self.nframes_to_cls, ...]
feats_combined = feats_combined[:, :self.nframes_to_cls, ...]
feats_combined = self.spat_att(feats_combined)
# Keep the last prediction, as that should ideally be the best
# prediction of whether it was solved or not
# torch.max was hard to optimize through
return self.cls(feats_combined, pix_combined)[:, -1]
def forward(self,
vid,
vid_is_solved,
n_hist_frames=3,
n_fwd_times=1,
n_fwd_times_incur_loss=999999,
run_decode=False,
compute_losses=False,
need_intermediate=False,
autoenc_loss_ratio=0.0,
nslices=-1):
"""
Args:
vid: (BxTxNobjxHxW) The input video
vid_is_solved: (Bx1) Whether the video is solved in the end of not.
Could be None at test time.
n_hist_frames: (int) Number of frames to use as history for
prediction
n_fwd_times: (int) How many times to run the forward dynamics model
            n_fwd_times_incur_loss (int): Up to how many of these forwards to
incur loss on.
run_decode: (bool) Decode the features into pixel output
compute_losses: Should be set at train time. Will compute losses,
whatever it can given the data (eg, if vid_is_solved is not
passed to the function, it will not compute the CE loss).
need_intermediate (bool): Set true if you want to run the dynamics
model and need all the intermediate results. Else, will return
a list with only 1 element, the final output.
            autoenc_loss_ratio (float between 0-1): Set to 1 to run auto-encoder
style loss on all frames when run_decode is set.
            nslices (int): See the _slice_for_dyn fn
Returns:
clip_feat: BxTxD
"""
vid_preproc = self.preproc.preprocess_vid(vid)
obj_feat = self.enc(vid_preproc)
clip_hist = self._slice_for_dyn(obj_feat,
n_hist_frames,
nslices=nslices)
vid_hist = self._slice_for_dyn(vid_preproc,
n_hist_frames,
nslices=nslices)
assert clip_hist.shape[1] == n_hist_frames
clip_hist = self.interactor(clip_hist)
clip_preds, clip_preds_pix, clip_preds_addl_losses = self._forward_dyn(
clip_hist, vid_hist, n_fwd_times, need_intermediate)
if run_decode:
clip_preds_pix = self._forward_dec(clip_preds, clip_preds_pix)
else:
clip_preds_pix = [None] * len(clip_preds)
        # Compute the solved-or-not prediction; only done for the clips asked for
clip_preds_solved = self._cls(
combine_obj_pixels(clip_hist, 2), combine_obj_pixels(vid_hist, 2),
[combine_obj_pixels(el, 1) for el in clip_preds],
[combine_obj_pixels(el, 1) for el in clip_preds_pix])
all_losses = []
clip_preds_pix_unpreproc_for_loss = [
self.preproc.unpreprocess_frame_for_loss(el)
for el in clip_preds_pix
]
if compute_losses:
for i in range(min(len(clip_preds), n_fwd_times_incur_loss)):
# Compute losses at each prediction step, if need_intermediate
# is set. Else, it will only return a single output
# (at the last prediction), and then we can only incur loss at
# that point.
if not need_intermediate:
assert len(clip_preds) == 1
pred_id = -1
# Only loss on predicting the final rolled out obs
this_fwd_times = n_fwd_times
else:
assert len(clip_preds) == n_fwd_times
pred_id = i
this_fwd_times = i + 1
all_losses.append(
self._compute_losses(
# For the loss, using only the last prediction (for now)
clip_preds[pred_id],
combine_obj_pixels(
clip_preds_pix_unpreproc_for_loss[pred_id], 1),
obj_feat,
combine_obj_pixels(vid, 2),
n_hist_frames,
this_fwd_times))
all_losses = average_losses(all_losses)
all_losses.update(average_losses(clip_preds_addl_losses))
all_losses.update(
self.solved_or_not_loss(clip_preds_solved, vid_is_solved))
# Add losses on the provided frames if requested
if run_decode and autoenc_loss_ratio > 0:
all_losses.update(
self.autoencoder_loss(combine_obj_pixels(vid, 2), obj_feat,
autoenc_loss_ratio))
clip_preds_pix_unpreproc = [
combine_obj_pixels(self.preproc.unpreprocess_frame_after_loss(el),
1) for el in clip_preds_pix_unpreproc_for_loss
]
all_preds = {
'feats': clip_preds,
'is_solved': clip_preds_solved,
'pixels': clip_preds_pix_unpreproc,
}
return all_preds, all_losses
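# Minimal usage sketch for Fwd (all shapes and values below are assumptions
# for illustration, not part of the original code):
#   model = Fwd(agent_cfg)  # sub-modules are hydra-instantiated from the cfg
#   preds, losses = model(vid, vid_is_solved,  # vid: B x T x Nobj x H x W
#                         n_hist_frames=3, n_fwd_times=5,
#                         run_decode=True, compute_losses=True)
#   preds['is_solved']  # (B',) logits; preds['pixels'] holds decoded frames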
|
[
"numpy.random.seed",
"torch.eye",
"torch.ByteTensor",
"torch.cat",
"torch.nn.InstanceNorm2d",
"torch.cos",
"numpy.arange",
"torch.nn.Softmax",
"torch.arange",
"torch.device",
"torch.nn.functional.normalize",
"torch.flatten",
"torch.nn.functional.grid_sample",
"logging.warning",
"torch.nn.Conv3d",
"torch.nn.TransformerEncoderLayer",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.exp",
"torch.nn.Upsample",
"torch.Tensor",
"torch.nn.functional.adaptive_max_pool2d",
"torch.nn.Linear",
"numpy.random.shuffle",
"torchvision.models.resnet18",
"torch.mean",
"torch.nn.TransformerEncoder",
"functools.partial",
"torch.randn_like",
"torch.nn.ModuleList",
"torch.zeros_like",
"torch.nn.Conv2d",
"numpy.sort",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.max",
"numpy.random.permutation",
"torch.unsqueeze",
"torch.reshape",
"torch.sum",
"torch.ones_like",
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torch.stack",
"torch.nn.Sequential",
"hydra.utils.instantiate",
"torch.LongTensor",
"torch.nn.functional.linear",
"omegaconf.OmegaConf.create",
"numpy.array",
"torch.chunk",
"torch.sin"
] |
[((1022, 1047), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1045, 1047), False, 'import torch\n'), ((1057, 1102), 'torch.device', 'torch.device', (["('cuda:0' if USE_CUDA else 'cpu')"], {}), "('cuda:0' if USE_CUDA else 'cpu')\n", (1069, 1102), False, 'import torch\n'), ((1103, 1121), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1117, 1121), True, 'import numpy as np\n'), ((9710, 9784), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_out'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(dim_in, dim_out, kernel_size=1, stride=1, padding=0, bias=False)\n', (9719, 9784), True, 'import torch.nn as nn\n'), ((29393, 29414), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (29400, 29414), True, 'import torch.nn as nn\n'), ((1484, 1519), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1493, 1519), True, 'import torch.nn as nn\n'), ((4587, 4653), 'torch.nn.functional.binary_cross_entropy_with_logits', 'nn.functional.binary_cross_entropy_with_logits', (['decisions', 'targets'], {}), '(decisions, targets)\n', (4633, 4653), True, 'import torch.nn as nn\n'), ((5166, 5211), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (5193, 5211), False, 'import torchvision\n'), ((5228, 5307), 'torch.nn.Conv2d', 'nn.Conv2d', (['phyre.NUM_COLORS', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(phyre.NUM_COLORS, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (5237, 5307), True, 'import torch.nn as nn\n'), ((5533, 5585), 'torch.nn.Sequential', 'nn.Sequential', (['conv1', 'net.bn1', 'net.relu', 'net.maxpool'], {}), '(conv1, net.bn1, net.relu, net.maxpool)\n', (5546, 5585), True, 'import torch.nn as nn\n'), ((5608, 5671), 'torch.nn.ModuleList', 'nn.ModuleList', (['[net.layer1, net.layer2, net.layer3, net.layer4]'], {}), '([net.layer1, net.layer2, net.layer3, net.layer4])\n', (5621, 5671), True, 'import torch.nn as nn\n'), ((7024, 7041), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1)'], {}), '(512, 1)\n', (7033, 7041), True, 'import torch.nn as nn\n'), ((8934, 9000), 'torch.nn.functional.binary_cross_entropy_with_logits', 'nn.functional.binary_cross_entropy_with_logits', (['decisions', 'targets'], {}), '(decisions, targets)\n', (8980, 9000), True, 'import torch.nn as nn\n'), ((9399, 9449), 'torch.eye', 'torch.eye', (['phyre.NUM_COLORS'], {'device': 'indices.device'}), '(phyre.NUM_COLORS, device=indices.device)\n', (9408, 9449), False, 'import torch\n'), ((10873, 10943), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (10886, 10943), False, 'import torch\n'), ((15038, 15106), 'torch.nn.functional.grid_sample', 'nn.functional.grid_sample', (['feat', 'grid'], {'mode': 'mode', 'align_corners': '(True)'}), '(feat, grid, mode=mode, align_corners=True)\n', (15063, 15106), True, 'import torch.nn as nn\n'), ((15607, 15640), 'torch.ones_like', 'torch.ones_like', (['feat[:, :1, ...]'], {}), '(feat[:, :1, ...])\n', (15622, 15640), False, 'import torch\n'), ((15664, 15707), 'torch.cat', 'torch.cat', (['[feat_pos, feat_pos_ones]'], {'dim': '(1)'}), '([feat_pos, feat_pos_ones], dim=1)\n', (15673, 15707), False, 'import torch\n'), ((16041, 16090), 'torch.cat', 'torch.cat', (['[tx_feat_pos, feat[:, 2:, ...]]'], {'dim': '(1)'}), 
'([tx_feat_pos, feat[:, 2:, ...]], dim=1)\n', (16050, 16090), False, 'import torch\n'), ((16348, 16371), 'torch.exp', 'torch.exp', (['(0.5 * logvar)'], {}), '(0.5 * logvar)\n', (16357, 16371), False, 'import torch\n'), ((16386, 16407), 'torch.randn_like', 'torch.randn_like', (['std'], {}), '(std)\n', (16402, 16407), False, 'import torch\n'), ((17024, 17065), 'torch.mean', 'torch.mean', (['feat_hist_embed'], {'dim': '[-2, -1]'}), '(feat_hist_embed, dim=[-2, -1])\n', (17034, 17065), False, 'import torch\n'), ((18670, 18717), 'torch.chunk', 'torch.chunk', (['feat_to_tx', 'self.num_tx', 'split_dim'], {}), '(feat_to_tx, self.num_tx, split_dim)\n', (18681, 18717), False, 'import torch\n'), ((19632, 19689), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_stn', '(dim * n * nobj)', 'num_tx'], {}), '(base_stn, dim * n * nobj, num_tx)\n', (19655, 19689), False, 'import hydra\n'), ((20073, 20143), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (20086, 20143), False, 'import torch\n'), ((20333, 20362), 'torch.flatten', 'torch.flatten', (['features', '(2)', '(3)'], {}), '(features, 2, 3)\n', (20346, 20362), False, 'import torch\n'), ((20463, 20527), 'torch.reshape', 'torch.reshape', (['new_feat', '(features.shape[:1] + features.shape[2:])'], {}), '(new_feat, features.shape[:1] + features.shape[2:])\n', (20476, 20527), False, 'import torch\n'), ((20909, 20966), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_stn', '(dim * n * nobj)', 'num_tx'], {}), '(base_stn, dim * n * nobj, num_tx)\n', (20932, 20966), False, 'import hydra\n'), ((21689, 21759), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (21702, 21759), False, 'import torch\n'), ((22311, 22354), 'torch.chunk', 'torch.chunk', (['attention_maps', 'self.num_tx', '(1)'], {}), '(attention_maps, self.num_tx, 1)\n', (22322, 22354), False, 'import torch\n'), ((22385, 22433), 'torch.chunk', 'torch.chunk', (['future_pixels_tiled', 'self.num_tx', '(1)'], {}), '(future_pixels_tiled, self.num_tx, 1)\n', (22396, 22433), False, 'import torch\n'), ((23223, 23285), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_stn', '(dim * n * nobj)', 'self.num_tx'], {}), '(base_stn, dim * n * nobj, self.num_tx)\n', (23246, 23285), False, 'import hydra\n'), ((23835, 23905), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (23848, 23905), False, 'import torch\n'), ((24907, 24969), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_stn', '(dim * n * nobj)', 'self.num_tx'], {}), '(base_stn, dim * n * nobj, self.num_tx)\n', (24930, 24969), False, 'import hydra\n'), ((25887, 25957), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (25900, 25957), False, 'import torch\n'), ((26272, 26327), 'torch.cat', 'torch.cat', (['[future_pixels_bg, future_pixels_obj]'], {'dim': '(1)'}), '([future_pixels_bg, future_pixels_obj], dim=1)\n', (26281, 26327), False, 'import torch\n'), ((27518, 27587), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_stn', '(dim * n * nobj)', '(self.num_tx * nobj)'], {}), '(base_stn, dim * n * nobj, self.num_tx * nobj)\n', (27541, 
27587), False, 'import hydra\n'), ((27961, 28031), 'torch.reshape', 'torch.reshape', (['features', '((features.shape[0], -1) + features.shape[-2:])'], {}), '(features, (features.shape[0], -1) + features.shape[-2:])\n', (27974, 28031), False, 'import torch\n'), ((28222, 28257), 'torch.flatten', 'torch.flatten', (['pixels_movable', '(1)', '(2)'], {}), '(pixels_movable, 1, 2)\n', (28235, 28257), False, 'import torch\n'), ((29451, 29559), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['in_dim', 'out_dim'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'bias': '(False)'}), '(in_dim, out_dim, kernel_size=kernel_size, stride=stride,\n padding=padding, bias=False)\n', (29469, 29559), True, 'import torch.nn as nn\n'), ((29889, 29967), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'upsample_factor', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(scale_factor=upsample_factor, mode='bilinear', align_corners=True)\n", (29900, 29967), True, 'import torch.nn as nn\n'), ((30620, 30655), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['activation'], {}), '(activation)\n', (30643, 30655), False, 'import hydra\n'), ((30664, 30727), 'logging.warning', 'logging.warning', (['"""Using %s activation for decoders"""', 'activation'], {}), "('Using %s activation for decoders', activation)\n", (30679, 30727), False, 'import logging\n'), ((32532, 32579), 'torch.sum', 'torch.sum', (['decode_feature'], {'dim': '(1)', 'keepdims': '(True)'}), '(decode_feature, dim=1, keepdims=True)\n', (32541, 32579), False, 'import torch\n'), ((32611, 32646), 'torch.flatten', 'torch.flatten', (['decode_feature', '(0)', '(1)'], {}), '(decode_feature, 0, 1)\n', (32624, 32646), False, 'import torch\n'), ((32757, 32823), 'torch.reshape', 'torch.reshape', (['images', '(decode_feature.shape[:2] + images.shape[1:])'], {}), '(images, decode_feature.shape[:2] + images.shape[1:])\n', (32770, 32823), False, 'import torch\n'), ((36681, 36722), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['obj_encoder', 'dim'], {}), '(obj_encoder, dim)\n', (36704, 36722), False, 'import hydra\n'), ((37450, 37479), 'torch.flatten', 'torch.flatten', (['vid_feat', '(0)', '(1)'], {}), '(vid_feat, 0, 1)\n', (37463, 37479), False, 'import torch\n'), ((37890, 37958), 'torch.reshape', 'torch.reshape', (['mapped_feat', '((batch_size, -1) + mapped_feat.shape[1:])'], {}), '(mapped_feat, (batch_size, -1) + mapped_feat.shape[1:])\n', (37903, 37958), False, 'import torch\n'), ((38314, 38363), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['obj_encoder', '(dim // nobj)'], {}), '(obj_encoder, dim // nobj)\n', (38337, 38363), False, 'import hydra\n'), ((42136, 42171), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['base_model'], {}), '(base_model)\n', (42159, 42171), False, 'import hydra\n'), ((42188, 42257), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(in_dim, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (42197, 42257), True, 'import torch.nn as nn\n'), ((42408, 42460), 'torch.nn.Sequential', 'nn.Sequential', (['conv1', 'net.bn1', 'net.relu', 'net.maxpool'], {}), '(conv1, net.bn1, net.relu, net.maxpool)\n', (42421, 42460), True, 'import torch.nn as nn\n'), ((44009, 44104), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['feat_ext', '(in_dim * nobj if self.process_objs_together else in_dim)'], {}), '(feat_ext, in_dim * nobj if self.\n process_objs_together else in_dim)\n', (44032, 44104), 
False, 'import hydra\n'), ((44212, 44274), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['objectifier', 'initial_dim', 'obj_encoder'], {}), '(objectifier, initial_dim, obj_encoder)\n', (44235, 44274), False, 'import hydra\n'), ((47437, 47468), 'torch.max', 'torch.max', (['obj_pix'], {'dim': 'obj_dim'}), '(obj_pix, dim=obj_dim)\n', (47446, 47468), False, 'import torch\n'), ((49293, 49328), 'torch.mean', 'torch.mean', (['preds'], {'axis': '[2, -1, -2]'}), '(preds, axis=[2, -1, -2])\n', (49303, 49328), False, 'import torch\n'), ((52865, 52898), 'torch.nn.Linear', 'nn.Linear', (['self.tx_enc.out_dim', '(1)'], {}), '(self.tx_enc.out_dim, 1)\n', (52874, 52898), True, 'import torch.nn as nn\n'), ((54981, 55013), 'torch.nn.Sequential', 'nn.Sequential', (['*layers_flat[:-1]'], {}), '(*layers_flat[:-1])\n', (54994, 55013), True, 'import torch.nn as nn\n'), ((55035, 55055), 'torch.nn.Linear', 'nn.Linear', (['in_dim', '(1)'], {}), '(in_dim, 1)\n', (55044, 55055), True, 'import torch.nn as nn\n'), ((59109, 59142), 'torch.nn.Linear', 'nn.Linear', (['in_dim_loc', 'in_dim_loc'], {}), '(in_dim_loc, in_dim_loc)\n', (59118, 59142), True, 'import torch.nn as nn\n'), ((59159, 59205), 'torch.nn.TransformerEncoderLayer', 'nn.TransformerEncoderLayer', (['in_dim_loc', 'nheads'], {}), '(in_dim_loc, nheads)\n', (59185, 59205), True, 'import torch.nn as nn\n'), ((59229, 59266), 'torch.nn.TransformerEncoder', 'nn.TransformerEncoder', (['layer', 'nlayers'], {}), '(layer, nlayers)\n', (59250, 59266), True, 'import torch.nn as nn\n'), ((60036, 60076), 'torch.cat', 'torch.cat', (['[feat, loc_embedding]'], {'dim': '(-1)'}), '([feat, loc_embedding], dim=-1)\n', (60045, 60076), False, 'import torch\n'), ((61030, 61060), 'torch.mean', 'torch.mean', (['feat'], {'dim': '[-1, -2]'}), '(feat, dim=[-1, -2])\n', (61040, 61060), False, 'import torch\n'), ((62335, 62420), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.encoder', 'self.preproc.out_dim', 'agent_cfg.nobj'], {}), '(agent_cfg.encoder, self.preproc.out_dim, agent_cfg.nobj\n )\n', (62358, 62420), False, 'import hydra\n'), ((62559, 62609), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.interactor', 'dim'], {}), '(agent_cfg.interactor, dim)\n', (62582, 62609), False, 'import hydra\n'), ((62658, 62711), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.dyn', 'self.enc', 'dim'], {}), '(agent_cfg.dyn, self.enc, dim)\n', (62681, 62711), False, 'import hydra\n'), ((62917, 62965), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.spat_att', 'dim'], {}), '(agent_cfg.spat_att, dim)\n', (62940, 62965), False, 'import hydra\n'), ((62985, 63028), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.cls', 'dim'], {}), '(agent_cfg.cls, dim)\n', (63008, 63028), False, 'import hydra\n'), ((63072, 63137), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.decoder', 'dim', 'phyre.NUM_COLORS'], {}), '(agent_cfg.decoder, dim, phyre.NUM_COLORS)\n', (63095, 63137), False, 'import hydra\n'), ((63236, 63282), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.loss_fn.pix'], {}), '(agent_cfg.loss_fn.pix)\n', (63259, 63282), False, 'import hydra\n'), ((63307, 63358), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['agent_cfg.loss_fn.nce', 'dim'], {}), '(agent_cfg.loss_fn.nce, dim)\n', (63330, 63358), False, 'import hydra\n'), ((66579, 66606), 'torch.cat', 'torch.cat', (['clip_hist'], {'dim': '(0)'}), '(clip_hist, dim=0)\n', (66588, 66606), False, 'import torch\n'), 
((67555, 67588), 'numpy.random.permutation', 'np.random.permutation', (['batch_size'], {}), '(batch_size)\n', (67576, 67588), True, 'import numpy as np\n'), ((67995, 68021), 'torch.zeros_like', 'torch.zeros_like', (['pos_loss'], {}), '(pos_loss)\n', (68011, 68021), False, 'import torch\n'), ((68294, 68366), 'torch.nn.functional.binary_cross_entropy_with_logits', 'torch.nn.functional.binary_cross_entropy_with_logits', (['decisions', 'targets'], {}), '(decisions, targets)\n', (68346, 68366), False, 'import torch\n'), ((69239, 69263), 'torch.flatten', 'torch.flatten', (['pix', '(0)', '(1)'], {}), '(pix, 0, 1)\n', (69252, 69263), False, 'import torch\n'), ((69286, 69313), 'torch.flatten', 'torch.flatten', (['latent', '(0)', '(1)'], {}), '(latent, 0, 1)\n', (69299, 69313), False, 'import torch\n'), ((69444, 69472), 'numpy.arange', 'np.arange', (['pix_flat.shape[0]'], {}), '(pix_flat.shape[0])\n', (69453, 69472), True, 'import numpy as np\n'), ((69481, 69503), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (69498, 69503), True, 'import numpy as np\n'), ((69575, 69597), 'numpy.sort', 'np.sort', (['idx[:sel_cnt]'], {}), '(idx[:sel_cnt])\n', (69582, 69597), True, 'import numpy as np\n'), ((2314, 2371), 'torch.nn.Conv2d', 'nn.Conv2d', (['phyre.NUM_COLORS', '(3)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(phyre.NUM_COLORS, 3, kernel_size=1, bias=False)\n', (2323, 2371), True, 'import torch.nn as nn\n'), ((2385, 2402), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(3)'], {}), '(3)\n', (2399, 2402), True, 'import torch.nn as nn\n'), ((2416, 2437), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2423, 2437), True, 'import torch.nn as nn\n'), ((2451, 2515), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7)', 'stride': '(4)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, kernel_size=7, stride=4, padding=3, bias=False)\n', (2460, 2515), True, 'import torch.nn as nn\n'), ((2529, 2547), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2543, 2547), True, 'import torch.nn as nn\n'), ((2561, 2582), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2568, 2582), True, 'import torch.nn as nn\n'), ((2596, 2661), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(64, 64, kernel_size=5, stride=2, padding=2, bias=False)\n', (2605, 2661), True, 'import torch.nn as nn\n'), ((2675, 2693), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2689, 2693), True, 'import torch.nn as nn\n'), ((2707, 2728), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2714, 2728), True, 'import torch.nn as nn\n'), ((2742, 2807), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(64, 64, kernel_size=5, stride=2, padding=2, bias=False)\n', (2751, 2807), True, 'import torch.nn as nn\n'), ((2821, 2839), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2835, 2839), True, 'import torch.nn as nn\n'), ((2853, 2874), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2860, 2874), True, 'import torch.nn as nn\n'), ((2888, 2953), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(64, 64, kernel_size=5, stride=2, padding=2, bias=False)\n', (2897, 2953), True, 'import torch.nn as nn\n'), ((2967, 
2985), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2981, 2985), True, 'import torch.nn as nn\n'), ((2999, 3020), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3006, 3020), True, 'import torch.nn as nn\n'), ((3034, 3100), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(64, 128, kernel_size=5, stride=2, padding=2, bias=False)\n', (3043, 3100), True, 'import torch.nn as nn\n'), ((3114, 3133), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3128, 3133), True, 'import torch.nn as nn\n'), ((3147, 3168), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3154, 3168), True, 'import torch.nn as nn\n'), ((3182, 3249), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(128, 128, kernel_size=5, stride=2, padding=2, bias=False)\n', (3191, 3249), True, 'import torch.nn as nn\n'), ((3285, 3304), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3299, 3304), True, 'import torch.nn as nn\n'), ((3318, 3339), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3325, 3339), True, 'import torch.nn as nn\n'), ((3353, 3420), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(128, 128, kernel_size=5, stride=2, padding=2, bias=False)\n', (3362, 3420), True, 'import torch.nn as nn\n'), ((3456, 3475), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3470, 3475), True, 'import torch.nn as nn\n'), ((3489, 3510), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3496, 3510), True, 'import torch.nn as nn\n'), ((5484, 5511), 'torch.eye', 'torch.eye', (['phyre.NUM_COLORS'], {}), '(phyre.NUM_COLORS)\n', (5493, 5511), False, 'import torch\n'), ((7564, 7610), 'torch.nn.functional.adaptive_max_pool2d', 'nn.functional.adaptive_max_pool2d', (['features', '(1)'], {}), '(features, 1)\n', (7597, 7610), True, 'import torch.nn as nn\n'), ((8329, 8375), 'torch.nn.functional.adaptive_max_pool2d', 'nn.functional.adaptive_max_pool2d', (['features', '(1)'], {}), '(features, 1)\n', (8362, 8375), True, 'import torch.nn as nn\n'), ((10242, 10263), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10249, 10263), True, 'import torch.nn as nn\n'), ((10369, 10390), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10376, 10390), True, 'import torch.nn as nn\n'), ((13346, 13434), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_dim', '(8 * num_tx)'], {'kernel_size': 'kernel_size', 'padding': '(kernel_size // 2)'}), '(input_dim, 8 * num_tx, kernel_size=kernel_size, padding=\n kernel_size // 2)\n', (13355, 13434), True, 'import torch.nn as nn\n'), ((13497, 13510), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (13504, 13510), True, 'import torch.nn as nn\n'), ((13524, 13614), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8 * num_tx)', '(10 * num_tx)'], {'kernel_size': 'kernel_size', 'padding': '(kernel_size // 2)'}), '(8 * num_tx, 10 * num_tx, kernel_size=kernel_size, padding=\n kernel_size // 2)\n', (13533, 13614), True, 'import torch.nn as nn\n'), ((13677, 13690), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (13684, 13690), True, 'import torch.nn as nn\n'), ((14011, 14046), 'torch.nn.Linear', 'nn.Linear', (['(10 * num_tx)', '(10 * 
num_tx)'], {}), '(10 * num_tx, 10 * num_tx)\n', (14020, 14046), True, 'import torch.nn as nn\n'), ((14080, 14115), 'torch.nn.Linear', 'nn.Linear', (['(10 * num_tx)', '(10 * num_tx)'], {}), '(10 * num_tx, 10 * num_tx)\n', (14089, 14115), True, 'import torch.nn as nn\n'), ((14152, 14187), 'torch.nn.Linear', 'nn.Linear', (['(10 * num_tx)', '(32 * num_tx)'], {}), '(10 * num_tx, 32 * num_tx)\n', (14161, 14187), True, 'import torch.nn as nn\n'), ((14225, 14238), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (14232, 14238), True, 'import torch.nn as nn\n'), ((14276, 14314), 'torch.nn.Linear', 'nn.Linear', (['(32 * num_tx)', '(num_tx * 3 * 2)'], {}), '(32 * num_tx, num_tx * 3 * 2)\n', (14285, 14314), True, 'import torch.nn as nn\n'), ((17902, 17918), 'torch.cos', 'torch.cos', (['angle'], {}), '(angle)\n', (17911, 17918), False, 'import torch\n'), ((17943, 17959), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (17952, 17959), False, 'import torch\n'), ((18028, 18106), 'torch.cat', 'torch.cat', (['[cos_angle, sin_angle, pos_x, -sin_angle, cos_angle, pos_y]'], {'dim': '(-1)'}), '([cos_angle, sin_angle, pos_x, -sin_angle, cos_angle, pos_y], dim=-1)\n', (18037, 18106), False, 'import torch\n'), ((18836, 18889), 'functools.partial', 'partial', (['self.transform_pix'], {'mode': 'self.affine_tx_mode'}), '(self.transform_pix, mode=self.affine_tx_mode)\n', (18843, 18889), False, 'from functools import partial\n'), ((19210, 19257), 'torch.cat', 'torch.cat', (['feat_to_tx_parts_txed'], {'dim': 'split_dim'}), '(feat_to_tx_parts_txed, dim=split_dim)\n', (19219, 19257), False, 'import torch\n'), ((21217, 21280), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_tx', 'num_tx'], {'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(num_tx, num_tx, kernel_size=1, padding=0, bias=False)\n', (21226, 21280), True, 'import torch.nn as nn\n'), ((21294, 21311), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (21304, 21311), True, 'import torch.nn as nn\n'), ((22597, 22618), 'torch.stack', 'torch.stack', (['weighted'], {}), '(weighted)\n', (22608, 22618), False, 'import torch\n'), ((25161, 25251), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2 * phyre.NUM_COLORS - 1)', '(8)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(2 * phyre.NUM_COLORS - 1, 8, kernel_size=1, stride=1, padding=0,\n bias=False)\n', (25170, 25251), True, 'import torch.nn as nn\n'), ((25359, 25380), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (25366, 25380), True, 'import torch.nn as nn\n'), ((25394, 25457), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8)', '(1)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(8, 1, kernel_size=1, stride=1, padding=0, bias=False)\n', (25403, 25457), True, 'import torch.nn as nn\n'), ((26189, 26246), 'torch.cat', 'torch.cat', (['[pixels[:, -1, ...], future_pixels_obj]'], {'dim': '(1)'}), '([pixels[:, -1, ...], future_pixels_obj], dim=1)\n', (26198, 26246), False, 'import torch\n'), ((27312, 27340), 'torch.LongTensor', 'torch.LongTensor', (['movable_ch'], {}), '(movable_ch)\n', (27328, 27340), False, 'import torch\n'), ((27391, 27424), 'torch.arange', 'torch.arange', (['(1)', 'phyre.NUM_COLORS'], {}), '(1, phyre.NUM_COLORS)\n', (27403, 27424), False, 'import torch\n'), ((28784, 28845), 'torch.sum', 'torch.sum', (['future_pixels[:, :, 1:, ...]'], {'dim': '(2)', 'keepdims': '(True)'}), '(future_pixels[:, :, 1:, ...], dim=2, keepdims=True)\n', (28793, 28845), False, 'import torch\n'), ((29778, 
29817), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['out_dim'], {'affine': '(True)'}), '(out_dim, affine=True)\n', (29795, 29817), True, 'import torch.nn as nn\n'), ((29844, 29859), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (29857, 29859), True, 'import torch.nn as nn\n'), ((34045, 34088), 'torch.stack', 'torch.stack', (['[el[key] for el in all_losses]'], {}), '([el[key] for el in all_losses])\n', (34056, 34088), False, 'import torch\n'), ((34258, 34295), 'torch.mean', 'torch.mean', (['(stkd * (stkd >= 0))'], {'dim': '(0)'}), '(stkd * (stkd >= 0), dim=0)\n', (34268, 34295), False, 'import torch\n'), ((34765, 34833), 'logging.warning', 'logging.warning', (['"""Ignoring the out_dim (%d) for ObjEncoder"""', 'out_dim'], {}), "('Ignoring the out_dim (%d) for ObjEncoder', out_dim)\n", (34780, 34833), False, 'import logging\n'), ((35449, 35480), 'torch.nn.Sequential', 'nn.Sequential', (['*layers_lst_flat'], {}), '(*layers_lst_flat)\n', (35462, 35480), True, 'import torch.nn as nn\n'), ((35727, 35752), 'torch.flatten', 'torch.flatten', (['feat', '(0)', '(2)'], {}), '(feat, 0, 2)\n', (35740, 35752), False, 'import torch\n'), ((35830, 35902), 'torch.reshape', 'torch.reshape', (['obj_embed_flat', '(feat.shape[:3] + obj_embed_flat.shape[1:])'], {}), '(obj_embed_flat, feat.shape[:3] + obj_embed_flat.shape[1:])\n', (35843, 35902), False, 'import torch\n'), ((36017, 36067), 'torch.mean', 'torch.mean', (['obj_embed'], {'dim': '[-1, -2]', 'keepdims': '(True)'}), '(obj_embed, dim=[-1, -2], keepdims=True)\n', (36027, 36067), False, 'import torch\n'), ((36348, 36415), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(dim, dim, kernel_size=1, stride=1, padding=0, bias=False)\n', (36357, 36415), True, 'import torch.nn as nn\n'), ((36439, 36460), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (36446, 36460), True, 'import torch.nn as nn\n'), ((36474, 36542), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'nobj'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(dim, nobj, kernel_size=1, stride=1, padding=0, bias=False)\n', (36483, 36542), True, 'import torch.nn as nn\n'), ((40250, 40297), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', '(3)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(in_dim, 3, kernel_size=1, bias=False)\n', (40259, 40297), True, 'import torch.nn as nn\n'), ((40311, 40328), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(3)'], {}), '(3)\n', (40325, 40328), True, 'import torch.nn as nn\n'), ((40342, 40363), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (40349, 40363), True, 'import torch.nn as nn\n'), ((40605, 40626), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (40612, 40626), True, 'import torch.nn as nn\n'), ((40873, 40894), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (40880, 40894), True, 'import torch.nn as nn\n'), ((41141, 41162), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (41148, 41162), True, 'import torch.nn as nn\n'), ((41409, 41430), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (41416, 41430), True, 'import torch.nn as nn\n'), ((41679, 41700), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (41686, 41700), True, 'import torch.nn as nn\n'), ((45459, 45488), 'torch.flatten', 'torch.flatten', (['flat_obs', '(1)', '(2)'], {}), '(flat_obs, 1, 2)\n', 
(45472, 45488), False, 'import torch\n'), ((45874, 45923), 'torch.mean', 'torch.mean', (['features'], {'dim': '[-2, -1]', 'keepdims': '(True)'}), '(features, dim=[-2, -1], keepdims=True)\n', (45884, 45923), False, 'import torch\n'), ((46050, 46096), 'torch.nn.functional.normalize', 'nn.functional.normalize', (['features'], {'p': '(2)', 'dim': '(-1)'}), '(features, p=2, dim=-1)\n', (46073, 46096), True, 'import torch.nn as nn\n'), ((49768, 49820), 'torch.nn.functional.linear', 'nn.functional.linear', (['mean_feat', 'self.init_linear_wt'], {}), '(mean_feat, self.init_linear_wt)\n', (49788, 49820), True, 'import torch.nn as nn\n'), ((50259, 50451), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (["{'class': 'nets.ResNetBaseEncoder', 'params': {'base_model': {'class':\n 'torchvision.models.resnet18', 'params': {'pretrained': False}},\n 'nlayers': num_conv_blocks}}"], {}), "({'class': 'nets.ResNetBaseEncoder', 'params': {\n 'base_model': {'class': 'torchvision.models.resnet18', 'params': {\n 'pretrained': False}}, 'nlayers': num_conv_blocks}})\n", (50275, 50451), False, 'from omegaconf import OmegaConf\n'), ((50685, 50770), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (["{'class': 'nets.TrivialObjectifier', 'params': {'nobj': nobj}}"], {}), "({'class': 'nets.TrivialObjectifier', 'params': {'nobj': nobj}}\n )\n", (50701, 50770), False, 'from omegaconf import OmegaConf\n'), ((50887, 51005), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (["{'class': 'nets.BasicObjEncoder', 'params': {'out_dim': 16, 'nlayers': 0,\n 'spatial_mean': True}}"], {}), "({'class': 'nets.BasicObjEncoder', 'params': {'out_dim': 16,\n 'nlayers': 0, 'spatial_mean': True}})\n", (50903, 51005), False, 'from omegaconf import OmegaConf\n'), ((53249, 53281), 'torch.mean', 'torch.mean', (['preds'], {'axis': '[-1, -2]'}), '(preds, axis=[-1, -2])\n', (53259, 53281), False, 'import torch\n'), ((55396, 55431), 'torch.mean', 'torch.mean', (['enc_preds', '[-1, -2, -3]'], {}), '(enc_preds, [-1, -2, -3])\n', (55406, 55431), False, 'import torch\n'), ((58887, 58902), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(4)'], {}), '(1, 4)\n', (58896, 58902), True, 'import torch.nn as nn\n'), ((58904, 58925), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (58911, 58925), True, 'import torch.nn as nn\n'), ((58966, 58981), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(8)'], {}), '(4, 8)\n', (58975, 58981), True, 'import torch.nn as nn\n'), ((59328, 59357), 'torch.nn.Linear', 'nn.Linear', (['in_dim_loc', 'in_dim'], {}), '(in_dim_loc, in_dim)\n', (59337, 59357), True, 'import torch.nn as nn\n'), ((61782, 61807), 'torch.flatten', 'torch.flatten', (['feat', '(0)', '(2)'], {}), '(feat, 0, 2)\n', (61795, 61807), False, 'import torch\n'), ((72724, 72745), 'torch.cat', 'torch.cat', (['feat_preds'], {}), '(feat_preds)\n', (72733, 72745), False, 'import torch\n'), ((72768, 72786), 'torch.cat', 'torch.cat', (['feat_gt'], {}), '(feat_gt)\n', (72777, 72786), False, 'import torch\n'), ((1317, 1352), 'torch.nn.Linear', 'nn.Linear', (['action_size', 'hidden_size'], {}), '(action_size, hidden_size)\n', (1326, 1352), True, 'import torch.nn as nn\n'), ((1425, 1460), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (1434, 1460), True, 'import torch.nn as nn\n'), ((17620, 17643), 'torch.zeros_like', 'torch.zeros_like', (['pos_x'], {}), '(pos_x)\n', (17636, 17643), False, 'import torch\n'), ((17668, 17691), 'torch.zeros_like', 'torch.zeros_like', (['pos_y'], {}), '(pos_y)\n', (17684, 
17691), False, 'import torch\n'), ((34932, 35054), 'torch.nn.Conv2d', 'nn.Conv2d', (['(in_dim if i == 0 else out_dim)', 'out_dim'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'bias': '(False)'}), '(in_dim if i == 0 else out_dim, out_dim, kernel_size=kernel_size,\n stride=stride, padding=padding, bias=False)\n', (34941, 35054), True, 'import torch.nn as nn\n'), ((35174, 35195), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (35181, 35195), True, 'import torch.nn as nn\n'), ((48084, 48109), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'in_dim'], {}), '(in_dim, in_dim)\n', (48093, 48109), True, 'import torch.nn as nn\n'), ((48130, 48151), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (48137, 48151), True, 'import torch.nn as nn\n'), ((49425, 49559), 'logging.warning', 'logging.warning', (['"""Creating a linear layer to map the input dims (%d) to MLP input dim (%d)"""', 'mean_feat.shape[-1]', 'self.in_dim'], {}), "(\n 'Creating a linear layer to map the input dims (%d) to MLP input dim (%d)',\n mean_feat.shape[-1], self.in_dim)\n", (49440, 49559), False, 'import logging\n'), ((49845, 49866), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (49852, 49866), True, 'import torch.nn as nn\n'), ((54749, 54810), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_dim', 'in_dim', '(3)'], {'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(in_dim, in_dim, 3, stride=2, padding=1, bias=False)\n', (54758, 54810), True, 'import torch.nn as nn\n'), ((54824, 54845), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (54831, 54845), True, 'import torch.nn as nn\n'), ((71052, 71070), 'torch.Tensor', 'torch.Tensor', (['[-1]'], {}), '([-1])\n', (71064, 71070), False, 'import torch\n'), ((14694, 14747), 'numpy.array', 'np.array', (['([1, 0, 0, 0, 1, 0] * num_tx)'], {'dtype': 'np.float'}), '([1, 0, 0, 0, 1, 0] * num_tx, dtype=np.float)\n', (14702, 14747), True, 'import numpy as np\n'), ((17759, 17782), 'torch.zeros_like', 'torch.zeros_like', (['angle'], {}), '(angle)\n', (17775, 17782), False, 'import torch\n'), ((31588, 31603), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (31601, 31603), True, 'import torch.nn as nn\n'), ((53589, 53635), 'torch.max', 'torch.max', (['feat_enc_time'], {'dim': '(1)', 'keepdims': '(True)'}), '(feat_enc_time, dim=1, keepdims=True)\n', (53598, 53635), False, 'import torch\n'), ((64881, 64919), 'torch.unsqueeze', 'torch.unsqueeze', (['all_preds[-1]'], {'axis': '(1)'}), '(all_preds[-1], axis=1)\n', (64896, 64919), False, 'import torch\n'), ((65027, 65064), 'torch.unsqueeze', 'torch.unsqueeze', (['all_pixs[-1]'], {'axis': '(1)'}), '(all_pixs[-1], axis=1)\n', (65042, 65064), False, 'import torch\n'), ((4517, 4542), 'torch.ByteTensor', 'torch.ByteTensor', (['targets'], {}), '(targets)\n', (4533, 4542), False, 'import torch\n'), ((48308, 48328), 'torch.nn.Linear', 'nn.Linear', (['in_dim', '(1)'], {}), '(in_dim, 1)\n', (48317, 48328), True, 'import torch.nn as nn\n'), ((73135, 73155), 'torch.cat', 'torch.cat', (['pix_preds'], {}), '(pix_preds)\n', (73144, 73155), False, 'import torch\n'), ((73199, 73216), 'torch.cat', 'torch.cat', (['pix_gt'], {}), '(pix_gt)\n', (73208, 73216), False, 'import torch\n'), ((59778, 59825), 'torch.arange', 'torch.arange', (['feat.shape[1]'], {'device': 'feat.device'}), '(feat.shape[1], device=feat.device)\n', (59790, 59825), False, 'import torch\n')]
|
#!python3
# -*- coding:utf-8 -*-
# From "Pythonで始めるOpenCV4プログラミング" (Getting Started with OpenCV 4 Programming in Python)
# Author: 北山尚洋
import cv2
import sys, traceback
import numpy as np
def add(imgName1, imgName2):
try:
img1 = cv2.imread(imgName1)
img2 = cv2.imread(imgName2)
if img1 is None or img2 is None:
print("no file reading...")
sys.exit(1)
#caution!
#src file size have to same size each img1 and img2. and same type.
img1 = cv2.resize(img1, (500,500))
img2 = cv2.resize(img2, (500,500))
cv2.imshow('image1', img1)
cv2.imshow('image2', img2)
dst = cv2.add(img1, img2)
cv2.imshow('synthesize', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
except Exception as ex:
print("Error:", sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.format_tb(sys.exc_info()[2]))
finally:
pass
def addScolor(imgName1):
try:
img1 = cv2.imread(imgName1)
if img1 is None:
print("no file reading...")
sys.exit(1)
cv2.imshow("img1",img1)
height = img1.shape[0]
width = img1.shape[1]
blue = np.zeros((height, width, 3), np.uint8)
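        # OpenCV stores channels in BGR order, so [128, 0, 0] fills the image with half-intensity blue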
blue[:,:] = [128, 0, 0]
dst = cv2.add(img1, blue)
cv2.imshow("after", dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
except Exception as ex:
print("Error:", sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.format_tb(sys.exc_info()[2]))
finally:
pass
def addMask(imgName1, imgName2):
try:
img1 = cv2.imread(imgName1)
img2 = cv2.imread(imgName2)
if img1 is None or img2 is None:
print("no file reading...")
sys.exit(1)
#caution!
#src file size have to same size each img1 and img2. and same type.
img1 = cv2.resize(img1, (500,500))
img2 = cv2.resize(img2, (500,500))
cv2.imshow('image1', img1)
cv2.imshow('image2', img2)
#create mask
height = img1.shape[0]
width = img1.shape[1]
img_mask = np.zeros((height, width), np.uint8)
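        # restrict the addition to the central rectangle (half the width/height) of the images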
img_mask[ height//4:height*3//4, width//4:width*3//4 ] = [255]
#add two image with mask.
dst = cv2.add(img1, img2, mask = img_mask)
cv2.imshow('dst1', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
except Exception as ex:
print("Error:", sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.format_tb(sys.exc_info()[2]))
finally:
pass
#add(sys.argv[1], sys.argv[2])
#addScolor(sys.argv[1])
addMask(sys.argv[1], sys.argv[2])
|
[
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"cv2.imread",
"sys.exc_info",
"sys.exit",
"cv2.imshow",
"cv2.add",
"cv2.resize"
] |
[((177, 197), 'cv2.imread', 'cv2.imread', (['imgName1'], {}), '(imgName1)\n', (187, 197), False, 'import cv2\n'), ((213, 233), 'cv2.imread', 'cv2.imread', (['imgName2'], {}), '(imgName2)\n', (223, 233), False, 'import cv2\n'), ((466, 494), 'cv2.resize', 'cv2.resize', (['img1', '(500, 500)'], {}), '(img1, (500, 500))\n', (476, 494), False, 'import cv2\n'), ((509, 537), 'cv2.resize', 'cv2.resize', (['img2', '(500, 500)'], {}), '(img2, (500, 500))\n', (519, 537), False, 'import cv2\n'), ((554, 580), 'cv2.imshow', 'cv2.imshow', (['"""image1"""', 'img1'], {}), "('image1', img1)\n", (564, 580), False, 'import cv2\n'), ((589, 615), 'cv2.imshow', 'cv2.imshow', (['"""image2"""', 'img2'], {}), "('image2', img2)\n", (599, 615), False, 'import cv2\n'), ((639, 658), 'cv2.add', 'cv2.add', (['img1', 'img2'], {}), '(img1, img2)\n', (646, 658), False, 'import cv2\n'), ((667, 696), 'cv2.imshow', 'cv2.imshow', (['"""synthesize"""', 'dst'], {}), "('synthesize', dst)\n", (677, 696), False, 'import cv2\n'), ((714, 728), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (725, 728), False, 'import cv2\n'), ((737, 760), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (758, 760), False, 'import cv2\n'), ((1009, 1029), 'cv2.imread', 'cv2.imread', (['imgName1'], {}), '(imgName1)\n', (1019, 1029), False, 'import cv2\n'), ((1149, 1173), 'cv2.imshow', 'cv2.imshow', (['"""img1"""', 'img1'], {}), "('img1', img1)\n", (1159, 1173), False, 'import cv2\n'), ((1258, 1296), 'numpy.zeros', 'np.zeros', (['(height, width, 3)', 'np.uint8'], {}), '((height, width, 3), np.uint8)\n', (1266, 1296), True, 'import numpy as np\n'), ((1352, 1371), 'cv2.add', 'cv2.add', (['img1', 'blue'], {}), '(img1, blue)\n', (1359, 1371), False, 'import cv2\n'), ((1380, 1404), 'cv2.imshow', 'cv2.imshow', (['"""after"""', 'dst'], {}), "('after', dst)\n", (1390, 1404), False, 'import cv2\n'), ((1422, 1436), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1433, 1436), False, 'import cv2\n'), ((1445, 1468), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1466, 1468), False, 'import cv2\n'), ((1720, 1740), 'cv2.imread', 'cv2.imread', (['imgName1'], {}), '(imgName1)\n', (1730, 1740), False, 'import cv2\n'), ((1756, 1776), 'cv2.imread', 'cv2.imread', (['imgName2'], {}), '(imgName2)\n', (1766, 1776), False, 'import cv2\n'), ((2009, 2037), 'cv2.resize', 'cv2.resize', (['img1', '(500, 500)'], {}), '(img1, (500, 500))\n', (2019, 2037), False, 'import cv2\n'), ((2052, 2080), 'cv2.resize', 'cv2.resize', (['img2', '(500, 500)'], {}), '(img2, (500, 500))\n', (2062, 2080), False, 'import cv2\n'), ((2097, 2123), 'cv2.imshow', 'cv2.imshow', (['"""image1"""', 'img1'], {}), "('image1', img1)\n", (2107, 2123), False, 'import cv2\n'), ((2132, 2158), 'cv2.imshow', 'cv2.imshow', (['"""image2"""', 'img2'], {}), "('image2', img2)\n", (2142, 2158), False, 'import cv2\n'), ((2269, 2304), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (2277, 2304), True, 'import numpy as np\n'), ((2433, 2467), 'cv2.add', 'cv2.add', (['img1', 'img2'], {'mask': 'img_mask'}), '(img1, img2, mask=img_mask)\n', (2440, 2467), False, 'import cv2\n'), ((2478, 2501), 'cv2.imshow', 'cv2.imshow', (['"""dst1"""', 'dst'], {}), "('dst1', dst)\n", (2488, 2501), False, 'import cv2\n'), ((2559, 2593), 'cv2.add', 'cv2.add', (['img1', 'img2'], {'mask': 'img_mask'}), '(img1, img2, mask=img_mask)\n', (2566, 2593), False, 'import cv2\n'), ((2604, 2627), 'cv2.imshow', 'cv2.imshow', (['"""dst1"""', 'dst'], {}), "('dst1', 
dst)\n", (2614, 2627), False, 'import cv2\n'), ((2653, 2667), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2664, 2667), False, 'import cv2\n'), ((2676, 2699), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2697, 2699), False, 'import cv2\n'), ((336, 347), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (344, 347), False, 'import sys, traceback\n'), ((1116, 1127), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1124, 1127), False, 'import sys, traceback\n'), ((1879, 1890), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1887, 1890), False, 'import sys, traceback\n'), ((822, 836), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (834, 836), False, 'import sys, traceback\n'), ((855, 869), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (867, 869), False, 'import sys, traceback\n'), ((1530, 1544), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1542, 1544), False, 'import sys, traceback\n'), ((1563, 1577), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1575, 1577), False, 'import sys, traceback\n'), ((2761, 2775), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2773, 2775), False, 'import sys, traceback\n'), ((2794, 2808), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2806, 2808), False, 'import sys, traceback\n'), ((908, 922), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (920, 922), False, 'import sys, traceback\n'), ((1616, 1630), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1628, 1630), False, 'import sys, traceback\n'), ((2847, 2861), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2859, 2861), False, 'import sys, traceback\n')]
|
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, datasets, models
from collections import OrderedDict
from PIL import Image
import matplotlib.pyplot as plt
import json
import argparse
def load_checkpoint(checkpoint_path, model):
checkpoint = torch.load(checkpoint_path)
if(model == "vgg"):
nhu = checkpoint['nhu']
model = models.vgg11(pretrained=True)
for param in model.parameters():
param.requires_grad = False
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, nhu)),
('relu', nn.ReLU()),
('fc2', nn.Linear(nhu, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
elif(model == "densenet"):
nhu = checkpoint['nhu']
model = models.densenet121(pretrained=True)
for param in model.parameters():
param.requires_grad = False
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(1024, nhu)),
('relu', nn.ReLU()),
('fc2', nn.Linear(nhu, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
pil_image = Image.open(image)
pil_image = pil_image.resize((256, 256))
width, height = pil_image.size # Get dimensions
left = (width - 224)/2
top = (height - 224)/2
right = (width + 224)/2
bottom = (height + 224)/2
pil_image = pil_image.crop((left, top, right, bottom))
pil_image = pil_image.convert('RGB')
np_image = np.array(pil_image)/255
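    # normalize with the standard ImageNet per-channel mean and std expected by pretrained torchvision models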
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image-mean)/std
np_image = np_image.transpose((2, 0, 1))
return torch.from_numpy(np_image)
def predict(image_path, model, device="cpu", topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
print(device)
output_image = process_image(image_path).to(device)
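    # broadcast the single processed image into a dummy batch of 64 identical copies
    # (an unsqueezed batch of size 1 would work just as well)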
image = torch.zeros([64, 3, 224, 224], dtype=torch.float64).to(device)
image += output_image.to(device)
model.to(device)
model.eval()
    # torch.no_grad() only disables gradient tracking when used as a context manager
    with torch.no_grad():
        logps = model.forward(image.float())
ps = torch.exp(logps)
probability, index = torch.topk(ps, topk, dim=1)
return probability.to(device), index.to(device)
def get_input_args():
"""
Retrieves and parses the 3 command line arguments provided by the user when
they run the program from a terminal window. This function uses Python's
argparse module to created and defined these 3 command line arguments. If
the user fails to provide some or all of the 3 arguments, then the default
values are used for the missing arguments.
Command Line Arguments:
1. Image Folder as --dir with default value 'flowers'
2. CNN Model Architecture as --arch with default value 'vgg'
3. GPU as --GPU with default value 'cpu'
This function returns these arguments as an ArgumentParser object.
Parameters:
None - simply using argparse module to create & store command line arguments
Returns:
parse_args() -data structure that stores the command line arguments object
"""
# Replace None with parser.parse_args() parsed argument collection that
# you created with this function
# Creates Argument Parser object named parser
parser = argparse.ArgumentParser()
# Argument 1: that's a path to a folder
parser.add_argument('input', type=str,
help='path to the image')
parser.add_argument('ckpdir', type=str,
help='path to the folder of check point')
parser.add_argument('--arch', type=str, default='vgg',
help='The Network architecture')
parser.add_argument('--gpu', type=bool, default=False,
help='gpu enable')
    parser.add_argument('--topk', type=int, default=5,
help='topk')
parser.add_argument('--category_names', type=str, default='cat_to_name.json',
help='directory of jason file')
# Assigns variable in_args to parse_args()
in_args = parser.parse_args()
return in_args
|
[
"torch.topk",
"argparse.ArgumentParser",
"torchvision.models.vgg11",
"torch.nn.ReLU",
"torch.nn.LogSoftmax",
"torch.load",
"torchvision.models.densenet121",
"PIL.Image.open",
"torch.exp",
"numpy.array",
"torch.nn.Linear",
"torch.zeros",
"torch.no_grad",
"torch.from_numpy"
] |
[((329, 356), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (339, 356), False, 'import torch\n'), ((1656, 1673), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (1666, 1673), False, 'from PIL import Image\n'), ((2036, 2067), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (2044, 2067), True, 'import numpy as np\n'), ((2078, 2109), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (2086, 2109), True, 'import numpy as np\n'), ((2201, 2227), 'torch.from_numpy', 'torch.from_numpy', (['np_image'], {}), '(np_image)\n', (2217, 2227), False, 'import torch\n'), ((2608, 2623), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2621, 2623), False, 'import torch\n'), ((2674, 2690), 'torch.exp', 'torch.exp', (['logps'], {}), '(logps)\n', (2683, 2690), False, 'import torch\n'), ((2716, 2743), 'torch.topk', 'torch.topk', (['ps', 'topk'], {'dim': '(1)'}), '(ps, topk, dim=1)\n', (2726, 2743), False, 'import torch\n'), ((3843, 3868), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3866, 3868), False, 'import argparse\n'), ((429, 458), 'torchvision.models.vgg11', 'models.vgg11', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (441, 458), False, 'from torchvision import transforms, datasets, models\n'), ((2001, 2020), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (2009, 2020), True, 'import numpy as np\n'), ((994, 1029), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1012, 1029), False, 'from torchvision import transforms, datasets, models\n'), ((2466, 2517), 'torch.zeros', 'torch.zeros', (['[64, 3, 224, 224]'], {'dtype': 'torch.float64'}), '([64, 3, 224, 224], dtype=torch.float64)\n', (2477, 2517), False, 'import torch\n'), ((609, 630), 'torch.nn.Linear', 'nn.Linear', (['(25088)', 'nhu'], {}), '(25088, nhu)\n', (618, 630), True, 'import torch.nn as nn\n'), ((654, 663), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (661, 663), True, 'import torch.nn as nn\n'), ((686, 705), 'torch.nn.Linear', 'nn.Linear', (['nhu', '(102)'], {}), '(nhu, 102)\n', (695, 705), True, 'import torch.nn as nn\n'), ((731, 751), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (744, 751), True, 'import torch.nn as nn\n'), ((1180, 1200), 'torch.nn.Linear', 'nn.Linear', (['(1024)', 'nhu'], {}), '(1024, nhu)\n', (1189, 1200), True, 'import torch.nn as nn\n'), ((1224, 1233), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1231, 1233), True, 'import torch.nn as nn\n'), ((1256, 1275), 'torch.nn.Linear', 'nn.Linear', (['nhu', '(102)'], {}), '(nhu, 102)\n', (1265, 1275), True, 'import torch.nn as nn\n'), ((1301, 1321), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1314, 1321), True, 'import torch.nn as nn\n')]
|
# Name / Surname: <NAME>   Student No: 180401041
#import numpy.matlib  # could be used for matrix work
from matrix_operations import matrix_transpose  # import the matrix-operation helpers needed below
# D=A^T --> D= np.transpose(A) --> transpose usage
from matrix_operations import matrix_multiplication  # C=AxB (AB=C) --> C=np.dot(A,B) --> matrix multiplication
from matrix_operations import matrix_inverse  # D=A^-1 --> D= np.linalg.inv --> used to invert a matrix
from copy import copy  # used to work on a copy without losing the original data
from __init__ import __data_file__  # used for data input; __init__ (initialization) keeps the layout tidier
from __init__ import __output_file__  # used for data output
import numpy as np  # numpy is referred to as np from here on
import sys  # sys.argv stores the program name plus the parameters passed to it, as a list
import os  # not strictly needed, could be removed
# thanks to sys, C-style format specifiers such as %d and %s make numeric output easier
'''
Example use of sys (see arguments = sys.argv at the bottom of this file):
def quit_program():
    print('Exiting...')
    sys.exit()  # can be used to force the program to close
if len(sys.argv) < 2:  # useful when fewer or more parameters than expected are given
    print('You did not enter the required parameters!')
    quit_program()
elif len(sys.argv) > 2:  # sys.argv keeps the parameters I use as a list
    print('You entered too many parameters!')
    quit_program()
elif sys.argv[1] in ['-v', '-V']:
    print('Program version: 0.8')
else:
    message = 'The parameter you entered ({}) was not understood!'
    print(message.format(sys.argv[1]))
    quit_program()
---> THIS IS NOT SUFFICIENT AS IT IS AND NEEDS TO BE FLESHED OUT..
'''
'''
class BColors:
    ENDC = '\033[0m'        --> used
    BOLD = '\033[1m'        --> ??
    UNDERLINE = '\033[4m'   --> ??
    INACTIVE = '\033[90m'
    FAIL = '\033[91m'       --> used
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'    --> used
    OKBLUE = '\033[94m'
    HEADER = '\033[95m'
    COMMENT = '\033[96m'
    BLOCK = '\033[97m'
    CODE = '\033[98m'
'''
class term_renkleri:
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
#BOLD = '\033[1m'
#UNDERLINE = '\033[4m'
#COMMENT = '\033[96m'
class process:  # the main workflow class
    def __init__(self, veri_içerik: list):  # __init__ is the constructor; the self parameter is mandatory.
        """
        This class carries out all of the main operations
        :param veri_içerik: A list of data
        """
        self.veri_ = veri_içerik
        self.first_var = 0  # first index = 0, the starting point
        self.last_var = len(self.veri_)  # last index = len(self.veri_)
        self.grade_regression_result = None  # regression measures how two or more variables change together (plots make it easier to interpret)
        self.results = list()  # regression results are collected in this list by the methods below
        self.error_results = list()  # another empty list, used for the error results
    def set_data_range(self, first=0, last=0, all_=False):
        """
        Set the data range used for the regression
        :param first: First index of the data
        :param last: Last index of the data
        :param all_: Select all of the data
        :return: None
        """
        if all_:
            self.first_var = 0
            self.last_var = len(self.veri_)
        else:
            self.first_var = first  # first index ... last index
            self.last_var = last
        print(
            term_renkleri.WARNING + "Setting the data range from %d to %d!" % (self.first_var, self.last_var) + term_renkleri.ENDC)
        # e.g. setting the data range from 0 to 59
    def regression_for_grade(self, derece=0, no_print=False):  # polynomial regression of the given degree
        # to reach the solution:
        # X * solutions = Y
        # solutions = (X_t * X)^-1 * X_t * Y   (least-squares normal equations)
        solution_matrix = np.zeros(shape=(derece + 1, 1), dtype=float)  # solution matrix
        x_matrix = np.zeros(shape=((self.last_var - self.first_var), derece + 1), dtype=float)
        y_matrix = np.zeros(shape=((self.last_var - self.first_var), 1), dtype=float)
        # Prepare the matrices
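        # x_matrix is a Vandermonde-style design matrix: row i holds
        # [1, x_i, x_i^2, ..., x_i^degree], and y_matrix holds the target indices 0, 1, 2, ...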
y_index = 0
for i in range(0, (self.last_var - self.first_var)):
for j in range(0, x_matrix.shape[1]):
x_matrix[i][j] = pow(float(self.veri_[i + self.first_var]), j)
y_matrix[i][0] = float(y_index)
y_index += 1
        x_trans_matrix = matrix_transpose(x_matrix)  # X_t: transpose of the design matrix
        multi_matrix = matrix_multiplication(x_trans_matrix, x_matrix)  # X_t * X
        inversed_matrix = matrix_inverse(multi_matrix)  # (X_t * X)^-1
        multi_two_matrix = matrix_multiplication(x_trans_matrix, y_matrix)  # X_t * Y
        multi_three_matrix = matrix_multiplication(inversed_matrix, multi_two_matrix)  # (X_t * X)^-1 * X_t * Y
        solution_matrix = multi_three_matrix
        self.grade_regression_result = copy(solution_matrix)
        self.results.append(self.grade_regression_result)  # regression results appended to our list
to_printed = ""
        to_printed += str(derece) + ". degree regression results: \n"
to_printed += str(self.grade_regression_result[0])
for i in range(1, derece + 1):
to_printed += " + " + str(self.grade_regression_result[i]) + "X"
to_printed += "^^" + str(i)
to_printed += " = Y"
if not no_print:
print(to_printed)
    def calculate_most_usefull(self):  # work out which regression fits best
for i in range(len(self.results)):
avarage = 0.0
y_index = 0
for x_data in self.veri_:
X = float(x_data)
Y = y_index
y_index += 1
total = 0.0
                for j, coeff in enumerate(self.results[i]):
                    total += float(coeff) * pow(X, j)
E = total - Y
avarage += E
avarage /= len(self.veri_)
self.error_results.append(avarage)
for i in range(len(self.error_results)):
if self.error_results[i] < 0:
self.error_results[i] *= -1
the_lowest_error = self.error_results[0]
the_lowest_error_index = 0
for i in range(len(self.error_results)):
if self.error_results[i] < the_lowest_error:
the_lowest_error = self.error_results[i]
the_lowest_error_index = i
print("Polinom bölgesindeki en düşük hata (aralıklar karşılaştırıldı): %d .derece regresyon ile E=%s"
% ((the_lowest_error_index + 1), the_lowest_error))
    def veri_uzunluk(self):  # returns the number of data points
        return len(self.veri_)
    def kill_vars(self):  # reset the per-run variables
        self.grade_regression_result = None
        self.results = list()
        self.error_results = list()
    def write_to_file(self, the_dir):  # write the results to a file
with open(the_dir + "/%s" % __output_file__, "w") as fh:
to_printed = ""
for i in range(len(self.results)):
to_printed += str(i + 1) + " Reggression\t"
for j in range(len(self.results[i])):
to_printed += str(self.results[i][j]) + "X^^" + str(j) + "\t"
to_printed += "\n"
fh.write(to_printed)
print(term_renkleri.WARNING + "%s file generated!" % __output_file__ + term_renkleri.ENDC)
def main():
    if arguman:  # arguman = sys.argv[1:]; I chose to keep the parameters in the main list.
        print(term_renkleri.WARNING + "There is no argument handler!" + term_renkleri.ENDC)
    # This is not really needed, because the data file already exists
    yeni_veri = None
    working_directory = os.getcwd()
    try:  # try/except structure, used to give an error message
        with open(working_directory + "/%s" % __data_file__, "r") as fh:
            string_format = fh.read()
            a = string_format.splitlines()
            # drop the trailing empty entry if the last line of the file is blank
            for i in range(len(a)):
                if a[i] == "":
                    a.pop(len(a)-1)
            yeni_veri = copy(a)  # copy keeps the elements of the original list intact
    except FileNotFoundError:  # file-not-found error; not strictly needed either
        raise Exception("File not found! %s file" % __data_file__)
    if not yeni_veri:  # message for the case where the file exists but holds no data
        raise Exception("The file was found but reading it failed. ")
    print(term_renkleri.WARNING + "File opened and read successfully" + term_renkleri.ENDC)
    # successful file opening and data reading
    print(term_renkleri.WARNING + "Start of the first question:" + term_renkleri.ENDC)
new_process = process(yeni_veri)
new_process.set_data_range(all_=True)
new_process.regression_for_grade(derece=1)
new_process.regression_for_grade(derece=2)
new_process.regression_for_grade(derece=3)
new_process.regression_for_grade(derece=4)
new_process.regression_for_grade(derece=5)
new_process.regression_for_grade(derece=6)
    #new_process.regression_for_grade(derece=7)
    new_process.write_to_file(working_directory)
    print(term_renkleri.WARNING + "FIRST QUESTION FINISHED SUCCESSFULLY. \t" + term_renkleri.ENDC)  # first question done, moving on to the second
    print(term_renkleri.WARNING + "This is the starting point of the second one:" + term_renkleri.ENDC)
    new_process.calculate_most_usefull()
    print(term_renkleri.WARNING + "SECOND QUESTION FINISHED SUCCESSFULLY. \t" + term_renkleri.ENDC)  # second question done, moving on to the third
    print(term_renkleri.WARNING + "This is the starting point of the third one:" + term_renkleri.ENDC)
    print(term_renkleri.FAIL + "Watch out for overflow !!" + term_renkleri.ENDC)  # beware of overflow
for i in range(int(new_process.veri_uzunluk() / 10) + 1):
first = i * 10
last = i * 10 + 10 #last > first
if i >= int(new_process.veri_uzunluk() / 10):
last = new_process.veri_uzunluk()
new_process.kill_vars()
        new_process.set_data_range(first, last)  # first/last define the window used for the comparison
new_process.regression_for_grade(derece=1, no_print=True)
new_process.regression_for_grade(derece=2, no_print=True)
new_process.regression_for_grade(derece=3, no_print=True)
new_process.regression_for_grade(derece=4, no_print=True)
new_process.regression_for_grade(derece=5, no_print=True)
new_process.regression_for_grade(derece=6, no_print=True)
new_process.calculate_most_usefull()
if __name__ == '__main__':
arguman = sys.argv[1:]
main()
|
[
"matrix_operations.matrix_transpose",
"os.getcwd",
"numpy.zeros",
"copy.copy",
"matrix_operations.matrix_inverse",
"matrix_operations.matrix_multiplication"
] |
[((8209, 8220), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8218, 8220), False, 'import os\n'), ((4313, 4357), 'numpy.zeros', 'np.zeros', ([], {'shape': '(derece + 1, 1)', 'dtype': 'float'}), '(shape=(derece + 1, 1), dtype=float)\n', (4321, 4357), True, 'import numpy as np\n'), ((4392, 4465), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.last_var - self.first_var, derece + 1)', 'dtype': 'float'}), '(shape=(self.last_var - self.first_var, derece + 1), dtype=float)\n', (4400, 4465), True, 'import numpy as np\n'), ((4487, 4551), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.last_var - self.first_var, 1)', 'dtype': 'float'}), '(shape=(self.last_var - self.first_var, 1), dtype=float)\n', (4495, 4551), True, 'import numpy as np\n'), ((4906, 4932), 'matrix_operations.matrix_transpose', 'matrix_transpose', (['x_matrix'], {}), '(x_matrix)\n', (4922, 4932), False, 'from matrix_operations import matrix_transpose\n'), ((4979, 5026), 'matrix_operations.matrix_multiplication', 'matrix_multiplication', (['x_trans_matrix', 'x_matrix'], {}), '(x_trans_matrix, x_matrix)\n', (5000, 5026), False, 'from matrix_operations import matrix_multiplication\n'), ((5106, 5134), 'matrix_operations.matrix_inverse', 'matrix_inverse', (['multi_matrix'], {}), '(multi_matrix)\n', (5120, 5134), False, 'from matrix_operations import matrix_inverse\n'), ((5198, 5245), 'matrix_operations.matrix_multiplication', 'matrix_multiplication', (['x_trans_matrix', 'y_matrix'], {}), '(x_trans_matrix, y_matrix)\n', (5219, 5245), False, 'from matrix_operations import matrix_multiplication\n'), ((5284, 5340), 'matrix_operations.matrix_multiplication', 'matrix_multiplication', (['inversed_matrix', 'multi_two_matrix'], {}), '(inversed_matrix, multi_two_matrix)\n', (5305, 5340), False, 'from matrix_operations import matrix_multiplication\n'), ((5425, 5446), 'copy.copy', 'copy', (['solution_matrix'], {}), '(solution_matrix)\n', (5429, 5446), False, 'from copy import copy\n'), ((8633, 8640), 'copy.copy', 'copy', (['a'], {}), '(a)\n', (8637, 8640), False, 'from copy import copy\n')]
|
# -*- coding: utf-8 -*-
"""
Class for storing supply curves and calculating marginal costs
Created on Thu Feb 7 15:34:33 2019
@author: elisn
"""
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class SupplyCurve():
""" Has panda dataframe with list of bids
One or many generators may be added to the supply curve. The generators must be in the form
of a panda data frame, with the columns ['c2','c1','pmax']
The marginal cost of a generator is given by MC = 2*c2*q + c1, where q ranges from 0 to pmax
Hence a generator with c2 = 0 has constant marginal cost
(Thus note that the coefficients are those for the Total Cost function)
It is also possible to add bids, which then must have the columns [cap,mc_min,mc_max]
Note that the internal functions use the bid structure.
Class methods:
price2quantity(price) - calculates the quantity offered for a given price, straightforward calculation
quantity2price(quantity) - calculates price required for given quantity
not straightforward since constant bids produce discrete jumps in the offered quantity
plot() - plots supply curve
"""
def __init__(self,bids = pd.DataFrame(columns=['cap','mc_min','mc_max']),gens = pd.DataFrame(columns=['c2','c1','pmax']) ):
self.bids = bids.append(get_generator_bids(gens),ignore_index=True).sort_values(by=['mc_min','mc_max'])
self._calculate_inflection_points_()
def add_bids(self,bids):
""" Add bids to supply curve, in the form of a data frame """
self.bids = self.bids.append(bids,ignore_index=True).sort_values(by=['mc_min','mc_max'])
self._calculate_inflection_points_()
def add_gens(self,gens):
""" Add generators with c1, c2, pmax coefficients to supply curve """
self.bids = self.bids.append(get_generator_bids(gens),ignore_index=True).sort_values(by=['mc_min','mc_max'])
self._calculate_inflection_points_()
def price2quantity(self,price):
""" Calculate the offered quantity for a given price """
# loop over bids, calculate offer by each
quantity = 0
for i in self.bids.index:
if price >= self.bids.loc[i,'mc_min']:
if self.bids.loc[i,'mc_min'] != self.bids.loc[i,'mc_max']: # variable MC
q = (price - self.bids.loc[i,'mc_min'])/(self.bids.loc[i,'mc_max']-self.bids.loc[i,'mc_min'])*self.bids.loc[i,'cap']
if q > self.bids.loc[i,'cap']:
q = self.bids.loc[i,'cap']
quantity += q
else: # fixed MC
quantity += self.bids.loc[i,'cap']
else:
# mc_min exceeds price, can exit as bids are sorted by increasing mc_min
return quantity
return quantity
def _calculate_inflection_points_(self):
""" Find all inflection points in the supply curve """
ppoints = []
for i in self.bids.index:
if self.bids.loc[i,'mc_min'] not in ppoints:
ppoints.append(self.bids.loc[i,'mc_min'])
if self.bids.loc[i,'mc_max'] not in ppoints:
ppoints.append(self.bids.loc[i,'mc_max'])
ppoints.sort()
        # find corresponding quantities
qpoints = []
for point in ppoints:
qpoints.append(self.price2quantity(point))
self.xprice = ppoints
self.xquant = qpoints
def quantity2price(self,quantity,plot=False,verbose=False):
""" Calculate minimum price needed for given quantity """
idx = 0
while True:
if idx == self.xprice.__len__():
# quantity > qmax, not enough capacity
if verbose:
print("Insufficient capacity: {0} MW available, but quantity = {1:.3}".format(self.xquant[-1],quantity))
#return np.nan
p = np.nan
break
elif self.xquant[idx] < quantity:
idx += 1 # go to next price level
else:
if idx == 0:
# quantity <= 0 - return lowest marginal cost
#print("Non-positive quantity = {0:.3}, returning lowest available MC".format(quantity))
#return self.xprice[0]
p = self.xprice[0]
break
elif self.xquant[idx] == quantity:
# price corresponds exactly to quantity
#return self.xprice[idx]
p = self.xprice[idx]
break
else:
# check if offer curve is linear by evaluating quantity between prices
if self.price2quantity(self.xprice[idx-1]+(self.xprice[idx]-self.xprice[idx-1])/2) > self.xquant[idx-1]:
# if offer curve is linear, interpolate to find correct price
# Note: Cannot interpolate linearly to next intersection point, as there
# the curve may consist of a linear horizontal section to the next point
# Thus we must instead find the inverse slope by summing the inverse slopes
# of linear bids at this point
# use inverse slope at price xprice[idx] for interpolation
p = self.xprice[idx-1] + (quantity-self.xquant[idx-1]) / self._find_slope_(self.xprice[idx])
if p > self.xprice[idx]: # cap price increase up to xprice[idx]
# if idx == 3:
# print(p)
# pass
p = self.xprice[idx]
#return p
break
else:
# else return this price
p = self.xprice[idx]
#return self.xprice[idx]
break
if plot:
# plot supply curve with determined point
self.plot(qpoints=[quantity],ppoints=[p])
return p
def _find_slope_(self,price):
""" Find the slope of the supply curve, in MW/EUR (quantity/price) for given price """
# loop over all linear bids and see which are active in this price range
slope = 0 # slope in MW/EUR
for index in self.bids.index:
if self.bids.loc[index,'mc_min'] != self.bids.loc[index,'mc_max'] and \
price > self.bids.loc[index,'mc_min'] and price <= self.bids.loc[index,'mc_max']:
slope += self.bids.loc[index,'cap']/(self.bids.loc[index,'mc_max']-self.bids.loc[index,'mc_min'])
return slope
def plot(self,qpoints=[],ppoints=[]):
""" Plot supply curve """
x_quantity = np.linspace(0,self.xquant[-1])
y_price = np.array([self.quantity2price(x) for x in x_quantity])
y2_price = np.linspace(self.xprice[0],self.xprice[-1])
x2_quantity = np.array([self.price2quantity(p) for p in y2_price])
# # merge data points into single array
# x = np.array([x for x,_ in sorted(zip(list(x_quantity)+list(x2_quantity),list(y_price)+list(y2_price)))])
# y = np.array([y for _,y in sorted(zip(list(x_quantity)+list(x2_quantity),list(y_price)+list(y2_price)))])
#
plt.plot()
plt.plot(x_quantity,y_price,'*')
plt.plot(x2_quantity,y2_price,'*')
#plt.plot(x,y)
# add given points to plot
if qpoints.__len__() > 0:
plt.plot(np.array(qpoints),np.array(ppoints),'r*')
plt.grid()
plt.xlabel('MW')
plt.ylabel('EUR/MWh')
plt.title('Supply curve')
plt.legend(['quantity2price','price2quantity'])
plt.show()
def get_curve(self):
""" Return x and y vector with points to plot the offer curve """
x_quantity = np.linspace(0,self.xquant[-1])
y_price = np.array([self.quantity2price(x) for x in x_quantity])
y2_price = np.linspace(self.xprice[0],self.xprice[-1])
x2_quantity = np.array([self.price2quantity(p) for p in y2_price])
# merge data points into single array
x = np.array([x for x,_ in sorted(zip(list(x_quantity)+list(x2_quantity),list(y_price)+list(y2_price)))])
y = np.array([y for _,y in sorted(zip(list(x_quantity)+list(x2_quantity),list(y_price)+list(y2_price)))])
return x,y
def get_generator_bids(gens):
""" Takes a panda dataframe with generator info, and returns a dataframe with bids
with the columns [cap,mc_min,mc_max]
cap - total capacity of bid
mc_min - minimum marginal cost (=c1)
mc_max - maximum marginal cost (=2*c2)
"""
bids = pd.DataFrame(columns=['cap','mc_min','mc_max'])
bids.loc[:,'cap'] = gens.loc[:,'pmax']
bids.loc[:,'mc_min'] = gens.loc[:,'c1']
bids.loc[:,'mc_max'] = gens.loc[:,'pmax'] * gens.loc[:,'c2']*2 + gens.loc[:,'c1']
bids.index = list(range(bids.__len__()))
return bids
if __name__ == "__main__":
with open('Data/generators.pkl','rb') as f:
gens = pickle.load(f)
# gens = pd.DataFrame(columns=['c1','c2','pmax'],index=[1,2])
# gens.loc[1,:] = [10,0,10000]
# gens.loc[2,:] = [20,0,10000]
# gens.loc[3,:] = [15,0.0005,10000]
s = SupplyCurve(gens=gens)
s.plot()
s.add_bids(pd.DataFrame(np.array([[10000,10,10],[10000,80,80]]),columns=['cap','mc_min','mc_max']))
s.plot()
x,y = s.get_curve()
plt.plot(x,y)
|
[
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"pickle.load",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((9079, 9128), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['cap', 'mc_min', 'mc_max']"}), "(columns=['cap', 'mc_min', 'mc_max'])\n", (9091, 9128), True, 'import pandas as pd\n'), ((9860, 9874), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (9868, 9874), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1328), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['cap', 'mc_min', 'mc_max']"}), "(columns=['cap', 'mc_min', 'mc_max'])\n", (1291, 1328), True, 'import pandas as pd\n'), ((1334, 1376), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['c2', 'c1', 'pmax']"}), "(columns=['c2', 'c1', 'pmax'])\n", (1346, 1376), True, 'import pandas as pd\n'), ((7080, 7111), 'numpy.linspace', 'np.linspace', (['(0)', 'self.xquant[-1]'], {}), '(0, self.xquant[-1])\n', (7091, 7111), True, 'import numpy as np\n'), ((7212, 7256), 'numpy.linspace', 'np.linspace', (['self.xprice[0]', 'self.xprice[-1]'], {}), '(self.xprice[0], self.xprice[-1])\n', (7223, 7256), True, 'import numpy as np\n'), ((7635, 7645), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (7643, 7645), True, 'import matplotlib.pyplot as plt\n'), ((7654, 7688), 'matplotlib.pyplot.plot', 'plt.plot', (['x_quantity', 'y_price', '"""*"""'], {}), "(x_quantity, y_price, '*')\n", (7662, 7688), True, 'import matplotlib.pyplot as plt\n'), ((7695, 7731), 'matplotlib.pyplot.plot', 'plt.plot', (['x2_quantity', 'y2_price', '"""*"""'], {}), "(x2_quantity, y2_price, '*')\n", (7703, 7731), True, 'import matplotlib.pyplot as plt\n'), ((7893, 7903), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7901, 7903), True, 'import matplotlib.pyplot as plt\n'), ((7912, 7928), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MW"""'], {}), "('MW')\n", (7922, 7928), True, 'import matplotlib.pyplot as plt\n'), ((7937, 7958), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""EUR/MWh"""'], {}), "('EUR/MWh')\n", (7947, 7958), True, 'import matplotlib.pyplot as plt\n'), ((7967, 7992), 'matplotlib.pyplot.title', 'plt.title', (['"""Supply curve"""'], {}), "('Supply curve')\n", (7976, 7992), True, 'import matplotlib.pyplot as plt\n'), ((8001, 8049), 'matplotlib.pyplot.legend', 'plt.legend', (["['quantity2price', 'price2quantity']"], {}), "(['quantity2price', 'price2quantity'])\n", (8011, 8049), True, 'import matplotlib.pyplot as plt\n'), ((8057, 8067), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8065, 8067), True, 'import matplotlib.pyplot as plt\n'), ((8206, 8237), 'numpy.linspace', 'np.linspace', (['(0)', 'self.xquant[-1]'], {}), '(0, self.xquant[-1])\n', (8217, 8237), True, 'import numpy as np\n'), ((8338, 8382), 'numpy.linspace', 'np.linspace', (['self.xprice[0]', 'self.xprice[-1]'], {}), '(self.xprice[0], self.xprice[-1])\n', (8349, 8382), True, 'import numpy as np\n'), ((9459, 9473), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9470, 9473), False, 'import pickle\n'), ((9737, 9781), 'numpy.array', 'np.array', (['[[10000, 10, 10], [10000, 80, 80]]'], {}), '([[10000, 10, 10], [10000, 80, 80]])\n', (9745, 9781), True, 'import numpy as np\n'), ((7843, 7860), 'numpy.array', 'np.array', (['qpoints'], {}), '(qpoints)\n', (7851, 7860), True, 'import numpy as np\n'), ((7861, 7878), 'numpy.array', 'np.array', (['ppoints'], {}), '(ppoints)\n', (7869, 7878), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from sklearn.metrics import classification_report
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
import matplotlib.pyplot as plt
import numpy as np
import os
from tnmlearn.callbacks import TrainingMonitor
# %%
class BaseLearningModel:
def __init__(self):
self.callbacks = []
def buildTrainMonCB_(self, outputpath):
# construct the set of callbacks
figPath = os.path.sep.join([outputpath, "{}.png".format(
os.getpid())])
jsonPath = os.path.sep.join([outputpath, "{}.json".format(
os.getpid())])
self.callbacks.append(TrainingMonitor(figPath, jsonPath=jsonPath))
def buildModelChkPointCB_(self, weightpath):
# construct the callback to save only the *best* model to disk
# based on the validation loss
fname = os.path.sep.join([weightpath,
"weights-{epoch:03d}-{val_loss:.4f}.hdf5"])
checkpoint = ModelCheckpoint(fname, monitor="val_loss", mode="min",
save_best_only=True, verbose=1)
self.callbacks.append(checkpoint)
def fit_(self, epochs=100, batch_size=32):
# train the model using SGD
print("[INFO] training network...")
H = self.model.fit(self.trainX, self.trainY,
callbacks=self.callbacks,
validation_data=(self.testX, self.testY),
epochs=epochs, batch_size=batch_size)
self.H = H
return H
def plotModel_(self, outputpath):
plot_model(self.model, to_file=outputpath, show_shapes=True)
def plot(self):
# plot the training loss and accuracy
H = self.H
epochs = len(H.history['loss'])
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, epochs), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.show()
def evaluate_(self, batch_size):
# evaluate the network
print("[INFO] evaluating network...")
predictions = self.model.predict(self.testX, batch_size=batch_size)
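    # argmax converts one-hot labels / class probabilities into class indices for the report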
print(classification_report(self.testY.argmax(axis=1),
predictions.argmax(axis=1), target_names=self.classNames))
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"os.getpid",
"keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"keras.utils.plot_model",
"matplotlib.pyplot.style.use",
"numpy.arange",
"tnmlearn.callbacks.TrainingMonitor",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.sep.join"
] |
[((829, 902), 'os.path.sep.join', 'os.path.sep.join', (["[weightpath, 'weights-{epoch:03d}-{val_loss:.4f}.hdf5']"], {}), "([weightpath, 'weights-{epoch:03d}-{val_loss:.4f}.hdf5'])\n", (845, 902), False, 'import os\n'), ((927, 1017), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['fname'], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'save_best_only': '(True)', 'verbose': '(1)'}), "(fname, monitor='val_loss', mode='min', save_best_only=True,\n verbose=1)\n", (942, 1017), False, 'from keras.callbacks import ModelCheckpoint\n'), ((1499, 1559), 'keras.utils.plot_model', 'plot_model', (['self.model'], {'to_file': 'outputpath', 'show_shapes': '(True)'}), '(self.model, to_file=outputpath, show_shapes=True)\n', (1509, 1559), False, 'from keras.utils import plot_model\n'), ((1681, 1704), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1694, 1704), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1721), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1719, 1721), True, 'import matplotlib.pyplot as plt\n'), ((2022, 2061), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss and Accuracy"""'], {}), "('Training Loss and Accuracy')\n", (2031, 2061), True, 'import matplotlib.pyplot as plt\n'), ((2066, 2087), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (2076, 2087), True, 'import matplotlib.pyplot as plt\n'), ((2092, 2119), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss/Accuracy"""'], {}), "('Loss/Accuracy')\n", (2102, 2119), True, 'import matplotlib.pyplot as plt\n'), ((2124, 2136), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2134, 2136), True, 'import matplotlib.pyplot as plt\n'), ((2141, 2151), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2149, 2151), True, 'import matplotlib.pyplot as plt\n'), ((621, 664), 'tnmlearn.callbacks.TrainingMonitor', 'TrainingMonitor', (['figPath'], {'jsonPath': 'jsonPath'}), '(figPath, jsonPath=jsonPath)\n', (636, 664), False, 'from tnmlearn.callbacks import TrainingMonitor\n'), ((1735, 1755), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (1744, 1755), True, 'import numpy as np\n'), ((1809, 1829), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (1818, 1829), True, 'import numpy as np\n'), ((1885, 1905), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (1894, 1905), True, 'import numpy as np\n'), ((1957, 1977), 'numpy.arange', 'np.arange', (['(0)', 'epochs'], {}), '(0, epochs)\n', (1966, 1977), True, 'import numpy as np\n'), ((494, 505), 'os.getpid', 'os.getpid', ([], {}), '()\n', (503, 505), False, 'import os\n'), ((580, 591), 'os.getpid', 'os.getpid', ([], {}), '()\n', (589, 591), False, 'import os\n')]
|
import numpy as np
import tensorflow as tf
import csv
def classify_state(X, n_state):
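    # map a value X into one of n_state bins of width 2.5, counting down from up=80;
    # returns (bin index, upper edge of the bin)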
up = 80
if (0 <= X <= 2.5):
return n_state - 1, 2.5
for i in range(n_state - 1):
if (up - (i + 1) * 2.5 < X <= up - i * 2.5):
return i, up - i * 2.5
def GA(max_prob_index, n_actions):
values = np.zeros(n_actions)
jjj = 0
for i in range(n_actions):
values[i] = jjj
jjj = jjj + 2.5
return values[max_prob_index]
def GR(X, x, n_actions, round_size, n_state):
values = np.zeros(n_actions)
jjj = 0
for i in range(n_actions):
values[i] = jjj
jjj = jjj + 2.5
reward = np.zeros(n_actions)
flag = 0
_, down = classify_state(X[x + 1][round_size - 1], n_state)
for i in range(n_actions):
if (down + 2.5 >= values[i] > down):
reward[i] = 1
elif (down + 5 >= values[i] >= down + 2.5):
reward[i] = 2
elif (down + 7.5 >= values[i] > down+5):
reward[i] = 3
else:
reward[i] = -1
return reward, flag, values
def classify_losspackge(diff, one_hot_state, n_state):
if (diff == 0):
class_one_hot = one_hot_state[0]
for i in range(int((n_state / 2) - 1)):
if (2.5 * i < diff <= 2.5 * (i + 1)):
class_one_hot = one_hot_state[i + 1]
if (2.5 * (int(n_state / 2) - 1) < diff):
class_one_hot = one_hot_state[int(n_state / 2) - 1]
for i in range(int(n_state / 2) - 2):
if (-2.5 * (i + 1) <= diff < -2.5 * (i)):
class_one_hot = one_hot_state[int(n_state / 2) - 1 + i + 1]
if (-2.5 * (int(n_state / 2) - 2) > diff):
class_one_hot = one_hot_state[int(n_state / 2) - 1 + int(n_state / 2) - 2 + 1]
return class_one_hot
def lstm_test(cell_number, n_actions, n_state, epoch, one_hot_state, X,
model_i, round_size):
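    # rebuild the LSTM policy graph, restore the checkpoint for this epoch/model,
    # then replay the trace in X and append the RAB/DLR statistics to a CSV file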
tf.reset_default_graph()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
input = tf.placeholder(tf.float32, [None, round_size , n_state], name="input_x") # 1*30
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=cell_number, state_is_tuple=True)
_, final_state = tf.nn.dynamic_rnn(cell=lstm_cell, inputs=input, dtype=tf.float32)
W3 = tf.get_variable("W3", shape=[cell_number, n_actions],
initializer=tf.contrib.layers.xavier_initializer())
B3 = tf.get_variable("B3", shape=[1, n_actions],
initializer=tf.constant_initializer())
score = tf.matmul(final_state[1], W3) + B3
probability = tf.nn.softmax(score)
restore_path = './model_' + str(model_i) + '/' + str(epoch) + '.ckpt'
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, restore_path)
down_count = len(X)
RAB = np.zeros(2)
DLR = np.zeros(2)
G_list = []
X_list = []
G =80 #15.5
G_list.append(G)
batch_reward, batch_state, all_reward_for_loss, batch_action, all_action_for_loss = [], [], [], [], []
g_count=0
for x in range(len(X)-1):
if (x != 0):
if (G > 80):
G_list.append(80)
else:
G_list.append(action_values)
g_count=g_count+1
R_state = []
for i in range(round_size):
#print(len(X[x][i]))
state_arg, D = classify_state(X[x][i], n_state)
state_ = one_hot_state[state_arg]
R_state.append(state_)
batch_state.append(R_state)
state = np.reshape(R_state, [1, round_size , n_state])
tfprob = sess.run(probability, feed_dict={input: state})
max_prob_index = np.argmax(tfprob[0])
loss_package = G - X[x][round_size - 1]
if (loss_package >= 0):
RAB[0] = RAB[0] + loss_package
RAB[1] = RAB[1] + 1
else:
DLR[0] = DLR[0] + (-1) * loss_package
DLR[1] = DLR[1] + 1
action_values = GA(max_prob_index, n_actions)
reward, flag, values = GR(X, x, n_actions, round_size, n_state)
X_list.append(X[x][round_size - 1])
G = action_values
batch_reward.append(reward)
all_reward_for_loss.append(reward)
x_count=down_count
if (RAB[1] != 0):
RAB_ = RAB[0] / RAB[1]
else:
RAB_ = 0
if (DLR[1] != 0):
DLR_ = DLR[0] / DLR[1]
else:
DLR_ = 0
with open('./model_' + str(model_i) + '/lost_package.csv', 'a',
newline='') as p:
writer = csv.writer(p)
writer.writerow(['RAB', 'DLR'])
writer.writerow([RAB_, DLR_])
return x_count,g_count, G_list
|
[
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.nn.softmax",
"csv.writer",
"tensorflow.train.Saver",
"numpy.argmax",
"tensorflow.nn.dynamic_rnn",
"tensorflow.reset_default_graph",
"tensorflow.constant_initializer",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.nn.rnn_cell.BasicLSTMCell",
"tensorflow.matmul",
"tensorflow.ConfigProto",
"numpy.reshape",
"tensorflow.GPUOptions"
] |
[((325, 344), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (333, 344), True, 'import numpy as np\n'), ((531, 550), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (539, 550), True, 'import numpy as np\n'), ((655, 674), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (663, 674), True, 'import numpy as np\n'), ((1883, 1907), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1905, 1907), True, 'import tensorflow as tf\n'), ((1926, 1978), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.333)'}), '(per_process_gpu_memory_fraction=0.333)\n', (1939, 1978), True, 'import tensorflow as tf\n'), ((2061, 2132), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, round_size, n_state]'], {'name': '"""input_x"""'}), "(tf.float32, [None, round_size, n_state], name='input_x')\n", (2075, 2132), True, 'import tensorflow as tf\n'), ((2158, 2230), 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', ([], {'num_units': 'cell_number', 'state_is_tuple': '(True)'}), '(num_units=cell_number, state_is_tuple=True)\n', (2186, 2230), True, 'import tensorflow as tf\n'), ((2252, 2317), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'lstm_cell', 'inputs': 'input', 'dtype': 'tf.float32'}), '(cell=lstm_cell, inputs=input, dtype=tf.float32)\n', (2269, 2317), True, 'import tensorflow as tf\n'), ((2640, 2660), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['score'], {}), '(score)\n', (2653, 2660), True, 'import tensorflow as tf\n'), ((2748, 2764), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2762, 2764), True, 'import tensorflow as tf\n'), ((2587, 2616), 'tensorflow.matmul', 'tf.matmul', (['final_state[1]', 'W3'], {}), '(final_state[1], W3)\n', (2596, 2616), True, 'import tensorflow as tf\n'), ((2774, 2786), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2784, 2786), True, 'import tensorflow as tf\n'), ((2882, 2893), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2890, 2893), True, 'import numpy as np\n'), ((2908, 2919), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2916, 2919), True, 'import numpy as np\n'), ((4729, 4742), 'csv.writer', 'csv.writer', (['p'], {}), '(p)\n', (4739, 4742), False, 'import csv\n'), ((2008, 2047), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (2022, 2047), True, 'import tensorflow as tf\n'), ((2418, 2456), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (2454, 2456), True, 'import tensorflow as tf\n'), ((2548, 2573), 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {}), '()\n', (2571, 2573), True, 'import tensorflow as tf\n'), ((3679, 3724), 'numpy.reshape', 'np.reshape', (['R_state', '[1, round_size, n_state]'], {}), '(R_state, [1, round_size, n_state])\n', (3689, 3724), True, 'import numpy as np\n'), ((3824, 3844), 'numpy.argmax', 'np.argmax', (['tfprob[0]'], {}), '(tfprob[0])\n', (3833, 3844), True, 'import numpy as np\n')]
|
import os
import pdb
import numpy as np
from fastestimator.summary.logs import parse_log_file
from scipy.stats import ttest_ind
from tabulate import tabulate
def get_best_step(objective, eval_steps, result, mode, train_history):
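    # find the first eval step whose metric reaches `objective`, then return the
    # training step with the lowest loss inside that evaluation interval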
obj_step = 0
for idx, value in enumerate(result):
if (mode == "max" and value >= objective) or (mode == "min"
and value <= objective):
obj_step = eval_steps[idx]
break
upper_step = obj_step
lower_step = eval_steps[idx - 1]
min_loss = None
min_train_step = None
for train_step, train_loss in train_history.items():
if train_step > lower_step and train_step <= upper_step:
if min_loss is None:
min_loss = train_loss
min_train_step = train_step
elif train_loss < min_loss:
min_loss = train_loss
min_train_step = train_step
return min_train_step
def get_column_mean_std(all_data,
best_mode,
lrname,
lr_schedules,
arc_name="lr-controller-weighted-acc"):
if best_mode == "max":
get_best = np.max
get_worst = np.min
elif best_mode == "min":
get_best = np.min
get_worst = np.max
else:
raise ValueError("best_mode needs to be one of ['max', 'min']")
column_data = all_data[lrname]
best_numbers = []
for lr_schedule in lr_schedules:
lr_schedule_data = column_data[lr_schedule]
for step, result, _ in lr_schedule_data:
best_numbers.append(get_best(result))
convergence_target = get_worst(best_numbers)
br_dict, bs_dict = {}, {}
for lr_schedule in lr_schedules:
best_step, best_result = [], []
lr_schedule_data = column_data[lr_schedule]
for step, result, train_history in lr_schedule_data:
best_result.append(get_best(result))
best_step.append(
get_best_step(convergence_target, step, result, best_mode,
train_history))
br_dict[lr_schedule] = best_result
bs_dict[lr_schedule] = best_step
table = []
for lr_schedule in lr_schedules:
best_result = br_dict[lr_schedule]
best_step = bs_dict[lr_schedule]
br_display = f"{np.mean(best_result):.4f}"
bs_display = f"{np.mean(best_step):.0f}"
if np.mean(best_result) == get_best(
[np.mean(x) for x in br_dict.values()]):
br_display += "*"
if np.mean(best_step) == min([np.mean(x) for x in bs_dict.values()]):
bs_display += "*"
if ttest_ind(br_dict[arc_name], br_dict[lr_schedule]).pvalue < 0.05:
br_display += "#"
if ttest_ind(bs_dict[arc_name], bs_dict[lr_schedule]).pvalue < 0.05:
bs_display += "#"
table.append([
lr_schedule, br_display, f"{np.std(best_result):.4f}", bs_display,
f"{np.std(best_step):.0f}"
])
print(
tabulate(table,
headers=[
"scheduler", "metric mean", "metric std", "step mean",
"step std"
],
tablefmt="github"))
def get_column_median(all_data,
best_mode,
lrname,
lr_schedules,
arc_name="lr-controller-weighted-acc"):
if best_mode == "max":
get_best = np.max
get_worst = np.min
elif best_mode == "min":
get_best = np.min
get_worst = np.max
else:
raise ValueError("best_mode needs to be one of ['max', 'min']")
column_data = all_data[lrname]
best_numbers = []
for lr_schedule in lr_schedules:
lr_schedule_data = column_data[lr_schedule]
for step, result, _ in lr_schedule_data:
best_numbers.append(get_best(result))
convergence_target = get_worst(best_numbers)
br_dict, bs_dict = {}, {}
for lr_schedule in lr_schedules:
best_step, best_result = [], []
lr_schedule_data = column_data[lr_schedule]
for step, result, train_history in lr_schedule_data:
best_result.append(get_best(result))
best_step.append(
get_best_step(convergence_target, step, result, best_mode,
train_history))
br_dict[lr_schedule] = best_result
bs_dict[lr_schedule] = best_step
table = []
for lr_schedule in lr_schedules:
best_result = br_dict[lr_schedule]
best_step = bs_dict[lr_schedule]
br_display = f"{np.median(best_result):.4f}"
bs_display = f"{np.median(best_step):.0f}"
if np.median(best_result) == get_best(
[np.median(x) for x in br_dict.values()]):
br_display += "*"
if np.median(best_step) == min(
[np.median(x) for x in bs_dict.values()]):
bs_display += "*"
if ttest_ind(br_dict[arc_name], br_dict[lr_schedule]).pvalue < 0.05:
br_display += "#"
if ttest_ind(bs_dict[arc_name], bs_dict[lr_schedule]).pvalue < 0.05:
bs_display += "#"
table.append([
lr_schedule,
br_display,
bs_display,
])
print(
tabulate(table,
headers=[
"scheduler",
"metric median",
"step median",
],
tablefmt="github"))
def check_file_complete(folder_path):
filenames = [
fname for fname in os.listdir(folder_path) if fname.endswith(".txt")
]
lr_set = set()
schedule_set = set()
id_set = set()
# get the set of lr, scheduler, id
for filename in filenames:
configs = os.path.splitext(filename)[0].split("_")
lr_name, lr_schedule_name, run_id = configs
lr_set.add(lr_name)
schedule_set.add(lr_schedule_name)
id_set.add(run_id)
# check all combinations exist
for lr in lr_set:
for schedule in schedule_set:
for run in id_set:
filename = f"{lr}_{schedule}_{run}.txt"
assert os.path.exists(os.path.join(
folder_path, filename)), f"{filename} is missing"
def print_table(folder_path, best_mode, metric_name, loss_name, mode):
if mode == "mean_std":
print_func = get_column_mean_std
elif mode == "median":
print_func = get_column_median
else:
raise ValueError("mode needs to be one of ['mean_std', 'median']")
check_file_complete(folder_path)
all_data = {}
filenames = [
fname for fname in os.listdir(folder_path) if fname.endswith(".txt")
]
for filename in filenames:
filepath = os.path.join(folder_path, filename)
configs = os.path.splitext(filename)[0].split("_")
lr_name, lr_schedule_name, run_id = configs
summary = parse_log_file(filepath, ".txt")
result = np.array(
[acc for acc in summary.history["eval"][metric_name].values()])
steps = np.array(
[acc for acc in summary.history["eval"][metric_name].keys()])
train_history = summary.history["train"][loss_name]
if lr_name not in all_data:
all_data[lr_name] = {}
if lr_schedule_name not in all_data[lr_name]:
all_data[lr_name][lr_schedule_name] = []
all_data[lr_name][lr_schedule_name].append(
(steps, result, train_history))
for lrname in sorted(list(all_data.keys()), reverse=True):
print(
f"========================================== lrname={lrname} ==========================================="
)
print_func(all_data,
best_mode,
lrname=lrname,
lr_schedules=[
"base-lr", "cosine-decay", "cyclic-cosine-decay",
"exponential-decay", "lr-controller-weighted-acc"
])
if __name__ == "__main__":
print_table(
mode="median", # "median" or "mean_std"
folder_path=
"/mnt/c/Users/212770359/Downloads/ARC-master/iccv/logs/normal_comparison/language_modeling", # path of the log dir
best_mode="min", # "max" or "min"
metric_name="perplexity", # evaluation metric
loss_name="ce") # loss key
|
[
"fastestimator.summary.logs.parse_log_file",
"numpy.median",
"numpy.std",
"scipy.stats.ttest_ind",
"numpy.mean",
"tabulate.tabulate",
"os.path.splitext",
"os.path.join",
"os.listdir"
] |
[((3104, 3219), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': "['scheduler', 'metric mean', 'metric std', 'step mean', 'step std']", 'tablefmt': '"""github"""'}), "(table, headers=['scheduler', 'metric mean', 'metric std',\n 'step mean', 'step std'], tablefmt='github')\n", (3112, 3219), False, 'from tabulate import tabulate\n'), ((5397, 5490), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': "['scheduler', 'metric median', 'step median']", 'tablefmt': '"""github"""'}), "(table, headers=['scheduler', 'metric median', 'step median'],\n tablefmt='github')\n", (5405, 5490), False, 'from tabulate import tabulate\n'), ((6894, 6929), 'os.path.join', 'os.path.join', (['folder_path', 'filename'], {}), '(folder_path, filename)\n', (6906, 6929), False, 'import os\n'), ((7059, 7091), 'fastestimator.summary.logs.parse_log_file', 'parse_log_file', (['filepath', '""".txt"""'], {}), "(filepath, '.txt')\n", (7073, 7091), False, 'from fastestimator.summary.logs import parse_log_file\n'), ((2489, 2509), 'numpy.mean', 'np.mean', (['best_result'], {}), '(best_result)\n', (2496, 2509), True, 'import numpy as np\n'), ((2618, 2636), 'numpy.mean', 'np.mean', (['best_step'], {}), '(best_step)\n', (2625, 2636), True, 'import numpy as np\n'), ((4806, 4828), 'numpy.median', 'np.median', (['best_result'], {}), '(best_result)\n', (4815, 4828), True, 'import numpy as np\n'), ((4939, 4959), 'numpy.median', 'np.median', (['best_step'], {}), '(best_step)\n', (4948, 4959), True, 'import numpy as np\n'), ((5690, 5713), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (5700, 5713), False, 'import os\n'), ((6787, 6810), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (6797, 6810), False, 'import os\n'), ((2401, 2421), 'numpy.mean', 'np.mean', (['best_result'], {}), '(best_result)\n', (2408, 2421), True, 'import numpy as np\n'), ((2452, 2470), 'numpy.mean', 'np.mean', (['best_step'], {}), '(best_step)\n', (2459, 2470), True, 'import numpy as np\n'), ((2727, 2777), 'scipy.stats.ttest_ind', 'ttest_ind', (['br_dict[arc_name]', 'br_dict[lr_schedule]'], {}), '(br_dict[arc_name], br_dict[lr_schedule])\n', (2736, 2777), False, 'from scipy.stats import ttest_ind\n'), ((2835, 2885), 'scipy.stats.ttest_ind', 'ttest_ind', (['bs_dict[arc_name]', 'bs_dict[lr_schedule]'], {}), '(bs_dict[arc_name], bs_dict[lr_schedule])\n', (2844, 2885), False, 'from scipy.stats import ttest_ind\n'), ((4714, 4736), 'numpy.median', 'np.median', (['best_result'], {}), '(best_result)\n', (4723, 4736), True, 'import numpy as np\n'), ((4767, 4787), 'numpy.median', 'np.median', (['best_step'], {}), '(best_step)\n', (4776, 4787), True, 'import numpy as np\n'), ((5065, 5115), 'scipy.stats.ttest_ind', 'ttest_ind', (['br_dict[arc_name]', 'br_dict[lr_schedule]'], {}), '(br_dict[arc_name], br_dict[lr_schedule])\n', (5074, 5115), False, 'from scipy.stats import ttest_ind\n'), ((5173, 5223), 'scipy.stats.ttest_ind', 'ttest_ind', (['bs_dict[arc_name]', 'bs_dict[lr_schedule]'], {}), '(bs_dict[arc_name], bs_dict[lr_schedule])\n', (5182, 5223), False, 'from scipy.stats import ttest_ind\n'), ((2536, 2546), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2543, 2546), True, 'import numpy as np\n'), ((2645, 2655), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2652, 2655), True, 'import numpy as np\n'), ((4855, 4867), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (4864, 4867), True, 'import numpy as np\n'), ((4981, 4993), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (4990, 4993), True, 'import numpy as 
np\n'), ((5898, 5924), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (5914, 5924), False, 'import os\n'), ((6310, 6345), 'os.path.join', 'os.path.join', (['folder_path', 'filename'], {}), '(folder_path, filename)\n', (6322, 6345), False, 'import os\n'), ((6948, 6974), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (6964, 6974), False, 'import os\n'), ((2995, 3014), 'numpy.std', 'np.std', (['best_result'], {}), '(best_result)\n', (3001, 3014), True, 'import numpy as np\n'), ((3049, 3066), 'numpy.std', 'np.std', (['best_step'], {}), '(best_step)\n', (3055, 3066), True, 'import numpy as np\n')]
|
import numpy as np
def filter_ids(array, clinical_ids):
# list of array indices that need to be deleted
del_indices = []
i = 0
for img in array:
id = img[-1]
if id not in clinical_ids:
del_indices.append(i)
i = i + 1
array = np.delete(array, del_indices, axis=0)
return array
|
[
"numpy.delete"
] |
[((286, 323), 'numpy.delete', 'np.delete', (['array', 'del_indices'], {'axis': '(0)'}), '(array, del_indices, axis=0)\n', (295, 323), True, 'import numpy as np\n')]
|
import cdutil
import cdat_info
import cdms2
import cdms2,cdutil,sys,MV2,numpy,os,cdat_info
import unittest
import numpy
import tempfile
class CDUTIL(unittest.TestCase):
def testRegions(self):
regionNA = cdutil.region.domain(latitude=(-50.,50.,'ccb'))
f=cdms2.open(cdat_info.get_sampledata_path()+'/clt.nc')
d=f('u', regionNA)
# --------------------------------------------------------
# makesure the warning has been displayed for the 3rd args
# --------------------------------------------------------
bounds = d.getLatitude().getBounds()
self.assertTrue(numpy.allclose(bounds[0], numpy.array([-50., -49.19124603])))
self.assertTrue(numpy.allclose(bounds[-1], numpy.array([49.19124603, 50.])))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"cdutil.region.domain",
"numpy.array",
"cdat_info.get_sampledata_path"
] |
[((809, 824), 'unittest.main', 'unittest.main', ([], {}), '()\n', (822, 824), False, 'import unittest\n'), ((218, 269), 'cdutil.region.domain', 'cdutil.region.domain', ([], {'latitude': "(-50.0, 50.0, 'ccb')"}), "(latitude=(-50.0, 50.0, 'ccb'))\n", (238, 269), False, 'import cdms2, cdutil, sys, MV2, numpy, os, cdat_info\n'), ((287, 318), 'cdat_info.get_sampledata_path', 'cdat_info.get_sampledata_path', ([], {}), '()\n', (316, 318), False, 'import cdms2, cdutil, sys, MV2, numpy, os, cdat_info\n'), ((655, 689), 'numpy.array', 'numpy.array', (['[-50.0, -49.19124603]'], {}), '([-50.0, -49.19124603])\n', (666, 689), False, 'import numpy\n'), ((742, 774), 'numpy.array', 'numpy.array', (['[49.19124603, 50.0]'], {}), '([49.19124603, 50.0])\n', (753, 774), False, 'import numpy\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
COS method
==========
The method comes from [1]_
The original code is found at
http://www.wilmott.com/messageview.cfm?catid=34&threadid=78554
References
----------
.. [1] <NAME>., & <NAME>. (2009).
A Novel Pricing Method for European Options
Based on Fourier-Cosine Series Expansions.
*SIAM Journal on Scientific Computing*, 31(2), 826. doi:10.1137/080718061
<http://ta.twi.tudelft.nl/mf/users/oosterle/oosterlee/COS.pdf>
"""
from __future__ import division, print_function
import numpy as np
import numexpr as ne
__all__ = ['cosmethod']
def cosmethod(model, moneyness=0., call=True, npoints=2**10):
"""COS method.
Parameters
----------
model : instance of specific model class
The method depends on availability of two methods:
- charfun
- cos_restriction
moneyness : array_like
Moneyness of the option, np.log(strike/price) - riskfree * maturity
call : bool array_like
Call/Put flag
npoints : int
Number of points on the grid. The more the better, but slower.
Returns
-------
array_like
Option premium normalized by asset price
Notes
-----
    `charfun` method (risk-neutral conditional characteristic function)
of `model` instance should depend on
one argument only (array_like) and should return
array_like of the same dimension.
`cos_restriction` method of `model` instance takes `maturity`
and `riskfree` as array arguments,
and returns two corresponding arrays (a, b).
"""
if not hasattr(model, 'charfun'):
raise Exception('Characteristic function is not available!')
if not hasattr(model, 'cos_restriction'):
raise Exception('COS restriction is not available!')
# (nobs, ) arrays
alim, blim = model.cos_restriction()
# (npoints, nobs) array
kvec = np.arange(npoints)[:, np.newaxis] * np.pi / (blim - alim)
# (npoints, ) array
unit = np.append(.5, np.ones(npoints-1))
# Arguments
argc = (kvec, alim, blim, 0, blim)
argp = (kvec, alim, blim, alim, 0)
# (nobs, ) array
put = np.logical_not(call)
# (npoints, nobs) array
umat = 2 / (blim - alim) * (call * xfun(*argc) - put * xfun(*argp))
# (npoints, nobs) array
pmat = model.charfun(kvec)
# (npoints, nobs) array
xmat = np.exp(-1j * kvec * (moneyness + alim))
# (nobs, ) array
return np.exp(moneyness) * np.dot(unit, pmat * umat * xmat).real
def xfun(k, a, b, c, d):
"""Xi-Psi function.
Parameters
----------
k : (n, 1) array
a : float or (m, ) array
b : float or (m, ) array
c : float or (m, ) array
d : float or (m, ) array
Returns
-------
(n, m) array
"""
# out0 = (np.cos(k * (d-a)) * np.exp(d) - np.cos(k * (c-a)) * np.exp(c)
# + k * (np.sin(k * (d-a)) * np.exp(d) - np.sin(k * (c-a)) * np.exp(c)))\
# / (1 + k**2)
# out1 = (np.sin(k[1:] * (d-a)) - np.sin(k[1:] * (c-a))) / k[1:]
out0 = ne.evaluate(("(cos(k * (d-a)) * exp(d) - cos(k * (c-a)) * exp(c)"
"+ k * (sin(k * (d-a)) * exp(d) - sin(k * (c-a)) * exp(c)))"
"/ (1 + k**2)"))
k1 = k[1:]
out1 = ne.evaluate("(sin(k1 * (d-a)) - sin(k1 * (c-a))) / k1")
out1 = np.vstack([(d - c) * np.ones_like(a), out1])
return out0 - out1
if __name__ == '__main__':
pass
|
[
"numpy.ones_like",
"numpy.logical_not",
"numpy.ones",
"numexpr.evaluate",
"numpy.arange",
"numpy.exp",
"numpy.dot"
] |
[((2168, 2188), 'numpy.logical_not', 'np.logical_not', (['call'], {}), '(call)\n', (2182, 2188), True, 'import numpy as np\n'), ((2387, 2428), 'numpy.exp', 'np.exp', (['(-1.0j * kvec * (moneyness + alim))'], {}), '(-1.0j * kvec * (moneyness + alim))\n', (2393, 2428), True, 'import numpy as np\n'), ((3045, 3190), 'numexpr.evaluate', 'ne.evaluate', (['"""(cos(k * (d-a)) * exp(d) - cos(k * (c-a)) * exp(c)+ k * (sin(k * (d-a)) * exp(d) - sin(k * (c-a)) * exp(c)))/ (1 + k**2)"""'], {}), "(\n '(cos(k * (d-a)) * exp(d) - cos(k * (c-a)) * exp(c)+ k * (sin(k * (d-a)) * exp(d) - sin(k * (c-a)) * exp(c)))/ (1 + k**2)'\n )\n", (3056, 3190), True, 'import numexpr as ne\n'), ((3231, 3286), 'numexpr.evaluate', 'ne.evaluate', (['"""(sin(k1 * (d-a)) - sin(k1 * (c-a))) / k1"""'], {}), "('(sin(k1 * (d-a)) - sin(k1 * (c-a))) / k1')\n", (3242, 3286), True, 'import numexpr as ne\n'), ((2023, 2043), 'numpy.ones', 'np.ones', (['(npoints - 1)'], {}), '(npoints - 1)\n', (2030, 2043), True, 'import numpy as np\n'), ((2459, 2476), 'numpy.exp', 'np.exp', (['moneyness'], {}), '(moneyness)\n', (2465, 2476), True, 'import numpy as np\n'), ((2479, 2511), 'numpy.dot', 'np.dot', (['unit', '(pmat * umat * xmat)'], {}), '(unit, pmat * umat * xmat)\n', (2485, 2511), True, 'import numpy as np\n'), ((1916, 1934), 'numpy.arange', 'np.arange', (['npoints'], {}), '(npoints)\n', (1925, 1934), True, 'import numpy as np\n'), ((3320, 3335), 'numpy.ones_like', 'np.ones_like', (['a'], {}), '(a)\n', (3332, 3335), True, 'import numpy as np\n')]
|
import numpy as np
from glob import glob
from scipy import ndimage
from keras import callbacks
from keras.optimizers import Adamax, SGD, RMSprop
import resnet50
def convert_to_one_hot(Y, C):
'''Converts array with labels to one-hot encoding
Keyword Arguments:
Y -- 1-dimensional numpy array containing labeled values
C -- total number of labels in Y
'''
Y = np.eye(C)[Y.reshape(-1)].T
return Y
def load_dataset(datapath, composers):
'''Loads dataset into memory
Keyword Arguments:
datapath -- absolute or relative path to dataset location
composers -- list of composer names included in the dataset
'''
folders = glob('%s/*' %datapath)
X_train = []
Y_train = []
for folder in folders:
files = glob('%s\\*.jpg' %folder)
print('working on composer: %s' %(folder.split('\\')[-1]))
for f in files:
im = ndimage.imread(f, mode='L')
im = im/255
im = im.reshape(im.shape[0], im.shape[1], 1)
X_train.append(im)
Y_train.append(composers.index(folder.split('\\')[-1]))
return np.asarray(X_train), np.asarray(Y_train)
if __name__ == '__main__':
print('setting model')
    model = resnet50.ResNet50(input_shape = (70, 400, 1), classes = 7)
epochs = 100
learning_rate = 0.001
lr_decay = 0.001/100
print('compiling model...')
#optimizer_instance = Adam(lr=learning_rate, decay=lr_decay)#lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=0.001)
#optimizer_instance = Adamax(lr=learning_rate, decay=lr_decay)
optimizer_instance = SGD(lr=learning_rate, decay=lr_decay)
#optimizer_instance = RMSprop(lr=learning_rate, decay=lr_decay)
model.compile(optimizer=optimizer_instance, loss='categorical_crossentropy', metrics=['acc'])
print('loading dataset......')
composers = ['Bach', 'Beethoven', 'Brahms', 'Chopin', 'Grieg', 'Liszt', 'Mozart']
datapath = 'Dataset_Train_Medium/'
X_train, Y_train = load_dataset(datapath, composers)
datapath_val = 'Dataset_Dev_Medium/'
X_test, Y_test = load_dataset(datapath_val, composers)
print('applying one-hot-encoding')
Y_train = convert_to_one_hot(Y_train, 7).T
Y_test = convert_to_one_hot(Y_test, 7).T
print('setting up callbacks...')
nancheck = callbacks.TerminateOnNaN()
filepath = 'Models/weights-improvement-{epoch:02d}-{acc:.2f}.hdf5'
saver = callbacks.ModelCheckpoint(filepath, monitor='acc', verbose=1, save_best_only=False, mode='max', period=1)
logger = callbacks.CSVLogger('model-weights/trainingresults.log')
callbacklist = [nancheck, saver, logger]
print('starting model fitting')
model.fit(X_train, Y_train, validation_data = (X_test, Y_test), epochs=epochs, batch_size=72, callbacks=callbacklist)
print('Saving model.........')
model.save('second_run.h5')
|
[
"keras.optimizers.SGD",
"keras.callbacks.TerminateOnNaN",
"numpy.eye",
"keras.callbacks.ModelCheckpoint",
"numpy.asarray",
"glob.glob",
"keras.callbacks.CSVLogger",
"scipy.ndimage.imread"
] |
[((676, 699), 'glob.glob', 'glob', (["('%s/*' % datapath)"], {}), "('%s/*' % datapath)\n", (680, 699), False, 'from glob import glob\n'), ((1609, 1646), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'learning_rate', 'decay': 'lr_decay'}), '(lr=learning_rate, decay=lr_decay)\n', (1612, 1646), False, 'from keras.optimizers import Adamax, SGD, RMSprop\n'), ((2318, 2344), 'keras.callbacks.TerminateOnNaN', 'callbacks.TerminateOnNaN', ([], {}), '()\n', (2342, 2344), False, 'from keras import callbacks\n'), ((2428, 2537), 'keras.callbacks.ModelCheckpoint', 'callbacks.ModelCheckpoint', (['filepath'], {'monitor': '"""acc"""', 'verbose': '(1)', 'save_best_only': '(False)', 'mode': '"""max"""', 'period': '(1)'}), "(filepath, monitor='acc', verbose=1,\n save_best_only=False, mode='max', period=1)\n", (2453, 2537), False, 'from keras import callbacks\n'), ((2547, 2603), 'keras.callbacks.CSVLogger', 'callbacks.CSVLogger', (['"""model-weights/trainingresults.log"""'], {}), "('model-weights/trainingresults.log')\n", (2566, 2603), False, 'from keras import callbacks\n'), ((777, 803), 'glob.glob', 'glob', (["('%s\\\\*.jpg' % folder)"], {}), "('%s\\\\*.jpg' % folder)\n", (781, 803), False, 'from glob import glob\n'), ((1131, 1150), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (1141, 1150), True, 'import numpy as np\n'), ((1152, 1171), 'numpy.asarray', 'np.asarray', (['Y_train'], {}), '(Y_train)\n', (1162, 1171), True, 'import numpy as np\n'), ((390, 399), 'numpy.eye', 'np.eye', (['C'], {}), '(C)\n', (396, 399), True, 'import numpy as np\n'), ((911, 938), 'scipy.ndimage.imread', 'ndimage.imread', (['f'], {'mode': '"""L"""'}), "(f, mode='L')\n", (925, 938), False, 'from scipy import ndimage\n')]
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
target = ["True", "False"]
el_decay = ["True", "False"]
error = np.array([[4.478, 3.483],
[3.647, 2.502]])
fig, ax = plt.subplots()
im = ax.imshow(error)
# We want to show all ticks...
ax.set_xticks(np.arange(len(el_decay)))
ax.set_yticks(np.arange(len(target)))
# ... and label them with the respective list entries
ax.set_xticklabels(el_decay)
ax.set_yticklabels(target)
ax.set_ylabel("Target electron")
ax.set_xlabel("Electron-Electron shift decay")
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(target)):
for j in range(len(el_decay)):
text = ax.text(j, i, error[i, j],
ha="center", va="center", color="w")
ax.set_title("Shift: Energy error (mHa) for Nitrogen")
fig.tight_layout()
plt.show()
|
[
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((135, 177), 'numpy.array', 'np.array', (['[[4.478, 3.483], [3.647, 2.502]]'], {}), '([[4.478, 3.483], [3.647, 2.502]])\n', (143, 177), True, 'import numpy as np\n'), ((210, 224), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (222, 224), True, 'import matplotlib.pyplot as plt\n'), ((986, 996), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (994, 996), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import copy
class Particle:
def __init__(self, lb, ub):
"""Initialize the particle.
Attributes
----------
lb : float
lower bounds for initial values
ub : float
upper bounds for initial values
"""
self.lb = lb
self.ub = ub
self.position = np.random.uniform(lb, ub, size=lb.shape[0])
self.velocity = np.random.uniform(lb, ub, size=lb.shape[0])
self.fitness = None
self.pbest_position = self.position
self.pbest_fitness = float('inf')
def move(self):
self.position += self.velocity
class Swarm:
def __init__(self, function_list, n_particles, n_iterations,
lb, ub, w=0.7, c1=2.0, c2=2.0):
"""Initialize the swarm.
Attributes
---------
function_list : list
list of functions to optimize
n_particles : int
number of particles in swarm
n_iterations : int
number of optimization iterations
lb : float
lower bounds for initial values
ub : float
upper bounds for initial values
w : float
inertia weight
c1 : float
cognitive weight
c2 : float
social weight
"""
self.function_list = function_list
self.n_obj = len(function_list)
self.n_particles = n_particles
self.n_iterations = n_iterations
assert len(lb) == len(ub)
self.lb = np.array(lb)
self.ub = np.array(ub)
self.w = w
self.c1 = c1
self.c2 = c2
self.gbest_position = np.random.uniform(lb, ub, size=self.lb.shape[0])
self.gbest_fitness = float('inf')
self.population = []
self.iteration = 0
def reset_environment(self):
self.population = []
self.iteration = 0
def termination_check(self):
if self.iteration > self.n_iterations:
return False
else:
return True
def initialise_swarm(self):
for _ in range(self.n_particles):
self.population.append(Particle(self.lb, self.ub))
def eval_fitness(self, particle):
"""Evaluate particle fitness based on all functions in function_list"""
_fitness = 0
for func in self.function_list:
_fitness += func(particle.position)
particle.fitness = _fitness
def swarm_eval_fitness(self):
for particle in self.population:
self.eval_fitness(particle)
def update_velocity(self, particle):
inertia = self.w * particle.velocity
cognitive = (self.c1 * np.random.uniform()
* (particle.pbest_position - particle.position))
social = (self.c2 * np.random.uniform()
* (self.gbest_position - particle.position))
particle.velocity = inertia + cognitive + social
def swarm_update_velocity(self):
for particle in self.population:
self.update_velocity(particle)
def update_pbest(self, particle):
if particle.fitness < particle.pbest_fitness:
particle.pbest_fitness = particle.fitness
particle.pbest_position = particle.position
def update_gbest(self, particle):
if particle.fitness < self.gbest_fitness:
self.gbest_fitness = copy.deepcopy(particle.fitness)
self.gbest_position = copy.deepcopy(particle.position)
def swarm_update_best(self):
for particle in self.population:
self.update_pbest(particle)
self.update_gbest(particle)
def swarm_move(self):
for particle in self.population:
particle.move()
def optimise(self):
self.reset_environment()
self.initialise_swarm()
while self.termination_check():
self.swarm_eval_fitness()
self.swarm_update_best()
self.swarm_update_velocity()
self.swarm_move()
self.iteration += 1
if __name__ == '__main__':
print('MOPSO: Aggregating Approach')
def function_one(position):
return np.square(position[0])
def function_two(position):
return np.square(position[0] - 2)
function_list = [function_one, function_two]
n_particles = 30
n_iterations = 100
lb = [-100]
ub = [100]
swarm = Swarm(function_list=function_list,
n_particles=n_particles,
n_iterations=n_iterations,
lb=lb,
ub=ub)
swarm.optimise()
print('gbest_position: ', swarm.gbest_position)
print('gbest_fitness: ', swarm.gbest_fitness)
|
[
"numpy.random.uniform",
"copy.deepcopy",
"numpy.square",
"numpy.array"
] |
[((362, 405), 'numpy.random.uniform', 'np.random.uniform', (['lb', 'ub'], {'size': 'lb.shape[0]'}), '(lb, ub, size=lb.shape[0])\n', (379, 405), True, 'import numpy as np\n'), ((430, 473), 'numpy.random.uniform', 'np.random.uniform', (['lb', 'ub'], {'size': 'lb.shape[0]'}), '(lb, ub, size=lb.shape[0])\n', (447, 473), True, 'import numpy as np\n'), ((1556, 1568), 'numpy.array', 'np.array', (['lb'], {}), '(lb)\n', (1564, 1568), True, 'import numpy as np\n'), ((1587, 1599), 'numpy.array', 'np.array', (['ub'], {}), '(ub)\n', (1595, 1599), True, 'import numpy as np\n'), ((1693, 1741), 'numpy.random.uniform', 'np.random.uniform', (['lb', 'ub'], {'size': 'self.lb.shape[0]'}), '(lb, ub, size=self.lb.shape[0])\n', (1710, 1741), True, 'import numpy as np\n'), ((4201, 4223), 'numpy.square', 'np.square', (['position[0]'], {}), '(position[0])\n', (4210, 4223), True, 'import numpy as np\n'), ((4272, 4298), 'numpy.square', 'np.square', (['(position[0] - 2)'], {}), '(position[0] - 2)\n', (4281, 4298), True, 'import numpy as np\n'), ((3421, 3452), 'copy.deepcopy', 'copy.deepcopy', (['particle.fitness'], {}), '(particle.fitness)\n', (3434, 3452), False, 'import copy\n'), ((3487, 3519), 'copy.deepcopy', 'copy.deepcopy', (['particle.position'], {}), '(particle.position)\n', (3500, 3519), False, 'import copy\n'), ((2715, 2734), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2732, 2734), True, 'import numpy as np\n'), ((2833, 2852), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2850, 2852), True, 'import numpy as np\n')]
|
import pygame
from pygame.locals import *
from constants import *
from copy import deepcopy
import numpy as np
from heuristic import *
class Player(object):
def __init__(self, color, player_num):
self.color = color
self.direction = UP
self.player_num = player_num
self.move_counter = 0 # Keeps track of movement to regulate growth rate
loc = P1_LOC if player_num == 1 else P2_LOC
self.segments = [Rect(loc[0], loc[1], CELL_WIDTH, CELL_WIDTH)]
def direction_valid(self,direction):
if (direction == UP and self.direction == DOWN):
return False
if (direction == LEFT and self.direction == RIGHT):
return False
if (direction == DOWN and self.direction == UP):
return False
if (direction == RIGHT and self.direction == LEFT):
return False
return True
def set_direction(self, direction):
if self.direction_valid(direction):
self.direction = direction
def set_color(self, color):
self.color = color
def clone(self, player=None, direction=None):
if player == None:
player = self
cloned_player = deepcopy(player)
if direction != None:
cloned_player.direction = direction
cloned_player.move()
return cloned_player
def get_state(self, other_player):
state = np.zeros((GAME_HEIGHT/CELL_WIDTH, GAME_WIDTH/CELL_WIDTH))
for rect in self.segments:
loc = rect.topleft
x,y = loc[0]/CELL_WIDTH, loc[1]/CELL_WIDTH
state[y,x] = FRIENDLY
for rect in other_player.segments:
loc = rect.topleft
x,y = loc[0]/CELL_WIDTH, loc[1]/CELL_WIDTH
state[y,x] = OPPONENT
return state
def has_collided(self, other_player, head = None):
segments_to_check = self.segments[:]
if head == None:
head = self.segments[0]
segments_to_check.pop(0)
head_loc = head.topleft
return (not (0 <= head_loc[0] <= GAME_WIDTH - CELL_WIDTH) or
not (0 <= head_loc[1] <= GAME_HEIGHT - CELL_WIDTH) or
head.collidelist(segments_to_check) != -1 or
head.collidelist(other_player.segments) != -1)
def draw(self, display_surface):
for segment in self.segments:
pygame.draw.rect(display_surface, self.color, segment)
def move(self):
head_loc = self.segments[0].topleft
delta = DIRECTION_DELTAS[self.direction]
new_x = head_loc[0] + delta['x'] * CELL_WIDTH
new_y = head_loc[1] + delta['y'] * CELL_WIDTH
head = Rect(new_x, new_y, CELL_WIDTH, CELL_WIDTH)
self.segments.insert(0, head)
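        # the new rect becomes the head; the tail segment is popped below only when move_counter wraps to 0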
self.move_counter = (self.move_counter + 1) % PLAYER_GROWTH_RATE
if self.move_counter == 0:
self.segments.pop() # Remove last segment of tail
""" Chooses the next move to make in the game.
Subclasses of Player (aka custom bots) should override this method.
other_player is a dict object with the following key/values:
direction: The other player's current direction (i.e. UP)
segments: Copy of list of segments of the other player
"""
def choose_move(self, other_player):
self.move()
|
[
"pygame.draw.rect",
"copy.deepcopy",
"numpy.zeros"
] |
[((1204, 1220), 'copy.deepcopy', 'deepcopy', (['player'], {}), '(player)\n', (1212, 1220), False, 'from copy import deepcopy\n'), ((1417, 1478), 'numpy.zeros', 'np.zeros', (['(GAME_HEIGHT / CELL_WIDTH, GAME_WIDTH / CELL_WIDTH)'], {}), '((GAME_HEIGHT / CELL_WIDTH, GAME_WIDTH / CELL_WIDTH))\n', (1425, 1478), True, 'import numpy as np\n'), ((2397, 2451), 'pygame.draw.rect', 'pygame.draw.rect', (['display_surface', 'self.color', 'segment'], {}), '(display_surface, self.color, segment)\n', (2413, 2451), False, 'import pygame\n')]
|
import numpy as np
def to_array(image):
array = np.array(image, dtype=np.float32)[..., :3]
array = array / 255.
return array
def l2_normalize(x, axis=0):
norm = np.linalg.norm(x, axis=axis, keepdims=True)
return x / norm
def distance(a, b):
# Euclidean distance
# return np.linalg.norm(a - b)
# Cosine distance, ||a|| and ||b|| is one because embeddings are normalized.
# No need to compute np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
return np.dot(a, b)
|
[
"numpy.array",
"numpy.dot",
"numpy.linalg.norm"
] |
[((180, 223), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (194, 223), True, 'import numpy as np\n'), ((498, 510), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (504, 510), True, 'import numpy as np\n'), ((53, 86), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float32'}), '(image, dtype=np.float32)\n', (61, 86), True, 'import numpy as np\n')]
|
''' control systems - ode simulation
@link https://www.youtube.com/watch?v=yp5x8RMNi7o
'''
import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt
def sys_ode(x, t):
# set system constants
c = 4 # damping constant
k = 2 # spring stiffness constant
m = 20 # point-mass
F = 5 # input force into the system
# compute state first derivative
dx1 = x[1]
dx2 = (F - c*x[1] - k*x[0])/m
return [dx1, dx2]
def sim():
# set constants
t_0 = 0
t_f = 60
period = 0.1
# set state initial condition
x_init = [0, 0]
# set a discrete time stamp
t = np.arange(t_0, t_f, period)
x = odeint(sys_ode, x_init, t)
x1 = x[:,0]
x2 = x[:,1]
plt.plot(t,x1)
plt.plot(t,x2)
plt.title('Mass-Spring-Damper System')
plt.xlabel('t')
plt.ylabel('x(t)')
plt.legend(['x1', 'x2'])
plt.grid()
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.odeint",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((611, 638), 'numpy.arange', 'np.arange', (['t_0', 't_f', 'period'], {}), '(t_0, t_f, period)\n', (620, 638), True, 'import numpy as np\n'), ((645, 671), 'scipy.integrate.odeint', 'odeint', (['sys_ode', 'x_init', 't'], {}), '(sys_ode, x_init, t)\n', (651, 671), False, 'from scipy.integrate import odeint\n'), ((704, 719), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x1'], {}), '(t, x1)\n', (712, 719), True, 'from matplotlib import pyplot as plt\n'), ((721, 736), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x2'], {}), '(t, x2)\n', (729, 736), True, 'from matplotlib import pyplot as plt\n'), ((738, 776), 'matplotlib.pyplot.title', 'plt.title', (['"""Mass-Spring-Damper System"""'], {}), "('Mass-Spring-Damper System')\n", (747, 776), True, 'from matplotlib import pyplot as plt\n'), ((779, 794), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (789, 794), True, 'from matplotlib import pyplot as plt\n'), ((797, 815), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x(t)"""'], {}), "('x(t)')\n", (807, 815), True, 'from matplotlib import pyplot as plt\n'), ((818, 842), 'matplotlib.pyplot.legend', 'plt.legend', (["['x1', 'x2']"], {}), "(['x1', 'x2'])\n", (828, 842), True, 'from matplotlib import pyplot as plt\n'), ((845, 855), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (853, 855), True, 'from matplotlib import pyplot as plt\n'), ((858, 868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (866, 868), True, 'from matplotlib import pyplot as plt\n')]
|
import numpy as np
a_matris = [[2,0,0],
[0,2,0],
[0,0,2]]
x_matris = []
b_matris = [2, 4, 9]
u_a_matris = np.triu(a_matris)
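# back-substitution: treat U = np.triu(A) as an upper-triangular system U x = b
# and solve from the last row upward (np.triu only zeroes the lower triangle)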
x3 = float(b_matris[2])/u_a_matris[2][2]
x2 = float(b_matris[1] - x3*u_a_matris[1][2])/u_a_matris[1][1]
x1 = float(b_matris[0] - x2*u_a_matris[0][1] - x3*u_a_matris[0][2])/u_a_matris[0][0]
print(x1, x2, x3)
|
[
"numpy.triu"
] |
[((132, 149), 'numpy.triu', 'np.triu', (['a_matris'], {}), '(a_matris)\n', (139, 149), True, 'import numpy as np\n')]
|
import os
import logging
import galsim
import galsim.config
import piff
import numpy as np
import ngmix
if ngmix.__version__[0:2] == "v1":
NGMIX_V2 = False
from ngmix.fitting import LMSimple
from ngmix.admom import Admom
else:
NGMIX_V2 = True
from ngmix.fitting import Fitter
from ngmix.admom import AdmomFitter
from scipy.interpolate import CloughTocher2DInterpolator
logger = logging.getLogger(__name__)
# pixel scale used for fitting the Piff models
PIFF_SCALE = 0.25
class DES_Piff(object):
"""A wrapper for Piff to use with Galsim.
This wrapper uses ngmix to fit smooth models to the Piff PSF images. The
parameters of these models are then interpolated across the SE image
and used to generate a smooth approximation to the PSF.
Parameters
----------
file_name : str
The file with the Piff psf solution.
smooth : bool, optional
If True, then smooth the Piff PSFs. Default of False.
"""
_req_params = {'file_name': str}
_opt_params = {}
_single_params = []
_takes_rng = False
def __init__(self, file_name, smooth=False):
self.file_name = file_name
# Read the Piff file. This may fail if the Piff
# file is missing. We catch this and continue
# since if we're substituting in some different
# PSF model for rejectlisted piff files, we'll
# never actually use self._piff
try:
self._piff = piff.read(
os.path.expanduser(os.path.expandvars(file_name)))
except IOError:
print("failed to load Piff file, hopefully it's rejectlisted...")
self._piff = None
self._did_fit = False
self.smooth = smooth
def _fit_smooth_model(self):
dxy = 256
ny = 4096 // dxy + 1
nx = 2048 // dxy + 1
xloc = np.empty((ny, nx), dtype=np.float64)
yloc = np.empty((ny, nx), dtype=np.float64)
pars = np.empty((ny, nx, 3), dtype=np.float64)
for yi, yl in enumerate(np.linspace(1, 4096, ny)):
for xi, xl in enumerate(np.linspace(1, 2048, nx)):
rng = np.random.RandomState(seed=yi + nx * xi)
xloc[yi, xi] = xl
yloc[yi, xi] = yl
pos = galsim.PositionD(x=xl, y=yl)
gs_img = self._draw(pos).drawImage(
nx=19, ny=19, scale=PIFF_SCALE, method='sb')
img = gs_img.array
nse = np.std(
np.concatenate([img[0, :], img[-1, :]]))
obs = ngmix.Observation(
image=img,
weight=np.ones_like(img)/nse**2,
jacobian=ngmix.jacobian.DiagonalJacobian(
x=9, y=9, scale=PIFF_SCALE))
_g1 = np.nan
_g2 = np.nan
_T = np.nan
# there are some nutty PSFs
if gs_img.calculateFWHM() > 0.5:
for _ in range(5):
try:
if NGMIX_V2:
am = AdmomFitter(rng=rng)
res = am.go(obs, 0.3)
if res['flags'] != 0:
continue
lm = Fitter(model='turb')
lm_res = lm.go(obs, res['pars'])
if lm_res['flags'] == 0:
_g1 = lm_res['pars'][2]
_g2 = lm_res['pars'][3]
_T = lm_res['pars'][4]
break
else:
am = Admom(obs, rng=rng)
am.go(0.3)
res = am.get_result()
if res['flags'] != 0:
continue
lm = LMSimple(obs, 'turb')
lm.go(res['pars'])
lm_res = lm.get_result()
if lm_res['flags'] == 0:
_g1 = lm_res['pars'][2]
_g2 = lm_res['pars'][3]
_T = lm_res['pars'][4]
break
except ngmix.gexceptions.GMixRangeError:
pass
try:
irr, irc, icc = ngmix.moments.g2mom(_g1, _g2, _T)
# this is a fudge factor that gets the overall PSF FWHM
# correct
# the naive correction for the pixel size is
# a bit too small
pixel_var = PIFF_SCALE * PIFF_SCALE / 12 * 1.73
irr -= pixel_var
icc -= pixel_var
_g1, _g2, _T = ngmix.moments.mom2g(irr, irc, icc)
except Exception:
_g1 = np.nan
_g2 = np.nan
_T = np.nan
pars[yi, xi, 0] = _g1
pars[yi, xi, 1] = _g2
pars[yi, xi, 2] = _T
xloc = xloc.ravel()
yloc = yloc.ravel()
pos = np.stack([xloc, yloc], axis=1)
assert pos.shape == (xloc.shape[0], 2)
# make interps
g1 = pars[:, :, 0].ravel()
msk = np.isfinite(g1)
if len(msk) < 10:
raise ValueError('DES Piff fitting failed too much!')
if np.any(~msk):
g1[~msk] = np.mean(g1[msk])
self._g1int = CloughTocher2DInterpolator(
pos, g1, fill_value=np.mean(g1[msk]))
g2 = pars[:, :, 1].ravel()
msk = np.isfinite(g2)
if len(msk) < 10:
raise ValueError('DES Piff fitting failed too much!')
if np.any(~msk):
g2[~msk] = np.mean(g2[msk])
self._g2int = CloughTocher2DInterpolator(
pos, g2, fill_value=np.mean(g2[msk]))
T = pars[:, :, 2].ravel()
msk = np.isfinite(T)
if len(msk) < 10:
raise ValueError('DES Piff fitting failed too much!')
if np.any(~msk):
T[~msk] = np.mean(T[msk])
self._Tint = CloughTocher2DInterpolator(
pos, T, fill_value=np.mean(T[msk]))
self._did_fit = True
def _draw(self, image_pos, wcs=None, n_pix=None,
x_interpolant='lanczos15', gsparams=None):
"""Get an image of the PSF at the given location.
Parameters
----------
image_pos : galsim.Position
The image position for the PSF.
wcs : galsim.BaseWCS or subclass, optional
The WCS to use to draw the PSF.
n_pix : int, optional
The image size to use when drawing without smoothing. Defaults to
53 pixels if not given
x_interpolant : str, optional
The interpolant to use.
gsparams : galsim.GSParams, optional
            Optional galsim configuration data to pass along.
Returns
-------
psf : galsim.InterpolatedImage
The PSF at the image position.
"""
if wcs is not None:
if n_pix is not None:
n_pix = n_pix
else:
n_pix = 53
pixel_wcs = wcs.local(image_pos)
else:
n_pix = 19
pixel_wcs = galsim.PixelScale(PIFF_SCALE)
# nice and big image size here cause this has been a problem
image = galsim.ImageD(ncol=n_pix, nrow=n_pix, wcs=pixel_wcs)
psf = self.getPiff().draw(
image_pos.x,
image_pos.y,
image=image,
center=True,
)
psf = galsim.InterpolatedImage(
galsim.ImageD(psf.array), # make sure galsim is not keeping state
wcs=pixel_wcs,
gsparams=gsparams,
x_interpolant=x_interpolant
).withFlux(
1.0
)
return psf
def getPiff(self):
return self._piff
def getPSF(
self, image_pos, wcs=None,
smooth=False, n_pix=None, **kwargs
):
"""Get an image of the PSF at the given location.
Parameters
----------
image_pos : galsim.Position
The image position for the PSF.
wcs : galsim.BaseWCS or subclass, optional
The WCS to use to draw the PSF. Currently used only when smoothing
is turned off.
smooth : bool, optional
If True, then smooth the Piff PSFs. Default of False.
n_pix : int, optional
The image size to use when drawing without smoothing.
        **kwargs : extra keyword arguments
These are all ignored.
Returns
-------
psf : galsim.GSObject
The PSF at the image position.
"""
if smooth or self.smooth:
if not self._did_fit:
self._fit_smooth_model()
arr = np.array([
np.clip(image_pos.x, 1, 2048),
np.clip(image_pos.y, 1, 4096)])
_g1 = self._g1int(arr)[0]
_g2 = self._g2int(arr)[0]
_T = self._Tint(arr)[0]
if np.any(np.isnan(np.array([_g1, _g2, _T]))):
logger.debug("Piff smooth fit params are NaN: %s %s %s %s", image_pos, _g1, _g2, _T)
raise RuntimeError("NaN smooth Piff params at %s!" % image_pos)
pars = np.array([0, 0, _g1, _g2, _T, 1])
obj = ngmix.gmix.make_gmix_model(pars, 'turb').make_galsim_object()
return obj.withFlux(1)
else:
return self._draw(image_pos, wcs=wcs, n_pix=n_pix)
class PiffLoader(galsim.config.InputLoader):
def getKwargs(self, config, base, logger):
req = {'file_name': str}
opt = {}
kwargs, safe = galsim.config.GetAllParams(
config, base, req=req, opt=opt)
return kwargs, safe
# add a config input section
galsim.config.RegisterInputType('des_piff', PiffLoader(DES_Piff))
# and a builder
def BuildDES_Piff(config, base, ignore, gsparams, logger):
des_piff = galsim.config.GetInputObj('des_piff', config, base, 'DES_Piff')
opt = {'flux': float,
'num': int,
'image_pos': galsim.PositionD,
'x_interpolant': str,
'smooth': bool}
params, safe = galsim.config.GetAllParams(
config, base, opt=opt, ignore=ignore)
if 'image_pos' in params:
image_pos = params['image_pos']
elif 'image_pos' in base:
image_pos = base['image_pos']
else:
raise galsim.GalSimConfigError(
"DES_Piff requested, but no image_pos defined in base.")
if 'wcs' not in base:
raise galsim.GalSimConfigError(
"DES_Piff requested, but no wcs defined in base.")
wcs = base['wcs']
if gsparams:
gsparams = galsim.GSParams(**gsparams)
else:
gsparams = None
psf = des_piff.getPSF(
image_pos,
wcs,
smooth=params.get('smooth', False),
gsparams=gsparams)
if 'flux' in params:
psf = psf.withFlux(params['flux'])
# we make sure to declare the returned object as not safe for reuse
can_be_reused = False
return psf, can_be_reused
def BuildDES_Piff_with_substitute(config, base, ignore, gsparams, logger):
# This builder usually just calls BuildDES_Piff, but can also
# be passed use_substitute = True, in which case it builds some
# other PSF. We use this for rejectlisted Piff files.
if "use_substitute" in config:
use_substitute = galsim.config.ParseValue(config, "use_substitute",
base, bool)[0]
else:
use_substitute = False
if use_substitute:
return (galsim.config.BuildGSObject(
config, "substitute_psf", base=base,
gsparams=gsparams, logger=logger))
else:
ignore += ["use_substitute", "substitute_psf"]
return BuildDES_Piff(config, base, ignore, gsparams, logger)
galsim.config.RegisterObjectType(
'DES_Piff', BuildDES_Piff_with_substitute, input_type='des_piff')
|
[
"galsim.config.BuildGSObject",
"numpy.empty",
"numpy.clip",
"numpy.mean",
"ngmix.gmix.make_gmix_model",
"galsim.PixelScale",
"galsim.config.GetAllParams",
"galsim.config.GetInputObj",
"galsim.PositionD",
"galsim.ImageD",
"galsim.config.RegisterObjectType",
"numpy.isfinite",
"numpy.random.RandomState",
"ngmix.jacobian.DiagonalJacobian",
"numpy.linspace",
"numpy.stack",
"ngmix.fitting.LMSimple",
"numpy.ones_like",
"galsim.config.ParseValue",
"galsim.GSParams",
"os.path.expandvars",
"ngmix.moments.mom2g",
"galsim.GalSimConfigError",
"numpy.concatenate",
"ngmix.fitting.Fitter",
"ngmix.moments.g2mom",
"ngmix.admom.Admom",
"numpy.any",
"numpy.array",
"ngmix.admom.AdmomFitter",
"logging.getLogger"
] |
[((407, 434), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (424, 434), False, 'import logging\n'), ((12281, 12383), 'galsim.config.RegisterObjectType', 'galsim.config.RegisterObjectType', (['"""DES_Piff"""', 'BuildDES_Piff_with_substitute'], {'input_type': '"""des_piff"""'}), "('DES_Piff', BuildDES_Piff_with_substitute,\n input_type='des_piff')\n", (12313, 12383), False, 'import galsim\n'), ((10347, 10410), 'galsim.config.GetInputObj', 'galsim.config.GetInputObj', (['"""des_piff"""', 'config', 'base', '"""DES_Piff"""'], {}), "('des_piff', config, base, 'DES_Piff')\n", (10372, 10410), False, 'import galsim\n'), ((10582, 10646), 'galsim.config.GetAllParams', 'galsim.config.GetAllParams', (['config', 'base'], {'opt': 'opt', 'ignore': 'ignore'}), '(config, base, opt=opt, ignore=ignore)\n', (10608, 10646), False, 'import galsim\n'), ((1862, 1898), 'numpy.empty', 'np.empty', (['(ny, nx)'], {'dtype': 'np.float64'}), '((ny, nx), dtype=np.float64)\n', (1870, 1898), True, 'import numpy as np\n'), ((1914, 1950), 'numpy.empty', 'np.empty', (['(ny, nx)'], {'dtype': 'np.float64'}), '((ny, nx), dtype=np.float64)\n', (1922, 1950), True, 'import numpy as np\n'), ((1966, 2005), 'numpy.empty', 'np.empty', (['(ny, nx, 3)'], {'dtype': 'np.float64'}), '((ny, nx, 3), dtype=np.float64)\n', (1974, 2005), True, 'import numpy as np\n'), ((5414, 5444), 'numpy.stack', 'np.stack', (['[xloc, yloc]'], {'axis': '(1)'}), '([xloc, yloc], axis=1)\n', (5422, 5444), True, 'import numpy as np\n'), ((5565, 5580), 'numpy.isfinite', 'np.isfinite', (['g1'], {}), '(g1)\n', (5576, 5580), True, 'import numpy as np\n'), ((5684, 5696), 'numpy.any', 'np.any', (['(~msk)'], {}), '(~msk)\n', (5690, 5696), True, 'import numpy as np\n'), ((5888, 5903), 'numpy.isfinite', 'np.isfinite', (['g2'], {}), '(g2)\n', (5899, 5903), True, 'import numpy as np\n'), ((6007, 6019), 'numpy.any', 'np.any', (['(~msk)'], {}), '(~msk)\n', (6013, 6019), True, 'import numpy as np\n'), ((6210, 6224), 'numpy.isfinite', 'np.isfinite', (['T'], {}), '(T)\n', (6221, 6224), True, 'import numpy as np\n'), ((6328, 6340), 'numpy.any', 'np.any', (['(~msk)'], {}), '(~msk)\n', (6334, 6340), True, 'import numpy as np\n'), ((7700, 7752), 'galsim.ImageD', 'galsim.ImageD', ([], {'ncol': 'n_pix', 'nrow': 'n_pix', 'wcs': 'pixel_wcs'}), '(ncol=n_pix, nrow=n_pix, wcs=pixel_wcs)\n', (7713, 7752), False, 'import galsim\n'), ((10057, 10115), 'galsim.config.GetAllParams', 'galsim.config.GetAllParams', (['config', 'base'], {'req': 'req', 'opt': 'opt'}), '(config, base, req=req, opt=opt)\n', (10083, 10115), False, 'import galsim\n'), ((10955, 11030), 'galsim.GalSimConfigError', 'galsim.GalSimConfigError', (['"""DES_Piff requested, but no wcs defined in base."""'], {}), "('DES_Piff requested, but no wcs defined in base.')\n", (10979, 11030), False, 'import galsim\n'), ((11103, 11130), 'galsim.GSParams', 'galsim.GSParams', ([], {}), '(**gsparams)\n', (11118, 11130), False, 'import galsim\n'), ((12020, 12123), 'galsim.config.BuildGSObject', 'galsim.config.BuildGSObject', (['config', '"""substitute_psf"""'], {'base': 'base', 'gsparams': 'gsparams', 'logger': 'logger'}), "(config, 'substitute_psf', base=base, gsparams=\n gsparams, logger=logger)\n", (12047, 12123), False, 'import galsim\n'), ((2038, 2062), 'numpy.linspace', 'np.linspace', (['(1)', '(4096)', 'ny'], {}), '(1, 4096, ny)\n', (2049, 2062), True, 'import numpy as np\n'), ((5721, 5737), 'numpy.mean', 'np.mean', (['g1[msk]'], {}), '(g1[msk])\n', (5728, 5737), True, 'import numpy as np\n'), ((6044, 6060), 
'numpy.mean', 'np.mean', (['g2[msk]'], {}), '(g2[msk])\n', (6051, 6060), True, 'import numpy as np\n'), ((6364, 6379), 'numpy.mean', 'np.mean', (['T[msk]'], {}), '(T[msk])\n', (6371, 6379), True, 'import numpy as np\n'), ((7584, 7613), 'galsim.PixelScale', 'galsim.PixelScale', (['PIFF_SCALE'], {}), '(PIFF_SCALE)\n', (7601, 7613), False, 'import galsim\n'), ((9664, 9697), 'numpy.array', 'np.array', (['[0, 0, _g1, _g2, _T, 1]'], {}), '([0, 0, _g1, _g2, _T, 1])\n', (9672, 9697), True, 'import numpy as np\n'), ((10819, 10905), 'galsim.GalSimConfigError', 'galsim.GalSimConfigError', (['"""DES_Piff requested, but no image_pos defined in base."""'], {}), "(\n 'DES_Piff requested, but no image_pos defined in base.')\n", (10843, 10905), False, 'import galsim\n'), ((11823, 11885), 'galsim.config.ParseValue', 'galsim.config.ParseValue', (['config', '"""use_substitute"""', 'base', 'bool'], {}), "(config, 'use_substitute', base, bool)\n", (11847, 11885), False, 'import galsim\n'), ((2101, 2125), 'numpy.linspace', 'np.linspace', (['(1)', '(2048)', 'nx'], {}), '(1, 2048, nx)\n', (2112, 2125), True, 'import numpy as np\n'), ((2150, 2190), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(yi + nx * xi)'}), '(seed=yi + nx * xi)\n', (2171, 2190), True, 'import numpy as np\n'), ((2282, 2310), 'galsim.PositionD', 'galsim.PositionD', ([], {'x': 'xl', 'y': 'yl'}), '(x=xl, y=yl)\n', (2298, 2310), False, 'import galsim\n'), ((5820, 5836), 'numpy.mean', 'np.mean', (['g1[msk]'], {}), '(g1[msk])\n', (5827, 5836), True, 'import numpy as np\n'), ((6143, 6159), 'numpy.mean', 'np.mean', (['g2[msk]'], {}), '(g2[msk])\n', (6150, 6159), True, 'import numpy as np\n'), ((6460, 6475), 'numpy.mean', 'np.mean', (['T[msk]'], {}), '(T[msk])\n', (6467, 6475), True, 'import numpy as np\n'), ((1513, 1542), 'os.path.expandvars', 'os.path.expandvars', (['file_name'], {}), '(file_name)\n', (1531, 1542), False, 'import os\n'), ((2513, 2552), 'numpy.concatenate', 'np.concatenate', (['[img[0, :], img[-1, :]]'], {}), '([img[0, :], img[-1, :]])\n', (2527, 2552), True, 'import numpy as np\n'), ((7952, 7976), 'galsim.ImageD', 'galsim.ImageD', (['psf.array'], {}), '(psf.array)\n', (7965, 7976), False, 'import galsim\n'), ((9213, 9242), 'numpy.clip', 'np.clip', (['image_pos.x', '(1)', '(2048)'], {}), '(image_pos.x, 1, 2048)\n', (9220, 9242), True, 'import numpy as np\n'), ((9260, 9289), 'numpy.clip', 'np.clip', (['image_pos.y', '(1)', '(4096)'], {}), '(image_pos.y, 1, 4096)\n', (9267, 9289), True, 'import numpy as np\n'), ((9436, 9460), 'numpy.array', 'np.array', (['[_g1, _g2, _T]'], {}), '([_g1, _g2, _T])\n', (9444, 9460), True, 'import numpy as np\n'), ((9716, 9756), 'ngmix.gmix.make_gmix_model', 'ngmix.gmix.make_gmix_model', (['pars', '"""turb"""'], {}), "(pars, 'turb')\n", (9742, 9756), False, 'import ngmix\n'), ((2708, 2767), 'ngmix.jacobian.DiagonalJacobian', 'ngmix.jacobian.DiagonalJacobian', ([], {'x': '(9)', 'y': '(9)', 'scale': 'PIFF_SCALE'}), '(x=9, y=9, scale=PIFF_SCALE)\n', (2739, 2767), False, 'import ngmix\n'), ((4594, 4627), 'ngmix.moments.g2mom', 'ngmix.moments.g2mom', (['_g1', '_g2', '_T'], {}), '(_g1, _g2, _T)\n', (4613, 4627), False, 'import ngmix\n'), ((5046, 5080), 'ngmix.moments.mom2g', 'ngmix.moments.mom2g', (['irr', 'irc', 'icc'], {}), '(irr, irc, icc)\n', (5065, 5080), False, 'import ngmix\n'), ((2653, 2670), 'numpy.ones_like', 'np.ones_like', (['img'], {}), '(img)\n', (2665, 2670), True, 'import numpy as np\n'), ((3121, 3141), 'ngmix.admom.AdmomFitter', 'AdmomFitter', ([], {'rng': 'rng'}), 
'(rng=rng)\n', (3132, 3141), False, 'from ngmix.admom import AdmomFitter\n'), ((3333, 3353), 'ngmix.fitting.Fitter', 'Fitter', ([], {'model': '"""turb"""'}), "(model='turb')\n", (3339, 3353), False, 'from ngmix.fitting import Fitter\n'), ((3768, 3787), 'ngmix.admom.Admom', 'Admom', (['obs'], {'rng': 'rng'}), '(obs, rng=rng)\n', (3773, 3787), False, 'from ngmix.admom import Admom\n'), ((4022, 4043), 'ngmix.fitting.LMSimple', 'LMSimple', (['obs', '"""turb"""'], {}), "(obs, 'turb')\n", (4030, 4043), False, 'from ngmix.fitting import LMSimple\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.