code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M) |
---|---|---|
import copy
import sys
if sys.version_info < (3,):
range = xrange
import numpy as np
import pandas as pd
import scipy.stats as ss
from patsy import dmatrices, dmatrix, demo_data
from .. import families as fam
from .. import tsm as tsm
from .. import data_check as dc
from .kalman import *
class DAR(tsm.TSM):
""" Inherits time series methods from TSM class.
**** DYNAMIC AUTOREGRESSIVE MODEL ****
Parameters
----------
data : pd.DataFrame
Field to specify the data that will be used
ar : int
Number of autoregressive lags
integ : int (default : 0)
Specifies how many times to difference the time series
target : string or int (default : None)
Specifies which column or array index to use as the dependent variable; by default the first column is used
"""
def __init__(self, data, ar, integ=0, target=None):
# Initialize TSM object
super(DAR, self).__init__('DAR')
# Latent Variable information
self.ar = ar
self.integ = integ
self.target = target
self.model_name = "DAR(" + str(self.ar) + ", integrated=" + str(self.integ) + ")"
self.max_lag = self.ar
self._z_hide = 0 # Whether to cutoff latent variables from results table
self.supported_methods = ["MLE", "PML", "Laplace", "M-H", "BBVI"]
self.default_method = "MLE"
self.multivariate_model = False
# Format the data
self.data_original = data.copy()
self.data, self.data_name, self.is_pandas, self.index = dc.data_check(data,target)
self.data = self.data.astype(np.float64) # treat as float for Cython (np.float is deprecated in recent NumPy)
self.data_original_nondf = self.data.copy()
# Difference data
for order in range(0, self.integ):
self.data = np.diff(self.data)
self.data_name = "Differenced " + self.data_name
self.X = self._ar_matrix()
self.data = self.data[self.max_lag:]
self.y = self.data
self.y_name = self.data_name
self._create_latent_variables()
self.z_no = len(self.latent_variables.z_list)
def _ar_matrix(self):
""" Creates Autoregressive matrix
Returns
----------
X : np.ndarray
Autoregressive Matrix
"""
Y = np.array(self.data[self.max_lag:self.data.shape[0]])
X = np.ones(Y.shape[0])
if self.ar != 0:
for i in range(0, self.ar):
X = np.vstack((X,self.data[(self.max_lag-i-1):-i-1]))
return X.T
def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
self.latent_variables.add_z('Sigma^2 irregular', fam.Flat(transform='exp'), fam.Normal(0,3))
self.latent_variables.add_z('Constant', fam.Flat(transform=None), fam.Normal(0,3))
for parm in range(1,self.ar+1):
self.latent_variables.add_z('Sigma^2 AR(' + str(parm) + ')', fam.Flat(transform='exp'), fam.Normal(0,3))
def _forecast_model(self,beta,Z,h):
""" Creates forecasted states and variances
Parameters
----------
beta : np.ndarray
Contains untransformed starting values for latent variables
Z : np.ndarray
Design matrix for the states at each forecast step
h : int
How many steps ahead to forecast
Returns
----------
a : np.ndarray
Forecasted states
P : np.ndarray
Variance of forecasted states
"""
T, _, R, Q, H = self._ss_matrices(beta)
return dl_univariate_kalman_fcst(self.data,Z,H,T,Q,R,0.0,h)
def _model(self,data,beta):
""" Creates the structure of the model
Parameters
----------
data : np.array
Contains the time series
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
a,P,K,F,v : np.array
Filtered states, filtered variances, Kalman gains, F matrix, residuals
"""
T, Z, R, Q, H = self._ss_matrices(beta)
return dl_univariate_kalman(data,Z,H,T,Q,R,0.0)
def _ss_matrices(self,beta):
""" Creates the state space matrices required
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
T, Z, R, Q, H : np.array
State space matrices used in KFS algorithm
"""
T = np.identity(self.z_no-1)
H = np.identity(1)*self.latent_variables.z_list[0].prior.transform(beta[0])
Z = self.X
R = np.identity(self.z_no-1)
Q = np.identity(self.z_no-1)
for i in range(0,self.z_no-1):
Q[i][i] = self.latent_variables.z_list[i+1].prior.transform(beta[i+1])
return T, Z, R, Q, H
def neg_loglik(self,beta):
""" Creates the negative log marginal likelihood of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
The negative log-likelihood of the model
"""
_, _, _, F, v = self._model(self.y,beta)
loglik = 0.0
for i in range(0,self.y.shape[0]):
loglik += np.linalg.slogdet(F[:,:,i])[1] + np.dot(v[i],np.dot(np.linalg.pinv(F[:,:,i]),v[i]))
return -(-((self.y.shape[0]/2)*np.log(2*np.pi))-0.5*loglik.T[0].sum())
def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
Returns
----------
- Plot of the forecast
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
y_holder = self.y.copy() # holds past data and predicted data to create AR matrix
full_X = self.X.copy()
full_X = np.append(full_X,np.array([np.append(1.0, y_holder[-self.ar:][::-1])]), axis=0)
Z = full_X
# Construct Z matrix
for step in range(h):
a, P = self._forecast_model(self.latent_variables.get_z_values(),Z,step)
new_value = np.dot(Z[-1,:],a[:,self.y.shape[0]+step])
y_holder = np.append(y_holder, new_value)
Z = np.append(Z, np.array([np.append(1.0, y_holder[-self.ar:][::-1])]), axis=0)
# Retrieve data, dates and (transformed) latent variables
a, P = self._forecast_model(self.latent_variables.get_z_values(),Z,h)
smoothed_series = np.zeros(self.y.shape[0]+h)
series_variance = np.zeros(self.y.shape[0]+h)
for t in range(self.y.shape[0]+h):
smoothed_series[t] = np.dot(Z[t],a[:,t])
series_variance[t] = np.dot(np.dot(Z[t],P[:,:,t]),Z[t].T) + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0])
date_index = self.shift_dates(h)
plot_values = smoothed_series[-h-past_values:]
forecasted_values = smoothed_series[-h:]
lower = forecasted_values - 1.98*np.power(series_variance[-h:],0.5)
upper = forecasted_values + 1.98*np.power(series_variance[-h:],0.5)
lower = np.append(plot_values[-h-1],lower)
upper = np.append(plot_values[-h-1],upper)
plot_index = date_index[-h-past_values:]
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:], lower, upper, alpha=0.2)
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.y_name)
plt.xlabel("Time")
plt.ylabel(self.y_name)
plt.show()
def plot_fit(self,intervals=False,**kwargs):
""" Plots the fit of the model
Parameters
----------
intervals : Boolean
Whether to plot 95% confidence interval of states
Returns
----------
None (plots data and the fit)
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
series_type = kwargs.get('series_type','Smoothed')
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
date_index = copy.deepcopy(self.index)
date_index = date_index[self.integ+self.ar:]
if series_type == 'Smoothed':
mu, V = self.smoothed_state(self.data,self.latent_variables.get_z_values())
elif series_type == 'Filtered':
mu, V, _, _, _ = self._model(self.data,self.latent_variables.get_z_values())
else:
mu, V = self.smoothed_state(self.data,self.latent_variables.get_z_values())
# Create smoothed/filtered aggregate series
_, Z, _, _, _ = self._ss_matrices(self.latent_variables.get_z_values())
smoothed_series = np.zeros(self.y.shape[0])
for t in range(0,self.y.shape[0]):
smoothed_series[t] = np.dot(Z[t],mu[:,t])
plt.figure(figsize=figsize)
plt.subplot(self.z_no+1, 1, 1)
plt.title(self.y_name + " Raw and " + series_type)
plt.plot(date_index,self.data,label='Data')
plt.plot(date_index,smoothed_series,label=series_type,c='black')
plt.legend(loc=2)
for coef in range(0,self.z_no-1):
V_coef = V[0][coef][:-1]
plt.subplot(self.z_no+1, 1, 2+coef)
plt.title("Beta " + self.latent_variables.z_list[1+coef].name)
if intervals == True:
alpha =[0.15*i/float(100) for i in range(50,12,-2)]
plt.fill_between(date_index[5:], mu[coef,0:mu.shape[1]-1][5:] + 1.98*np.sqrt(V_coef[5:]), mu[coef,0:mu.shape[1]-1][5:] - 1.98*np.sqrt(V_coef[5:]), alpha=0.15,label='95% C.I.')
plt.plot(date_index,mu[coef,0:mu.shape[1]-1],label='Data')
plt.legend(loc=2)
plt.subplot(self.z_no+1, 1, self.z_no+1)
plt.title("Measurement Error")
plt.plot(date_index,self.data-smoothed_series,label='Irregular')
plt.legend(loc=2)
plt.show()
def predict(self, h=5):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
Returns
----------
- pd.DataFrame with predictions
"""
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
y_holder = self.y.copy() # holds past data and predicted data to create AR matrix
full_X = self.X.copy()
full_X = np.append(full_X,np.array([np.append(1.0, y_holder[-self.ar:][::-1])]), axis=0)
Z = full_X
for step in range(h):
a, P = self._forecast_model(self.latent_variables.get_z_values(),Z,step)
new_value = np.dot(Z[-1,:],a[:,self.y.shape[0]+step])
y_holder = np.append(y_holder, new_value)
Z = np.append(Z, np.array([np.append(1.0, y_holder[-self.ar:][::-1])]), axis=0)
date_index = self.shift_dates(h)
result = pd.DataFrame(y_holder[-h:])
result.rename(columns={0:self.y_name}, inplace=True)
result.index = date_index[-h:]
return result
def predict_is(self, h=5, fit_once=True):
""" Makes dynamic in-sample predictions with the estimated model
Parameters
----------
h : int (default : 5)
How many steps would you like to forecast?
fit_once : boolean
(default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
Returns
----------
- pd.DataFrame with predicted values
"""
predictions = []
for t in range(0,h):
data1 = self.data_original_nondf[:-h+t]
x = DAR(data=data1, ar=self.ar, integ=self.integ)
if fit_once is False:
x.fit(printer=False)
if t == 0:
if fit_once is True:
x.fit(printer=False)
saved_lvs = x.latent_variables
predictions = x.predict(1)
else:
if fit_once is True:
x.latent_variables = saved_lvs
predictions = pd.concat([predictions,x.predict(1)])
predictions.rename(columns={0:self.y_name}, inplace=True)
predictions.index = self.index[-h:]
return predictions
def plot_predict_is(self, h=5, **kwargs):
""" Plots forecasts with the estimated model against data
(Simulated prediction with data)
Parameters
----------
h : int (default : 5)
How many steps to forecast
Returns
----------
- Plot of the forecast against data
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
plt.figure(figsize=figsize)
predictions = self.predict_is(h)
data = self.data[-h:]
plt.plot(predictions.index,data,label='Data')
plt.plot(predictions.index,predictions,label='Predictions',c='black')
plt.title(self.y_name)
plt.legend(loc=2)
plt.show()
def simulation_smoother(self,beta):
""" Koopman's simulation smoother - simulates from states given
model parameters and observations
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
- A simulated state evolution
"""
T, Z, R, Q, H = self._ss_matrices(beta)
# Generate e_t+ and n_t+
rnd_h = np.random.normal(0,np.sqrt(H),self.data.shape[0]+1)
q_dist = ss.multivariate_normal([0.0, 0.0], Q)
rnd_q = q_dist.rvs(self.data.shape[0]+1)
# Generate a_t+ and y_t+
a_plus = np.zeros((T.shape[0],self.data.shape[0]+1))
a_plus[0,0] = np.mean(self.data[0:5])
y_plus = np.zeros(self.data.shape[0])
for t in range(0,self.data.shape[0]+1):
if t == 0:
a_plus[:,t] = np.dot(T,a_plus[:,t]) + rnd_q[t,:]
y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]
else:
if t != self.data.shape[0]:
a_plus[:,t] = np.dot(T,a_plus[:,t-1]) + rnd_q[t,:]
y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]
alpha_hat, _ = self.smoothed_state(self.data,beta)
alpha_hat_plus, _ = self.smoothed_state(y_plus,beta)
alpha_tilde = alpha_hat - alpha_hat_plus + a_plus
return alpha_tilde
def smoothed_state(self,data,beta):
""" Creates the negative log marginal likelihood of the model
Parameters
----------
data : np.array
Data to be smoothed
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
- Smoothed states
"""
T, Z, R, Q, H = self._ss_matrices(beta)
alpha, V = dl_univariate_KFS(data,Z,H,T,Q,R,0.0)
return alpha, V
|
[
"matplotlib.pyplot.title",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.fill_between",
"numpy.linalg.pinv",
"pandas.DataFrame",
"numpy.power",
"numpy.identity",
"numpy.append",
"copy.deepcopy",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"scipy.stats.multivariate_normal",
"matplotlib.pyplot.ylabel",
"numpy.dot",
"numpy.vstack",
"matplotlib.pyplot.subplot",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.diff",
"numpy.array",
"numpy.linalg.slogdet",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] |
[((2067, 2119), 'numpy.array', 'np.array', (['self.data[self.max_lag:self.data.shape[0]]'], {}), '(self.data[self.max_lag:self.data.shape[0]])\n', (2075, 2119), True, 'import numpy as np\n'), ((2132, 2151), 'numpy.ones', 'np.ones', (['Y.shape[0]'], {}), '(Y.shape[0])\n', (2139, 2151), True, 'import numpy as np\n'), ((4274, 4300), 'numpy.identity', 'np.identity', (['(self.z_no - 1)'], {}), '(self.z_no - 1)\n', (4285, 4300), True, 'import numpy as np\n'), ((4421, 4447), 'numpy.identity', 'np.identity', (['(self.z_no - 1)'], {}), '(self.z_no - 1)\n', (4432, 4447), True, 'import numpy as np\n'), ((4467, 4493), 'numpy.identity', 'np.identity', (['(self.z_no - 1)'], {}), '(self.z_no - 1)\n', (4478, 4493), True, 'import numpy as np\n'), ((13747, 13774), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (13757, 13774), True, 'import matplotlib.pyplot as plt\n'), ((13854, 13901), 'matplotlib.pyplot.plot', 'plt.plot', (['predictions.index', 'data'], {'label': '"""Data"""'}), "(predictions.index, data, label='Data')\n", (13862, 13901), True, 'import matplotlib.pyplot as plt\n'), ((13908, 13980), 'matplotlib.pyplot.plot', 'plt.plot', (['predictions.index', 'predictions'], {'label': '"""Predictions"""', 'c': '"""black"""'}), "(predictions.index, predictions, label='Predictions', c='black')\n", (13916, 13980), True, 'import matplotlib.pyplot as plt\n'), ((13986, 14008), 'matplotlib.pyplot.title', 'plt.title', (['self.y_name'], {}), '(self.y_name)\n', (13995, 14008), True, 'import matplotlib.pyplot as plt\n'), ((14017, 14034), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (14027, 14034), True, 'import matplotlib.pyplot as plt\n'), ((14046, 14056), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14054, 14056), True, 'import matplotlib.pyplot as plt\n'), ((14621, 14658), 'scipy.stats.multivariate_normal', 'ss.multivariate_normal', (['[0.0, 0.0]', 'Q'], {}), '([0.0, 0.0], Q)\n', (14643, 14658), True, 'import scipy.stats as ss\n'), ((14759, 14805), 'numpy.zeros', 'np.zeros', (['(T.shape[0], self.data.shape[0] + 1)'], {}), '((T.shape[0], self.data.shape[0] + 1))\n', (14767, 14805), True, 'import numpy as np\n'), ((14826, 14849), 'numpy.mean', 'np.mean', (['self.data[0:5]'], {}), '(self.data[0:5])\n', (14833, 14849), True, 'import numpy as np\n'), ((14867, 14895), 'numpy.zeros', 'np.zeros', (['self.data.shape[0]'], {}), '(self.data.shape[0])\n', (14875, 14895), True, 'import numpy as np\n'), ((1561, 1579), 'numpy.diff', 'np.diff', (['self.data'], {}), '(self.data)\n', (1568, 1579), True, 'import numpy as np\n'), ((4311, 4325), 'numpy.identity', 'np.identity', (['(1)'], {}), '(1)\n', (4322, 4325), True, 'import numpy as np\n'), ((6925, 6954), 'numpy.zeros', 'np.zeros', (['(self.y.shape[0] + h)'], {}), '(self.y.shape[0] + h)\n', (6933, 6954), True, 'import numpy as np\n'), ((6983, 7012), 'numpy.zeros', 'np.zeros', (['(self.y.shape[0] + h)'], {}), '(self.y.shape[0] + h)\n', (6991, 7012), True, 'import numpy as np\n'), ((7622, 7659), 'numpy.append', 'np.append', (['plot_values[-h - 1]', 'lower'], {}), '(plot_values[-h - 1], lower)\n', (7631, 7659), True, 'import numpy as np\n'), ((7677, 7714), 'numpy.append', 'np.append', (['plot_values[-h - 1]', 'upper'], {}), '(plot_values[-h - 1], upper)\n', (7686, 7714), True, 'import numpy as np\n'), ((7779, 7806), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (7789, 7806), True, 'import matplotlib.pyplot as plt\n'), ((7943, 7976), 
'matplotlib.pyplot.plot', 'plt.plot', (['plot_index', 'plot_values'], {}), '(plot_index, plot_values)\n', (7951, 7976), True, 'import matplotlib.pyplot as plt\n'), ((7988, 8028), 'matplotlib.pyplot.title', 'plt.title', (["('Forecast for ' + self.y_name)"], {}), "('Forecast for ' + self.y_name)\n", (7997, 8028), True, 'import matplotlib.pyplot as plt\n'), ((8041, 8059), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (8051, 8059), True, 'import matplotlib.pyplot as plt\n'), ((8072, 8095), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.y_name'], {}), '(self.y_name)\n', (8082, 8095), True, 'import matplotlib.pyplot as plt\n'), ((8108, 8118), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8116, 8118), True, 'import matplotlib.pyplot as plt\n'), ((8755, 8780), 'copy.deepcopy', 'copy.deepcopy', (['self.index'], {}), '(self.index)\n', (8768, 8780), False, 'import copy\n'), ((9391, 9416), 'numpy.zeros', 'np.zeros', (['self.y.shape[0]'], {}), '(self.y.shape[0])\n', (9399, 9416), True, 'import numpy as np\n'), ((9536, 9563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (9546, 9563), True, 'import matplotlib.pyplot as plt\n'), ((9590, 9622), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(self.z_no + 1)', '(1)', '(1)'], {}), '(self.z_no + 1, 1, 1)\n', (9601, 9622), True, 'import matplotlib.pyplot as plt\n'), ((9633, 9683), 'matplotlib.pyplot.title', 'plt.title', (["(self.y_name + ' Raw and ' + series_type)"], {}), "(self.y_name + ' Raw and ' + series_type)\n", (9642, 9683), True, 'import matplotlib.pyplot as plt\n'), ((9699, 9744), 'matplotlib.pyplot.plot', 'plt.plot', (['date_index', 'self.data'], {'label': '"""Data"""'}), "(date_index, self.data, label='Data')\n", (9707, 9744), True, 'import matplotlib.pyplot as plt\n'), ((9755, 9822), 'matplotlib.pyplot.plot', 'plt.plot', (['date_index', 'smoothed_series'], {'label': 'series_type', 'c': '"""black"""'}), "(date_index, smoothed_series, label=series_type, c='black')\n", (9763, 9822), True, 'import matplotlib.pyplot as plt\n'), ((9832, 9849), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (9842, 9849), True, 'import matplotlib.pyplot as plt\n'), ((10531, 10575), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(self.z_no + 1)', '(1)', '(self.z_no + 1)'], {}), '(self.z_no + 1, 1, self.z_no + 1)\n', (10542, 10575), True, 'import matplotlib.pyplot as plt\n'), ((10584, 10614), 'matplotlib.pyplot.title', 'plt.title', (['"""Measurement Error"""'], {}), "('Measurement Error')\n", (10593, 10614), True, 'import matplotlib.pyplot as plt\n'), ((10627, 10695), 'matplotlib.pyplot.plot', 'plt.plot', (['date_index', '(self.data - smoothed_series)'], {'label': '"""Irregular"""'}), "(date_index, self.data - smoothed_series, label='Irregular')\n", (10635, 10695), True, 'import matplotlib.pyplot as plt\n'), ((10704, 10721), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (10714, 10721), True, 'import matplotlib.pyplot as plt\n'), ((10738, 10748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10746, 10748), True, 'import matplotlib.pyplot as plt\n'), ((11862, 11889), 'pandas.DataFrame', 'pd.DataFrame', (['y_holder[-h:]'], {}), '(y_holder[-h:])\n', (11874, 11889), True, 'import pandas as pd\n'), ((14571, 14581), 'numpy.sqrt', 'np.sqrt', (['H'], {}), '(H)\n', (14578, 14581), True, 'import numpy as np\n'), ((2238, 2292), 'numpy.vstack', 'np.vstack', (['(X, self.data[self.max_lag - i - 1:-i - 1])'], {}), '((X, 
self.data[self.max_lag - i - 1:-i - 1]))\n', (2247, 2292), True, 'import numpy as np\n'), ((6537, 6583), 'numpy.dot', 'np.dot', (['Z[-1, :]', 'a[:, self.y.shape[0] + step]'], {}), '(Z[-1, :], a[:, self.y.shape[0] + step])\n', (6543, 6583), True, 'import numpy as np\n'), ((6606, 6636), 'numpy.append', 'np.append', (['y_holder', 'new_value'], {}), '(y_holder, new_value)\n', (6615, 6636), True, 'import numpy as np\n'), ((7095, 7116), 'numpy.dot', 'np.dot', (['Z[t]', 'a[:, t]'], {}), '(Z[t], a[:, t])\n', (7101, 7116), True, 'import numpy as np\n'), ((7857, 7919), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['date_index[-h - 1:]', 'lower', 'upper'], {'alpha': '(0.2)'}), '(date_index[-h - 1:], lower, upper, alpha=0.2)\n', (7873, 7919), True, 'import matplotlib.pyplot as plt\n'), ((9502, 9524), 'numpy.dot', 'np.dot', (['Z[t]', 'mu[:, t]'], {}), '(Z[t], mu[:, t])\n', (9508, 9524), True, 'import numpy as np\n'), ((9958, 9997), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(self.z_no + 1)', '(1)', '(2 + coef)'], {}), '(self.z_no + 1, 1, 2 + coef)\n', (9969, 9997), True, 'import matplotlib.pyplot as plt\n'), ((10010, 10074), 'matplotlib.pyplot.title', 'plt.title', (["('Beta ' + self.latent_variables.z_list[1 + coef].name)"], {}), "('Beta ' + self.latent_variables.z_list[1 + coef].name)\n", (10019, 10074), True, 'import matplotlib.pyplot as plt\n'), ((10398, 10461), 'matplotlib.pyplot.plot', 'plt.plot', (['date_index', 'mu[coef, 0:mu.shape[1] - 1]'], {'label': '"""Data"""'}), "(date_index, mu[coef, 0:mu.shape[1] - 1], label='Data')\n", (10406, 10461), True, 'import matplotlib.pyplot as plt\n'), ((10473, 10490), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (10483, 10490), True, 'import matplotlib.pyplot as plt\n'), ((11598, 11644), 'numpy.dot', 'np.dot', (['Z[-1, :]', 'a[:, self.y.shape[0] + step]'], {}), '(Z[-1, :], a[:, self.y.shape[0] + step])\n', (11604, 11644), True, 'import numpy as np\n'), ((11667, 11697), 'numpy.append', 'np.append', (['y_holder', 'new_value'], {}), '(y_holder, new_value)\n', (11676, 11697), True, 'import numpy as np\n'), ((5124, 5153), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['F[:, :, i]'], {}), '(F[:, :, i])\n', (5141, 5153), True, 'import numpy as np\n'), ((7487, 7522), 'numpy.power', 'np.power', (['series_variance[-h:]', '(0.5)'], {}), '(series_variance[-h:], 0.5)\n', (7495, 7522), True, 'import numpy as np\n'), ((7567, 7602), 'numpy.power', 'np.power', (['series_variance[-h:]', '(0.5)'], {}), '(series_variance[-h:], 0.5)\n', (7575, 7602), True, 'import numpy as np\n'), ((14998, 15021), 'numpy.dot', 'np.dot', (['T', 'a_plus[:, t]'], {}), '(T, a_plus[:, t])\n', (15004, 15021), True, 'import numpy as np\n'), ((15061, 15084), 'numpy.dot', 'np.dot', (['Z', 'a_plus[:, t]'], {}), '(Z, a_plus[:, t])\n', (15067, 15084), True, 'import numpy as np\n'), ((5176, 5202), 'numpy.linalg.pinv', 'np.linalg.pinv', (['F[:, :, i]'], {}), '(F[:, :, i])\n', (5190, 5202), True, 'import numpy as np\n'), ((5247, 5264), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (5253, 5264), True, 'import numpy as np\n'), ((6276, 6317), 'numpy.append', 'np.append', (['(1.0)', 'y_holder[-self.ar:][::-1]'], {}), '(1.0, y_holder[-self.ar:][::-1])\n', (6285, 6317), True, 'import numpy as np\n'), ((7159, 7183), 'numpy.dot', 'np.dot', (['Z[t]', 'P[:, :, t]'], {}), '(Z[t], P[:, :, t])\n', (7165, 7183), True, 'import numpy as np\n'), ((11370, 11411), 'numpy.append', 'np.append', (['(1.0)', 'y_holder[-self.ar:][::-1]'], {}), '(1.0, 
y_holder[-self.ar:][::-1])\n', (11379, 11411), True, 'import numpy as np\n'), ((15190, 15217), 'numpy.dot', 'np.dot', (['T', 'a_plus[:, t - 1]'], {}), '(T, a_plus[:, t - 1])\n', (15196, 15217), True, 'import numpy as np\n'), ((15259, 15282), 'numpy.dot', 'np.dot', (['Z', 'a_plus[:, t]'], {}), '(Z, a_plus[:, t])\n', (15265, 15282), True, 'import numpy as np\n'), ((6680, 6721), 'numpy.append', 'np.append', (['(1.0)', 'y_holder[-self.ar:][::-1]'], {}), '(1.0, y_holder[-self.ar:][::-1])\n', (6689, 6721), True, 'import numpy as np\n'), ((11741, 11782), 'numpy.append', 'np.append', (['(1.0)', 'y_holder[-self.ar:][::-1]'], {}), '(1.0, y_holder[-self.ar:][::-1])\n', (11750, 11782), True, 'import numpy as np\n'), ((10274, 10293), 'numpy.sqrt', 'np.sqrt', (['V_coef[5:]'], {}), '(V_coef[5:])\n', (10281, 10293), True, 'import numpy as np\n'), ((10331, 10350), 'numpy.sqrt', 'np.sqrt', (['V_coef[5:]'], {}), '(V_coef[5:])\n', (10338, 10350), True, 'import numpy as np\n')]
|
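The DAR class in the row above is only defined, never exercised; a minimal usage sketch, assuming the surrounding pyflux-style package is importable and that the inherited TSM class provides fit(), might look like the following (the synthetic series and column name are purely illustrative):

# Hedged usage sketch for the DAR class above; assumes a pyflux-style TSM.fit()
# and hypothetical input data -- not part of the original snippet.
import numpy as np
import pandas as pd

rng = np.random.RandomState(42)
series = pd.DataFrame({'y': np.cumsum(rng.normal(size=300))})

model = DAR(data=series, ar=2, integ=0, target='y')
model.fit()                      # MLE by default, per self.default_method
model.plot_fit()                 # smoothed dynamic AR coefficients and fit
forecast = model.predict(h=10)   # pd.DataFrame of 10-step-ahead forecasts
print(forecast)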
import pandas as pd
import numpy as np
import tensorflow as tf
import sys
sys.path.append("/data")
csv = pd.read_csv("bmi.csv")
csv["height"] = csv["height"] / 200
csv["weight"] = csv["weight"] / 100
bclass = {"thin": [1, 0, 0], "normal": [0, 1, 0], "fat": [0, 0, 1]}
csv["label_pat"] = csv["label"].apply(lambda x: np.array(bclass[x]))
test_csv = csv[15000:20000]
test_pat = test_csv[["weight", "height"]]
test_ans = list(test_csv["label_pat"])
x = tf.placeholder(tf.float32, [None, 2])
y_ = tf.placeholder(tf.float32, [None, 3])
W = tf.Variable(tf.zeros([2, 3]))
b = tf.Variable(tf.zeros([3]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(cross_entropy)
predict = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(predict, tf.float32))
sess = tf.Session()
tw = tf.summary.FileWriter("log_dir", graph=sess.graph)
tf.train.write_graph(sess.graph_def, '/data/scripts/study_case/pbtxt_files', 'ml_plactice_tbbmi.pbtxt')
sess.run(tf.initialize_all_variables())
'''inserted code'''
from scripts.utils.tf_utils import TensorFlowScheduler
scheduler = TensorFlowScheduler(name="ml_plactice.ch5.tb-bmi")
'''inserted code'''
step = 0
while True:
i = (step * 100) % 14000
rows = csv[1 + i: 1 + i + 100]
x_pat = rows[["weight", "height"]]
y_ans = list(rows["label_pat"])
fd = {x: x_pat, y_: y_ans}
_, loss = sess.run([train, cross_entropy], feed_dict=fd)
if step % 500 == 0:
cre = sess.run(cross_entropy, feed_dict=fd)
acc = sess.run(accuracy, feed_dict={x: test_pat, y_: test_ans})
# print("step=", step, "cre=", cre, "acc=", acc)
step += 1
'''inserted code'''
scheduler.loss_checker(loss)
scheduler.check_time()
'''inserted code'''
|
[
"sys.path.append",
"tensorflow.log",
"pandas.read_csv",
"tensorflow.argmax",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.train.write_graph",
"tensorflow.summary.FileWriter",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.initialize_all_variables",
"numpy.array",
"tensorflow.matmul",
"tensorflow.train.GradientDescentOptimizer",
"scripts.utils.tf_utils.TensorFlowScheduler"
] |
[((75, 99), 'sys.path.append', 'sys.path.append', (['"""/data"""'], {}), "('/data')\n", (90, 99), False, 'import sys\n'), ((106, 128), 'pandas.read_csv', 'pd.read_csv', (['"""bmi.csv"""'], {}), "('bmi.csv')\n", (117, 128), True, 'import pandas as pd\n'), ((453, 490), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2]'], {}), '(tf.float32, [None, 2])\n', (467, 490), True, 'import tensorflow as tf\n'), ((496, 533), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 3]'], {}), '(tf.float32, [None, 3])\n', (510, 533), True, 'import tensorflow as tf\n'), ((699, 738), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.01)'], {}), '(0.01)\n', (732, 738), True, 'import tensorflow as tf\n'), ((900, 912), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (910, 912), True, 'import tensorflow as tf\n'), ((918, 968), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""log_dir"""'], {'graph': 'sess.graph'}), "('log_dir', graph=sess.graph)\n", (939, 968), True, 'import tensorflow as tf\n'), ((969, 1076), 'tensorflow.train.write_graph', 'tf.train.write_graph', (['sess.graph_def', '"""/data/scripts/study_case/pbtxt_files"""', '"""ml_plactice_tbbmi.pbtxt"""'], {}), "(sess.graph_def, '/data/scripts/study_case/pbtxt_files',\n 'ml_plactice_tbbmi.pbtxt')\n", (989, 1076), True, 'import tensorflow as tf\n'), ((1202, 1252), 'scripts.utils.tf_utils.TensorFlowScheduler', 'TensorFlowScheduler', ([], {'name': '"""ml_plactice.ch5.tb-bmi"""'}), "(name='ml_plactice.ch5.tb-bmi')\n", (1221, 1252), False, 'from scripts.utils.tf_utils import TensorFlowScheduler\n'), ((551, 567), 'tensorflow.zeros', 'tf.zeros', (['[2, 3]'], {}), '([2, 3])\n', (559, 567), True, 'import tensorflow as tf\n'), ((585, 598), 'tensorflow.zeros', 'tf.zeros', (['[3]'], {}), '([3])\n', (593, 598), True, 'import tensorflow as tf\n'), ((801, 816), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (810, 816), True, 'import tensorflow as tf\n'), ((818, 834), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (827, 834), True, 'import tensorflow as tf\n'), ((862, 890), 'tensorflow.cast', 'tf.cast', (['predict', 'tf.float32'], {}), '(predict, tf.float32)\n', (869, 890), True, 'import tensorflow as tf\n'), ((1083, 1112), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1110, 1112), True, 'import tensorflow as tf\n'), ((317, 336), 'numpy.array', 'np.array', (['bclass[x]'], {}), '(bclass[x])\n', (325, 336), True, 'import numpy as np\n'), ((618, 633), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (627, 633), True, 'import tensorflow as tf\n'), ((676, 685), 'tensorflow.log', 'tf.log', (['y'], {}), '(y)\n', (682, 685), True, 'import tensorflow as tf\n')]
|
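The loss in the script above is computed as -tf.reduce_sum(y_ * tf.log(y)) on top of an explicit softmax, which can produce NaN once y underflows to zero. A sketch of a more numerically stable variant, using TensorFlow's fused op instead of the original formulation (not part of the original script):

# Sketch only: fuse softmax and log for numerical stability (TF 1.x API).
import tensorflow as tf

logits = tf.matmul(x, W) + b   # raw scores, no softmax applied here
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)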
import tensorflow as tf
import skimage.transform
import numpy as np
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# Wrapper for max pooling
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],padding='SAME')
def upsampling(x, k=2):
# Wrapper for resizing. It is effectively upsampling, the reverse operation to pooling
t_shape = x.get_shape()
# print(t_shape)
return tf.image.resize_images(x, size=[t_shape[1]*k, t_shape[2]*k])
def batch_generator(raw_image_data, batch_size):
# this generator takes each picture and randomly rotates it by a multiple of 90 degrees
# the rotation angle is the label (one-hot encoded)
angls = [0, 90, 180, 270]
x = []
y = []
for i, img in enumerate(raw_image_data):
angle = np.random.choice(angls)
ohe_vector = np.zeros(4)
ohe_vector[angls.index(angle)] = 1
y.append(ohe_vector)
transformed_img = skimage.transform.rotate(img.reshape((28, 28)), angle)
x.append(transformed_img)
if i % batch_size == 0:
if i != 0:
x = np.stack(x)
x_out = x.reshape((-1, 28, 28, 1))
y_out = np.stack(y)
x = []
y = []
yield x_out, y_out
label_dict = {
0: 'T-shirt/top',
1: 'Trouser',
2: 'Pullover',
3: 'Dress',
4: 'Coat',
5: 'Sandal',
6: 'Shirt',
7: 'Sneaker',
8: 'Bag',
9: 'Ankle boot',
}
|
[
"numpy.stack",
"tensorflow.image.resize_images",
"tensorflow.nn.relu",
"numpy.zeros",
"tensorflow.nn.max_pool",
"tensorflow.nn.conv2d",
"numpy.random.choice",
"tensorflow.nn.bias_add"
] |
[((162, 230), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, strides, strides, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, strides, strides, 1], padding='SAME')\n", (174, 230), True, 'import tensorflow as tf\n'), ((239, 259), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'b'], {}), '(x, b)\n', (253, 259), True, 'import tensorflow as tf\n'), ((271, 284), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (281, 284), True, 'import tensorflow as tf\n'), ((350, 425), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, k, k, 1]', 'strides': '[1, k, k, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n", (364, 425), True, 'import tensorflow as tf\n'), ((603, 667), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['x'], {'size': '[t_shape[1] * k, t_shape[2] * k]'}), '(x, size=[t_shape[1] * k, t_shape[2] * k])\n', (625, 667), True, 'import tensorflow as tf\n'), ((943, 966), 'numpy.random.choice', 'np.random.choice', (['angls'], {}), '(angls)\n', (959, 966), True, 'import numpy as np\n'), ((988, 999), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (996, 999), True, 'import numpy as np\n'), ((1262, 1273), 'numpy.stack', 'np.stack', (['x'], {}), '(x)\n', (1270, 1273), True, 'import numpy as np\n'), ((1349, 1360), 'numpy.stack', 'np.stack', (['y'], {}), '(y)\n', (1357, 1360), True, 'import numpy as np\n')]
|
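The helpers above are bare definitions; the sketch below wires them into a tiny TF 1.x graph and drives it with batch_generator. The placeholder shape, filter sizes, and synthetic image array are assumptions for illustration, not part of the original snippet.

# Hedged usage sketch for the conv2d/maxpool2d/upsampling/batch_generator helpers above.
import numpy as np
import tensorflow as tf

fake_images = np.random.rand(256, 784).astype(np.float32)   # synthetic 28x28 images (assumption)

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
W1 = tf.Variable(tf.truncated_normal([3, 3, 1, 16], stddev=0.1))
b1 = tf.Variable(tf.zeros([16]))

h1 = conv2d(x, W1, b1)       # 28x28x16 feature maps
p1 = maxpool2d(h1, k=2)       # downsampled to 14x14x16
u1 = upsampling(p1, k=2)      # resized back to 28x28x16

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for batch_x, batch_y in batch_generator(fake_images, batch_size=32):
        out = sess.run(u1, feed_dict={x: batch_x})
        print(out.shape, batch_y.shape)
        break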
"""Example code for the nodes in the example pipeline. This code is meant
just for illustrating basic Kedro features.
Delete this when you start working on your own Kedro project.
"""
# pylint: disable=invalid-name
import logging
from typing import Any, Dict
import numpy as np
import pandas as pd
def train_model(
train_x: pd.DataFrame, train_y: pd.DataFrame, parameters: Dict[str, Any]
) -> np.ndarray:
"""Node for training a simple multi-class logistic regression model. The
number of training iterations as well as the learning rate are taken from
conf/project/parameters.yml. All of the data as well as the parameters
will be provided to this function at the time of execution.
"""
num_iter = parameters["example_num_train_iter"]
lr = parameters["example_learning_rate"]
X = train_x.to_numpy()
Y = train_y.to_numpy()
# Add bias to the features
bias = np.ones((X.shape[0], 1))
X = np.concatenate((bias, X), axis=1)
weights = []
# Train one model for each class in Y
for k in range(Y.shape[1]):
# Initialise weights
theta = np.zeros(X.shape[1])
y = Y[:, k]
for _ in range(num_iter):
z = np.dot(X, theta)
h = _sigmoid(z)
gradient = np.dot(X.T, (h - y)) / y.size
theta -= lr * gradient
# Save the weights for each model
weights.append(theta)
# Return a joint multi-class model with weights for all classes
return np.vstack(weights).transpose()
def predict(model: np.ndarray, test_x: pd.DataFrame) -> np.ndarray:
"""Node for making predictions given a pre-trained model and a test set."""
X = test_x.to_numpy()
# Add bias to the features
bias = np.ones((X.shape[0], 1))
X = np.concatenate((bias, X), axis=1)
# Predict "probabilities" for each class
result = _sigmoid(np.dot(X, model))
# Return the index of the class with max probability for all samples
return np.argmax(result, axis=1)
def report_accuracy(predictions: np.ndarray, test_y: pd.DataFrame) -> None:
"""Node for reporting the accuracy of the predictions performed by the
previous node. Notice that this function has no outputs, except logging.
"""
# Get true class index
target = np.argmax(test_y.to_numpy(), axis=1)
# Calculate accuracy of predictions
accuracy = np.sum(predictions == target) / target.shape[0]
# Log the accuracy of the model
log = logging.getLogger(__name__)
log.info("Model accuracy on test set: %0.2f%%", accuracy * 100)
def _sigmoid(z):
"""A helper sigmoid function used by the training and the scoring nodes."""
return 1 / (1 + np.exp(-z))
|
[
"numpy.sum",
"numpy.concatenate",
"numpy.argmax",
"numpy.zeros",
"numpy.ones",
"numpy.vstack",
"numpy.exp",
"numpy.dot",
"logging.getLogger"
] |
[((910, 934), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (917, 934), True, 'import numpy as np\n'), ((943, 976), 'numpy.concatenate', 'np.concatenate', (['(bias, X)'], {'axis': '(1)'}), '((bias, X), axis=1)\n', (957, 976), True, 'import numpy as np\n'), ((1740, 1764), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (1747, 1764), True, 'import numpy as np\n'), ((1773, 1806), 'numpy.concatenate', 'np.concatenate', (['(bias, X)'], {'axis': '(1)'}), '((bias, X), axis=1)\n', (1787, 1806), True, 'import numpy as np\n'), ((1978, 2003), 'numpy.argmax', 'np.argmax', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (1987, 2003), True, 'import numpy as np\n'), ((2468, 2495), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2485, 2495), False, 'import logging\n'), ((1114, 1134), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {}), '(X.shape[1])\n', (1122, 1134), True, 'import numpy as np\n'), ((1875, 1891), 'numpy.dot', 'np.dot', (['X', 'model'], {}), '(X, model)\n', (1881, 1891), True, 'import numpy as np\n'), ((2374, 2403), 'numpy.sum', 'np.sum', (['(predictions == target)'], {}), '(predictions == target)\n', (2380, 2403), True, 'import numpy as np\n'), ((1205, 1221), 'numpy.dot', 'np.dot', (['X', 'theta'], {}), '(X, theta)\n', (1211, 1221), True, 'import numpy as np\n'), ((1490, 1508), 'numpy.vstack', 'np.vstack', (['weights'], {}), '(weights)\n', (1499, 1508), True, 'import numpy as np\n'), ((2683, 2693), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (2689, 2693), True, 'import numpy as np\n'), ((1273, 1291), 'numpy.dot', 'np.dot', (['X.T', '(h - y)'], {}), '(X.T, h - y)\n', (1279, 1291), True, 'import numpy as np\n')]
|
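The three node functions above form a small train/predict/report chain; a usage sketch with synthetic one-hot data follows (the parameter values stand in for conf/project/parameters.yml and are not from the original project):

# Hedged usage sketch for the Kedro example nodes above, with synthetic data.
import logging
import numpy as np
import pandas as pd

logging.basicConfig(level=logging.INFO)   # so report_accuracy's log.info is visible

rng = np.random.RandomState(0)
train_x = pd.DataFrame(rng.rand(120, 4))
labels = rng.randint(0, 3, size=120)
train_y = pd.DataFrame(np.eye(3)[labels])   # one-hot targets, as the nodes expect

parameters = {"example_num_train_iter": 500, "example_learning_rate": 0.1}
model = train_model(train_x, train_y, parameters)
predictions = predict(model, train_x)
report_accuracy(predictions, train_y)      # logs "Model accuracy on test set: ..."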
from __future__ import division, absolute_import, print_function
import unittest
import numpy.testing as testing
import numpy as np
import healpy as hp
import healsparse
class UpdateValuesTestCase(unittest.TestCase):
def test_update_values_inorder(self):
"""
Test doing update_values, in coarse pixel order.
"""
nside_coverage = 32
nside_map = 64
dtype = np.float64
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, dtype)
nfine_per_cov = 2**sparse_map._cov_map.bit_shift
test_pix = np.arange(nfine_per_cov) + nfine_per_cov * 10
test_values = np.zeros(nfine_per_cov)
sparse_map.update_values_pix(test_pix, test_values)
testing.assert_almost_equal(sparse_map.get_values_pix(test_pix), test_values)
valid_pixels = sparse_map.valid_pixels
testing.assert_equal(valid_pixels, test_pix)
test_pix2 = np.arange(nfine_per_cov) + nfine_per_cov * 16
test_values2 = np.zeros(nfine_per_cov) + 100
sparse_map.update_values_pix(test_pix2, test_values2)
testing.assert_almost_equal(sparse_map.get_values_pix(test_pix), test_values)
testing.assert_almost_equal(sparse_map.get_values_pix(test_pix2), test_values2)
valid_pixels = sparse_map.valid_pixels
testing.assert_equal(np.sort(valid_pixels), np.sort(np.concatenate((test_pix, test_pix2))))
def test_update_values_outoforder(self):
"""
Test doing update_values, out of order.
"""
nside_coverage = 32
nside_map = 64
dtype = np.float64
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, dtype)
nfine_per_cov = 2**sparse_map._cov_map.bit_shift
test_pix = np.arange(nfine_per_cov) + nfine_per_cov * 16
test_values = np.zeros(nfine_per_cov)
sparse_map.update_values_pix(test_pix, test_values)
testing.assert_almost_equal(sparse_map.get_values_pix(test_pix), test_values)
valid_pixels = sparse_map.valid_pixels
testing.assert_equal(valid_pixels, test_pix)
test_pix2 = np.arange(nfine_per_cov) + nfine_per_cov * 10
test_values2 = np.zeros(nfine_per_cov) + 100
sparse_map.update_values_pix(test_pix2, test_values2)
testing.assert_almost_equal(sparse_map.get_values_pix(test_pix), test_values)
testing.assert_almost_equal(sparse_map.get_values_pix(test_pix2), test_values2)
valid_pixels = sparse_map.valid_pixels
testing.assert_equal(np.sort(valid_pixels), np.sort(np.concatenate((test_pix, test_pix2))))
def test_update_values_nonunique(self):
"""
Test doing update_values with non-unique pixels.
"""
nside_coverage = 32
nside_map = 64
dtype = np.float64
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, dtype)
pixels = np.array([0, 1, 5, 10, 0])
self.assertRaises(ValueError, sparse_map.update_values_pix, pixels, 0.0)
self.assertRaises(ValueError, sparse_map.__setitem__, pixels, 0.0)
def test_update_values_or(self):
"""
Test doing update_values with or operation.
"""
nside_coverage = 32
nside_map = 64
dtype = np.int32
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, dtype, sentinel=0)
# Check with new unique pixels
pixels = np.arange(4)
values = np.array([2**0, 2**1, 2**2, 2**4], dtype=dtype)
sparse_map.update_values_pix(pixels, values, operation='or')
testing.assert_array_equal(sparse_map[pixels], values)
# Check with pre-existing unique pixels
values2 = np.array([2**1, 2**2, 2**3, 2**4], dtype=dtype)
sparse_map.update_values_pix(pixels, values2, operation='or')
testing.assert_array_equal(sparse_map[pixels],
values | values2)
# Check with new non-unique pixels
pixels = np.array([100, 101, 102, 100])
values = np.array([2**0, 2**1, 2**2, 2**4], dtype=dtype)
sparse_map.update_values_pix(pixels, values, operation='or')
testing.assert_array_equal(sparse_map[pixels],
np.array([2**0 | 2**4, 2**1, 2**2, 2**0 | 2**4]))
# Check with pre-existing non-unique pixels
values = np.array([2**1, 2**2, 2**3, 2**5], dtype=dtype)
sparse_map.update_values_pix(pixels, values, operation='or')
testing.assert_array_equal(sparse_map[pixels],
np.array([2**0 | 2**4 | 2**1 | 2**5,
2**1 | 2**2,
2**2 | 2**3,
2**0 | 2**4 | 2**1 | 2**5]))
def test_update_values_and(self):
"""
Test doing update_values with and operation.
"""
nside_coverage = 32
nside_map = 64
dtype = np.int32
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, dtype, sentinel=0)
# Check with new unique pixels
pixels = np.arange(4)
values = np.array([2**0, 2**1, 2**2, 2**4], dtype=dtype)
sparse_map.update_values_pix(pixels, values, operation='and')
testing.assert_array_equal(sparse_map[pixels], values*0)
# Check with pre-existing unique pixels
sparse_map[pixels] = values
sparse_map.update_values_pix(pixels, values, operation='and')
testing.assert_array_equal(sparse_map[pixels], values)
# Check with new non-unique pixels
pixels = np.array([100, 101, 102, 100])
values = np.array([2**0, 2**1, 2**2, 2**4], dtype=dtype)
sparse_map.update_values_pix(pixels, values, operation='and')
testing.assert_array_equal(sparse_map[pixels], values*0)
# Check with pre-existing non-unique pixels
sparse_map[100] = 2**0 | 2**4
sparse_map[101] = 2**1
sparse_map[102] = 2**2
sparse_map.update_values_pix(pixels, values, operation='and')
# The first and last will be 0 because we get anded sequentially.
testing.assert_array_equal(sparse_map[pixels],
[0, 2**1, 2**2, 0])
def test_update_values_pos(self):
"""
Test doing update_values with positions (unique and non-unique).
"""
nside_coverage = 32
nside_map = 64
dtype = np.float64
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, dtype)
pixels = np.array([0, 1, 5, 10, 20])
ra, dec = hp.pix2ang(nside_map, pixels, lonlat=True, nest=True)
sparse_map.update_values_pos(ra, dec, 0.0)
testing.assert_array_almost_equal(sparse_map[pixels], 0.0)
# Test non-unique raise
pixels = np.array([0, 1, 5, 10, 0])
ra, dec = hp.pix2ang(nside_map, pixels, lonlat=True, nest=True)
self.assertRaises(ValueError, sparse_map.update_values_pos, ra, dec, 0.0)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"healpy.pix2ang",
"numpy.testing.assert_array_almost_equal",
"numpy.testing.assert_array_equal",
"numpy.zeros",
"numpy.sort",
"numpy.array",
"numpy.arange",
"numpy.testing.assert_equal",
"healsparse.HealSparseMap.make_empty",
"numpy.concatenate"
] |
[((7166, 7181), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7179, 7181), False, 'import unittest\n'), ((443, 512), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'dtype'], {}), '(nside_coverage, nside_map, dtype)\n', (478, 512), False, 'import healsparse\n'), ((659, 682), 'numpy.zeros', 'np.zeros', (['nfine_per_cov'], {}), '(nfine_per_cov)\n', (667, 682), True, 'import numpy as np\n'), ((886, 930), 'numpy.testing.assert_equal', 'testing.assert_equal', (['valid_pixels', 'test_pix'], {}), '(valid_pixels, test_pix)\n', (906, 930), True, 'import numpy.testing as testing\n'), ((1654, 1723), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'dtype'], {}), '(nside_coverage, nside_map, dtype)\n', (1689, 1723), False, 'import healsparse\n'), ((1870, 1893), 'numpy.zeros', 'np.zeros', (['nfine_per_cov'], {}), '(nfine_per_cov)\n', (1878, 1893), True, 'import numpy as np\n'), ((2097, 2141), 'numpy.testing.assert_equal', 'testing.assert_equal', (['valid_pixels', 'test_pix'], {}), '(valid_pixels, test_pix)\n', (2117, 2141), True, 'import numpy.testing as testing\n'), ((2873, 2942), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'dtype'], {}), '(nside_coverage, nside_map, dtype)\n', (2908, 2942), False, 'import healsparse\n'), ((2961, 2987), 'numpy.array', 'np.array', (['[0, 1, 5, 10, 0]'], {}), '([0, 1, 5, 10, 0])\n', (2969, 2987), True, 'import numpy as np\n'), ((3357, 3442), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'dtype'], {'sentinel': '(0)'}), '(nside_coverage, nside_map, dtype,\n sentinel=0)\n', (3392, 3442), False, 'import healsparse\n'), ((3496, 3508), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (3505, 3508), True, 'import numpy as np\n'), ((3526, 3581), 'numpy.array', 'np.array', (['[2 ** 0, 2 ** 1, 2 ** 2, 2 ** 4]'], {'dtype': 'dtype'}), '([2 ** 0, 2 ** 1, 2 ** 2, 2 ** 4], dtype=dtype)\n', (3534, 3581), True, 'import numpy as np\n'), ((3652, 3706), 'numpy.testing.assert_array_equal', 'testing.assert_array_equal', (['sparse_map[pixels]', 'values'], {}), '(sparse_map[pixels], values)\n', (3678, 3706), True, 'import numpy.testing as testing\n'), ((3774, 3829), 'numpy.array', 'np.array', (['[2 ** 1, 2 ** 2, 2 ** 3, 2 ** 4]'], {'dtype': 'dtype'}), '([2 ** 1, 2 ** 2, 2 ** 3, 2 ** 4], dtype=dtype)\n', (3782, 3829), True, 'import numpy as np\n'), ((3901, 3965), 'numpy.testing.assert_array_equal', 'testing.assert_array_equal', (['sparse_map[pixels]', '(values | values2)'], {}), '(sparse_map[pixels], values | values2)\n', (3927, 3965), True, 'import numpy.testing as testing\n'), ((4062, 4092), 'numpy.array', 'np.array', (['[100, 101, 102, 100]'], {}), '([100, 101, 102, 100])\n', (4070, 4092), True, 'import numpy as np\n'), ((4110, 4165), 'numpy.array', 'np.array', (['[2 ** 0, 2 ** 1, 2 ** 2, 2 ** 4]'], {'dtype': 'dtype'}), '([2 ** 0, 2 ** 1, 2 ** 2, 2 ** 4], dtype=dtype)\n', (4118, 4165), True, 'import numpy as np\n'), ((4438, 4493), 'numpy.array', 'np.array', (['[2 ** 1, 2 ** 2, 2 ** 3, 2 ** 5]'], {'dtype': 'dtype'}), '([2 ** 1, 2 ** 2, 2 ** 3, 2 ** 5], dtype=dtype)\n', (4446, 4493), True, 'import numpy as np\n'), ((5087, 5172), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'dtype'], {'sentinel': '(0)'}), '(nside_coverage, nside_map, dtype,\n sentinel=0)\n', (5122, 
5172), False, 'import healsparse\n'), ((5226, 5238), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (5235, 5238), True, 'import numpy as np\n'), ((5256, 5311), 'numpy.array', 'np.array', (['[2 ** 0, 2 ** 1, 2 ** 2, 2 ** 4]'], {'dtype': 'dtype'}), '([2 ** 0, 2 ** 1, 2 ** 2, 2 ** 4], dtype=dtype)\n', (5264, 5311), True, 'import numpy as np\n'), ((5383, 5441), 'numpy.testing.assert_array_equal', 'testing.assert_array_equal', (['sparse_map[pixels]', '(values * 0)'], {}), '(sparse_map[pixels], values * 0)\n', (5409, 5441), True, 'import numpy.testing as testing\n'), ((5603, 5657), 'numpy.testing.assert_array_equal', 'testing.assert_array_equal', (['sparse_map[pixels]', 'values'], {}), '(sparse_map[pixels], values)\n', (5629, 5657), True, 'import numpy.testing as testing\n'), ((5719, 5749), 'numpy.array', 'np.array', (['[100, 101, 102, 100]'], {}), '([100, 101, 102, 100])\n', (5727, 5749), True, 'import numpy as np\n'), ((5767, 5822), 'numpy.array', 'np.array', (['[2 ** 0, 2 ** 1, 2 ** 2, 2 ** 4]'], {'dtype': 'dtype'}), '([2 ** 0, 2 ** 1, 2 ** 2, 2 ** 4], dtype=dtype)\n', (5775, 5822), True, 'import numpy as np\n'), ((5894, 5952), 'numpy.testing.assert_array_equal', 'testing.assert_array_equal', (['sparse_map[pixels]', '(values * 0)'], {}), '(sparse_map[pixels], values * 0)\n', (5920, 5952), True, 'import numpy.testing as testing\n'), ((6257, 6327), 'numpy.testing.assert_array_equal', 'testing.assert_array_equal', (['sparse_map[pixels]', '[0, 2 ** 1, 2 ** 2, 0]'], {}), '(sparse_map[pixels], [0, 2 ** 1, 2 ** 2, 0])\n', (6283, 6327), True, 'import numpy.testing as testing\n'), ((6595, 6664), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'dtype'], {}), '(nside_coverage, nside_map, dtype)\n', (6630, 6664), False, 'import healsparse\n'), ((6683, 6710), 'numpy.array', 'np.array', (['[0, 1, 5, 10, 20]'], {}), '([0, 1, 5, 10, 20])\n', (6691, 6710), True, 'import numpy as np\n'), ((6729, 6782), 'healpy.pix2ang', 'hp.pix2ang', (['nside_map', 'pixels'], {'lonlat': '(True)', 'nest': '(True)'}), '(nside_map, pixels, lonlat=True, nest=True)\n', (6739, 6782), True, 'import healpy as hp\n'), ((6843, 6901), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[pixels]', '(0.0)'], {}), '(sparse_map[pixels], 0.0)\n', (6876, 6901), True, 'import numpy.testing as testing\n'), ((6952, 6978), 'numpy.array', 'np.array', (['[0, 1, 5, 10, 0]'], {}), '([0, 1, 5, 10, 0])\n', (6960, 6978), True, 'import numpy as np\n'), ((6997, 7050), 'healpy.pix2ang', 'hp.pix2ang', (['nside_map', 'pixels'], {'lonlat': '(True)', 'nest': '(True)'}), '(nside_map, pixels, lonlat=True, nest=True)\n', (7007, 7050), True, 'import healpy as hp\n'), ((591, 615), 'numpy.arange', 'np.arange', (['nfine_per_cov'], {}), '(nfine_per_cov)\n', (600, 615), True, 'import numpy as np\n'), ((952, 976), 'numpy.arange', 'np.arange', (['nfine_per_cov'], {}), '(nfine_per_cov)\n', (961, 976), True, 'import numpy as np\n'), ((1021, 1044), 'numpy.zeros', 'np.zeros', (['nfine_per_cov'], {}), '(nfine_per_cov)\n', (1029, 1044), True, 'import numpy as np\n'), ((1365, 1386), 'numpy.sort', 'np.sort', (['valid_pixels'], {}), '(valid_pixels)\n', (1372, 1386), True, 'import numpy as np\n'), ((1802, 1826), 'numpy.arange', 'np.arange', (['nfine_per_cov'], {}), '(nfine_per_cov)\n', (1811, 1826), True, 'import numpy as np\n'), ((2163, 2187), 'numpy.arange', 'np.arange', (['nfine_per_cov'], {}), '(nfine_per_cov)\n', (2172, 2187), True, 'import numpy as np\n'), ((2232, 
2255), 'numpy.zeros', 'np.zeros', (['nfine_per_cov'], {}), '(nfine_per_cov)\n', (2240, 2255), True, 'import numpy as np\n'), ((2576, 2597), 'numpy.sort', 'np.sort', (['valid_pixels'], {}), '(valid_pixels)\n', (2583, 2597), True, 'import numpy as np\n'), ((4318, 4378), 'numpy.array', 'np.array', (['[2 ** 0 | 2 ** 4, 2 ** 1, 2 ** 2, 2 ** 0 | 2 ** 4]'], {}), '([2 ** 0 | 2 ** 4, 2 ** 1, 2 ** 2, 2 ** 0 | 2 ** 4])\n', (4326, 4378), True, 'import numpy as np\n'), ((4646, 4764), 'numpy.array', 'np.array', (['[2 ** 0 | 2 ** 4 | 2 ** 1 | 2 ** 5, 2 ** 1 | 2 ** 2, 2 ** 2 | 2 ** 3, 2 ** \n 0 | 2 ** 4 | 2 ** 1 | 2 ** 5]'], {}), '([2 ** 0 | 2 ** 4 | 2 ** 1 | 2 ** 5, 2 ** 1 | 2 ** 2, 2 ** 2 | 2 **\n 3, 2 ** 0 | 2 ** 4 | 2 ** 1 | 2 ** 5])\n', (4654, 4764), True, 'import numpy as np\n'), ((1396, 1433), 'numpy.concatenate', 'np.concatenate', (['(test_pix, test_pix2)'], {}), '((test_pix, test_pix2))\n', (1410, 1433), True, 'import numpy as np\n'), ((2607, 2644), 'numpy.concatenate', 'np.concatenate', (['(test_pix, test_pix2)'], {}), '((test_pix, test_pix2))\n', (2621, 2644), True, 'import numpy as np\n')]
|
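Distilling the test cases above into a minimal sketch of the HealSparseMap calls they exercise (pixel indices and values chosen arbitrarily):

# Hedged minimal sketch based on the tests above: create an empty map,
# update a few pixels, and read them back.
import numpy as np
import healsparse

sparse_map = healsparse.HealSparseMap.make_empty(32, 64, np.float64)
pixels = np.arange(10)
sparse_map.update_values_pix(pixels, np.full(10, 1.5))
print(sparse_map.get_values_pix(pixels))   # ten values of 1.5
print(sparse_map.valid_pixels)              # the ten updated pixel indices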
from pathlib import Path
import numpy as np
import pytest
from npe2 import DynamicPlugin
from npe2.manifest.contributions import SampleDataURI
import napari
from napari.layers._source import Source
from napari.viewer import ViewerModel
def test_sample_hook(builtins, tmp_plugin: DynamicPlugin):
viewer = ViewerModel()
NAME = tmp_plugin.name
KEY = 'random data'
with pytest.raises(KeyError, match=f"Plugin {NAME!r} does not provide"):
viewer.open_sample(NAME, KEY)
@tmp_plugin.contribute.sample_data(key=KEY)
def _generate_random_data(shape=(512, 512)):
data = np.random.rand(*shape)
return [(data, {'name': KEY})]
LOGO = str(Path(napari.__file__).parent / 'resources' / 'logo.png')
tmp_plugin.manifest.contributions.sample_data.append(
SampleDataURI(uri=LOGO, key='napari logo', display_name='Napari logo')
)
assert len(viewer.layers) == 0
viewer.open_sample(NAME, KEY)
assert viewer.layers[-1].source == Source(
path=None, reader_plugin=None, sample=(NAME, KEY)
)
assert len(viewer.layers) == 1
viewer.open_sample(NAME, 'napari logo')
assert viewer.layers[-1].source == Source(
path=LOGO, reader_plugin='napari', sample=(NAME, 'napari logo')
)
# test calling with kwargs
viewer.open_sample(NAME, KEY, shape=(256, 256))
assert len(viewer.layers) == 3
assert viewer.layers[-1].source == Source(sample=(NAME, KEY))
|
[
"napari.viewer.ViewerModel",
"npe2.manifest.contributions.SampleDataURI",
"pytest.raises",
"pathlib.Path",
"numpy.random.rand",
"napari.layers._source.Source"
] |
[((313, 326), 'napari.viewer.ViewerModel', 'ViewerModel', ([], {}), '()\n', (324, 326), False, 'from napari.viewer import ViewerModel\n'), ((387, 453), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': 'f"""Plugin {NAME!r} does not provide"""'}), "(KeyError, match=f'Plugin {NAME!r} does not provide')\n", (400, 453), False, 'import pytest\n'), ((606, 628), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (620, 628), True, 'import numpy as np\n'), ((807, 877), 'npe2.manifest.contributions.SampleDataURI', 'SampleDataURI', ([], {'uri': 'LOGO', 'key': '"""napari logo"""', 'display_name': '"""Napari logo"""'}), "(uri=LOGO, key='napari logo', display_name='Napari logo')\n", (820, 877), False, 'from npe2.manifest.contributions import SampleDataURI\n'), ((993, 1050), 'napari.layers._source.Source', 'Source', ([], {'path': 'None', 'reader_plugin': 'None', 'sample': '(NAME, KEY)'}), '(path=None, reader_plugin=None, sample=(NAME, KEY))\n', (999, 1050), False, 'from napari.layers._source import Source\n'), ((1183, 1254), 'napari.layers._source.Source', 'Source', ([], {'path': 'LOGO', 'reader_plugin': '"""napari"""', 'sample': "(NAME, 'napari logo')"}), "(path=LOGO, reader_plugin='napari', sample=(NAME, 'napari logo'))\n", (1189, 1254), False, 'from napari.layers._source import Source\n'), ((1427, 1453), 'napari.layers._source.Source', 'Source', ([], {'sample': '(NAME, KEY)'}), '(sample=(NAME, KEY))\n', (1433, 1453), False, 'from napari.layers._source import Source\n'), ((684, 705), 'pathlib.Path', 'Path', (['napari.__file__'], {}), '(napari.__file__)\n', (688, 705), False, 'from pathlib import Path\n')]
|
def example(Simulator):
import numpy as np
from csdl import Model
import csdl
class ExampleReorderMatrixSparse(Model):
def define(self):
shape2 = (5, 4)
b = np.arange(20).reshape(shape2)
mat = self.declare_variable('b', val=b)
self.register_output(
'einsum_reorder1_sparse_derivs',
csdl.einsum(
mat,
subscripts='ij->ji',
partial_format='sparse',
))
sim = Simulator(ExampleReorderMatrixSparse())
sim.run()
print('b', sim['b'].shape)
print(sim['b'])
print('einsum_reorder1_sparse_derivs', sim['einsum_reorder1_sparse_derivs'].shape)
print(sim['einsum_reorder1_sparse_derivs'])
return sim
|
[
"csdl.einsum",
"numpy.arange"
] |
[((406, 468), 'csdl.einsum', 'csdl.einsum', (['mat'], {'subscripts': '"""ij->ji"""', 'partial_format': '"""sparse"""'}), "(mat, subscripts='ij->ji', partial_format='sparse')\n", (417, 468), False, 'import csdl\n'), ((220, 233), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (229, 233), True, 'import numpy as np\n')]
|
import argparse
from dataloader import picked_train_test_data_loader
from sklearn import preprocessing
from classifier import train_best
import numpy
from bert_serving.client import BertClient
bc = BertClient()
def train_test(pickled_train_path, pickled_test_path):
train, test = picked_train_test_data_loader(pickled_train_path, pickled_test_path)
def vectorize_dataset(data):
X = []
Y = []
sentences = []
for row in data:
sentences.append(" ".join(row[3]))
Y.append(row[0])
if len(sentences)%20 == 0:
X.extend([e for e in bc.encode(sentences)])
sentences = []
if len(sentences) != 0:
X.extend([e for e in bc.encode(sentences)])
return numpy.vstack(X), Y
X_train, Y_train = vectorize_dataset(train)
X_test, Y_test = vectorize_dataset(test)
X_train = numpy.asarray(X_train)
X_test = numpy.asarray(X_test)
le = preprocessing.LabelEncoder()
le.fit(Y_train)
Y_train = le.transform(Y_train)
Y_test = le.transform(Y_test)
print ("Length of vector: %s"%X_train.shape[1])
return train_best(X_train, Y_train, X_test, Y_test)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Evaluate ELMo based sentence embedding')
parser.add_argument("pickled_training_data_path", help="pickled train path")
parser.add_argument("pickled_test_data_path", help="pickled test path")
args = parser.parse_args()
pickled_training_data_path = args.pickled_training_data_path
pickled_test_data_path = args.pickled_test_data_path
results = train_test(pickled_training_data_path, pickled_test_data_path)
results = results.split("\n")[-2]
print (results)
|
[
"argparse.ArgumentParser",
"numpy.asarray",
"dataloader.picked_train_test_data_loader",
"sklearn.preprocessing.LabelEncoder",
"classifier.train_best",
"bert_serving.client.BertClient",
"numpy.vstack"
] |
[((200, 212), 'bert_serving.client.BertClient', 'BertClient', ([], {}), '()\n', (210, 212), False, 'from bert_serving.client import BertClient\n'), ((287, 355), 'dataloader.picked_train_test_data_loader', 'picked_train_test_data_loader', (['pickled_train_path', 'pickled_test_path'], {}), '(pickled_train_path, pickled_test_path)\n', (316, 355), False, 'from dataloader import picked_train_test_data_loader\n'), ((842, 864), 'numpy.asarray', 'numpy.asarray', (['X_train'], {}), '(X_train)\n', (855, 864), False, 'import numpy\n'), ((876, 897), 'numpy.asarray', 'numpy.asarray', (['X_test'], {}), '(X_test)\n', (889, 897), False, 'import numpy\n'), ((906, 934), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (932, 934), False, 'from sklearn import preprocessing\n'), ((1080, 1124), 'classifier.train_best', 'train_best', (['X_train', 'Y_train', 'X_test', 'Y_test'], {}), '(X_train, Y_train, X_test, Y_test)\n', (1090, 1124), False, 'from classifier import train_best\n'), ((1166, 1243), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate ELMo based sentence embedding"""'}), "(description='Evaluate ELMo based sentence embedding')\n", (1189, 1243), False, 'import argparse\n'), ((720, 735), 'numpy.vstack', 'numpy.vstack', (['X'], {}), '(X)\n', (732, 735), False, 'import numpy\n')]
|
from flask import Flask, request
from flask_cors import CORS, cross_origin
from flask_restful import Resource, Api
from json import dumps
from flask_jsonpify import jsonify
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import seaborn as sns
from matplotlib.pylab import rcParams
from datetime import datetime
app = Flask(__name__)
api = Api(app)
from sklearn.externals import joblib
CORS(app)
@app.route("/get_",methods=["POST"])
def get_():
#date_=request.form['date_']
#tons=request.form['tons']
#return json.dumps({'status':'OK'});
data = request.get_json(force=True)
date_= datetime.strptime(data['date_'], '%Y-%m-%d').toordinal()
qty=float(data["tons"])
lin_reg = joblib.load("regression_model.pkl")
dat= lin_reg.predict(np.array([[qty,date_]]))
dat=np.round(dat,2)
dat=dat.tolist()
return jsonify(dat)
##api.add_resource(Employees_Name, '/employees/<employee_id>') # Route_3
@app.route("/get1_",methods=["POST"])
def get1_():
data1=request.get_json(force=True)
date_=datetime.strptime(data1['date_'],'%Y-%m-%d').toordinal()
qty=float(data1["tons"])
lin_reg1 = joblib.load("regression_model1.pkl")
dat1= lin_reg1.predict(np.array([[date_,qty]]))
dat1=dat1.tolist()
return jsonify(dat1)
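# --- Hedged usage sketch (editor addition, not part of the original service). It shows how
# a client might call the two prediction routes above; the host, port, and payload values are
# illustrative only, and the `requests` library is an assumed client-side dependency.
#
#   import requests
#   payload = {"date_": "2020-01-15", "tons": 12.5}
#   print(requests.post("http://localhost:8080/get_", json=payload).json())
#   print(requests.post("http://localhost:8080/get1_", json=payload).json())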
#@app.route("/get2_",methods=["POST"])
#def get2_():
# data2=request.get_json(force=True)
# date_=datetime.strptime(data2['date_'],'%Y-%m-%d').toordinal()
# qty=float(data2["tons"])
#print(date_,qty)
#lin_reg2 = joblib.load("regression_model2.pkl")
#dat2= lin_reg2.predict(np.array([[date_,qty]]))
#dat2=dat2.tolist()
#return jsonify(dat2)
if __name__ == '__main__':
app.run(port=8080)
|
[
"flask_restful.Api",
"flask_jsonpify.jsonify",
"flask_cors.CORS",
"flask.Flask",
"datetime.datetime.strptime",
"numpy.array",
"sklearn.externals.joblib.load",
"numpy.round",
"flask.request.get_json"
] |
[((340, 355), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (345, 355), False, 'from flask import Flask, request\n'), ((362, 370), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (365, 370), False, 'from flask_restful import Resource, Api\n'), ((410, 419), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (414, 419), False, 'from flask_cors import CORS, cross_origin\n'), ((588, 616), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (604, 616), False, 'from flask import Flask, request\n'), ((733, 768), 'sklearn.externals.joblib.load', 'joblib.load', (['"""regression_model.pkl"""'], {}), "('regression_model.pkl')\n", (744, 768), False, 'from sklearn.externals import joblib\n'), ((827, 843), 'numpy.round', 'np.round', (['dat', '(2)'], {}), '(dat, 2)\n', (835, 843), True, 'import numpy as np\n'), ((881, 893), 'flask_jsonpify.jsonify', 'jsonify', (['dat'], {}), '(dat)\n', (888, 893), False, 'from flask_jsonpify import jsonify\n'), ((1031, 1059), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (1047, 1059), False, 'from flask import Flask, request\n'), ((1179, 1215), 'sklearn.externals.joblib.load', 'joblib.load', (['"""regression_model1.pkl"""'], {}), "('regression_model1.pkl')\n", (1190, 1215), False, 'from sklearn.externals import joblib\n'), ((1303, 1316), 'flask_jsonpify.jsonify', 'jsonify', (['dat1'], {}), '(dat1)\n', (1310, 1316), False, 'from flask_jsonpify import jsonify\n'), ((794, 818), 'numpy.array', 'np.array', (['[[qty, date_]]'], {}), '([[qty, date_]])\n', (802, 818), True, 'import numpy as np\n'), ((1243, 1267), 'numpy.array', 'np.array', (['[[date_, qty]]'], {}), '([[date_, qty]])\n', (1251, 1267), True, 'import numpy as np\n'), ((629, 673), 'datetime.datetime.strptime', 'datetime.strptime', (["data['date_']", '"""%Y-%m-%d"""'], {}), "(data['date_'], '%Y-%m-%d')\n", (646, 673), False, 'from datetime import datetime\n'), ((1070, 1115), 'datetime.datetime.strptime', 'datetime.strptime', (["data1['date_']", '"""%Y-%m-%d"""'], {}), "(data1['date_'], '%Y-%m-%d')\n", (1087, 1115), False, 'from datetime import datetime\n')]
|
import subprocess
import ujson as json
import numpy as np
import sys
import os
os.environ["MKL_SERVICE_FORCE_INTEL"] = "1"
runs=10
#Top k HAN, variant2; adjust train_per in helper.py
args = [
'python3',
'train.py',
'--problem-path',
'../../../LineGraphGCN/data/yelp/',
'--problem',
'yelp',
'--lr-init',
'1e-4',
'--weight-decay',
'5e-4',
'--dropout',
'0.5',
'--prep-class',
'linear',
'--n-train-samples',
'100,100',
'--n-val-samples',
'100,100',
'--prep-len',
'128',
'--in-edge-len',
'18',
'--n-head',
'8',
'--output-dims',
'128,128,32,32',
'--n-layer',
'1',
'--tolerance',
'30',
'--train-per',
'0.4',
'--batch-size',
'64',
'--val-batch-size',
'64',
'--K',
'2599',
'--concat-node',
'--optimizer',
'adam',
'--lr-schedule',
'const',
'--mpaggr-class',
'attention',
]
print(args)
test_acc = []
test_macro = []
for seed in range(runs):
process = subprocess.Popen(args+['--seed',str(seed)],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
text = process.communicate()[1]
lines = text.decode().split('\n')
# print(lines)
correct = False
for line in lines:
if '{' not in line:
continue
print(line)
line = json.loads(line)
if 'test_metric' in line:
correct = True
test_acc.append(line['test_metric']['accuracy'])
test_macro.append(line['test_metric']['macro'])
if not correct:
print(lines)
sys.stdout.flush()
test_acc = np.asarray(test_acc)
test_macro = np.asarray(test_macro)
print('average acc for {} runs is : {}'.format(len(test_acc), np.average(test_acc)))
print('average macro for {} runs is : {}'.format(len(test_macro), np.average(test_macro)))
|
[
"numpy.average",
"numpy.asarray",
"ujson.loads",
"sys.stdout.flush"
] |
[((1609, 1629), 'numpy.asarray', 'np.asarray', (['test_acc'], {}), '(test_acc)\n', (1619, 1629), True, 'import numpy as np\n'), ((1643, 1665), 'numpy.asarray', 'np.asarray', (['test_macro'], {}), '(test_macro)\n', (1653, 1665), True, 'import numpy as np\n'), ((1579, 1597), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1595, 1597), False, 'import sys\n'), ((1335, 1351), 'ujson.loads', 'json.loads', (['line'], {}), '(line)\n', (1345, 1351), True, 'import ujson as json\n'), ((1728, 1748), 'numpy.average', 'np.average', (['test_acc'], {}), '(test_acc)\n', (1738, 1748), True, 'import numpy as np\n'), ((1817, 1839), 'numpy.average', 'np.average', (['test_macro'], {}), '(test_macro)\n', (1827, 1839), True, 'import numpy as np\n')]
|
import math
import random
import torch
import numpy as np
from scipy.stats import beta
from openmixup.models.utils import batch_shuffle_ddp
def fftfreqnd(h, w=None, z=None):
""" Get bin values for discrete fourier transform of size (h, w, z)
:param h: Required, first dimension size
:param w: Optional, second dimension size
:param z: Optional, third dimension size
"""
fz = fx = 0
fy = np.fft.fftfreq(h)
if w is not None:
fy = np.expand_dims(fy, -1)
if w % 2 == 1:
fx = np.fft.fftfreq(w)[: w // 2 + 2]
else:
fx = np.fft.fftfreq(w)[: w // 2 + 1]
if z is not None:
fy = np.expand_dims(fy, -1)
if z % 2 == 1:
fz = np.fft.fftfreq(z)[:, None]
else:
fz = np.fft.fftfreq(z)[:, None]
return np.sqrt(fx * fx + fy * fy + fz * fz)
def get_spectrum(freqs, decay_power, ch, h, w=0, z=0):
""" Samples a fourier image with given size and frequencies decayed by decay power
:param freqs: Bin values for the discrete fourier transform
:param decay_power: Decay power for frequency decay prop 1/f**d
:param ch: Number of channels for the resulting mask
:param h: Required, first dimension size
:param w: Optional, second dimension size
:param z: Optional, third dimension size
"""
scale = np.ones(1) / (
np.maximum(freqs, np.array([1.0 / max(w, h, z)])) ** decay_power
)
param_size = [ch] + list(freqs.shape) + [2]
param = np.random.randn(*param_size)
scale = np.expand_dims(scale, -1)[None, :]
return scale * param
def make_low_freq_image(decay, shape, ch=1):
""" Sample a low frequency image from fourier space
:param decay_power: Decay power for frequency decay prop 1/f**d
:param shape: Shape of desired mask, list up to 3 dims
:param ch: Number of channels for desired mask
"""
freqs = fftfreqnd(*shape)
spectrum = get_spectrum(freqs, decay, ch, *shape) # .reshape((1, *shape[:-1], -1))
spectrum = spectrum[:, 0] + 1j * spectrum[:, 1]
mask = np.real(np.fft.irfftn(spectrum, shape))
if len(shape) == 1:
mask = mask[:1, : shape[0]]
if len(shape) == 2:
mask = mask[:1, : shape[0], : shape[1]]
if len(shape) == 3:
mask = mask[:1, : shape[0], : shape[1], : shape[2]]
mask = mask
mask = mask - mask.min()
mask = mask / mask.max()
return mask
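# Hedged usage sketch (editor addition): `make_low_freq_image` returns a single-channel mask
# rescaled into [0, 1]; the decay and shape values below are illustrative only.
#
#   mask = make_low_freq_image(decay=3, shape=(32, 32))
#   mask.shape  # (1, 32, 32); mask.min() == 0.0 and mask.max() == 1.0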
def sample_lam(alpha, reformulate=False):
""" Sample a lambda from symmetric beta distribution with given alpha
:param alpha: Alpha value for beta distribution
:param reformulate: If True, uses the reformulation of [1].
"""
if reformulate:
lam = beta.rvs(alpha + 1, alpha)
else:
lam = beta.rvs(alpha, alpha)
return lam
def binarise_mask(mask, lam, in_shape, max_soft=0.0):
""" Binarises a given low frequency image such that it has mean lambda.
:param mask: Low frequency image, usually the result of `make_low_freq_image`
:param lam: Mean value of final mask
:param in_shape: Shape of inputs
:param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask.
"""
idx = mask.reshape(-1).argsort()[::-1]
mask = mask.reshape(-1)
num = (
math.ceil(lam * mask.size)
if random.random() > 0.5
else math.floor(lam * mask.size)
)
eff_soft = max_soft
if max_soft > lam or max_soft > (1 - lam):
eff_soft = min(lam, 1 - lam)
soft = int(mask.size * eff_soft)
num_low = num - soft
num_high = num + soft
mask[idx[:num_high]] = 1
mask[idx[num_low:]] = 0
mask[idx[num_low:num_high]] = np.linspace(1, 0, (num_high - num_low))
mask = mask.reshape((1, *in_shape))
return mask
def sample_mask(alpha, decay_power, shape, max_soft=0.0, reformulate=False):
""" Samples a mean lambda from beta distribution parametrised by alpha,
creates a low frequency image and binarises, it based on this lambda.
:param alpha: Alpha value for beta distribution from which to sample mean of mask
:param decay_power: Decay power for frequency decay prop 1/f**d
:param shape: Shape of desired mask, list up to 3 dims
:param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask.
:param reformulate: If True, uses the reformulation of [1].
"""
if isinstance(shape, int):
shape = (shape,)
# Choose lambda
lam = sample_lam(alpha, reformulate)
# Make mask, get mean / std
mask = make_low_freq_image(decay_power, shape)
mask = binarise_mask(mask, lam, shape, max_soft)
return lam, mask
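# Hedged usage sketch (editor addition): `sample_mask` draws lambda from Beta(alpha, alpha)
# and binarises a low-frequency image so that its mean is approximately lambda; the alpha and
# decay values below are illustrative only.
#
#   lam, mask = sample_mask(alpha=1.0, decay_power=3, shape=(32, 32))
#   mask.shape  # (1, 32, 32); mask.mean() is close to lam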
def sample_and_apply(x, alpha, decay_power, shape, max_soft=0.0, reformulate=False):
"""
:param x: Image batch on which to apply fmix of shape [b, c, shape*]
:param alpha: Alpha value for beta distribution from which to sample mean of mask
:param decay_power: Decay power for frequency decay prop 1/f**d
:param shape: Shape of desired mask, list up to 3 dims
:param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask.
:param reformulate: If True, uses the reformulation of [1].
:return: mixed input, permutation indices, lambda value of mix,
"""
lam, mask = sample_mask(alpha, decay_power, shape, max_soft, reformulate)
index = np.random.permutation(x.shape[0])
x1, x2 = x * mask, x[index] * (1 - mask)
return x1 + x2, index, lam
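# Hedged usage sketch (editor addition): mixing a small random batch with `sample_and_apply`;
# the batch and mask shapes below are illustrative only.
#
#   x = np.random.rand(4, 3, 32, 32)
#   x_mixed, index, lam = sample_and_apply(x, alpha=1.0, decay_power=3, shape=(32, 32))
#   x_mixed.shape  # (4, 3, 32, 32)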
@torch.no_grad()
def fmix(img, gt_label, alpha=1.0, lam=None, dist_mode=False,
decay_power=3, size=(32,32), max_soft=0., reformulate=False, **kwargs):
r""" FMix augmentation.
"FMix: Enhancing Mixed Sample Data Augmentation (https://arxiv.org/abs/2002.12047)".
https://github.com/ecs-vlc/FMix/blob/master/fmix.py
Args:
decay_power (float): Decay power for frequency decay prop 1/f**d
alpha (float): Alpha value for beta distribution from which to
sample mean of mask.
lam (float): The given mixing ratio (fixed). If lam is None, sample a
new lam from Beta distribution.
size ([int] | [int, int] | [int, int, int]): Shape of desired mask,
list up to 3 dims.
max_soft (float): Softening value between 0 and 0.5 which smooths
hard edges in the mask.
reformulate (bool): If True, uses the reformulation of [1].
dist_mode (bool): Whether to do cross gpus index shuffling and
return the mixup shuffle index, which support supervised and
self-supervised methods.
"""
# fmix mask
lam_, mask = sample_mask(alpha, decay_power, size, max_soft, reformulate)
# convert to img dtype (fp16)
mask = torch.from_numpy(mask).cuda().type_as(img)
if lam is None:
lam = lam_
else: # lam bias is fixed, lam should be larger than lam_
if lam_ < lam:
mask = 1 - mask
lam = 1 - lam_
# normal mixup process
if not dist_mode:
indices = torch.randperm(img.size(0)).cuda()
if len(img.size()) == 4: # [N, C, H, W]
img_ = img[indices]
else:
assert img.dim() == 5 # semi-supervised img [N, 2, C, H, W]
# * notice that the rank of two groups of img is fixed
img_ = img[:, 1, ...].contiguous()
img = img[:, 0, ...].contiguous()
y_a = gt_label
y_b = gt_label[indices]
img = mask * img + (1 - mask) * img_
return img, (y_a, y_b, lam)
# dist mixup with cross gpus shuffle
else:
if len(img.size()) == 5: # self-supervised img [N, 2, C, H, W]
img_ = img[:, 1, ...].contiguous()
img = img[:, 0, ...].contiguous()
img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp( # N
img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
else:
assert len(img.size()) == 4 # normal img [N, C, H, w]
img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp( # N
img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
# mixup by mask
img = mask * img + (1 - mask) * img_
if gt_label is not None:
y_a = gt_label
y_b, _, _ = batch_shuffle_ddp(gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
return img, (y_a, y_b, lam)
else:
return img, (idx_shuffle, idx_unshuffle, lam)
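# Hedged usage sketch (editor addition): a minimal, non-distributed call of `fmix` on a random
# batch. The batch size, image size, and number of classes are illustrative, and a CUDA device
# is assumed because the sampled mask is moved to the GPU inside `fmix`.
if __name__ == "__main__":
    if torch.cuda.is_available():
        imgs = torch.randn(8, 3, 32, 32).cuda()
        labels = torch.randint(0, 10, (8,)).cuda()
        mixed, (y_a, y_b, lam) = fmix(imgs, labels, alpha=1.0, size=(32, 32))
        print(mixed.shape, float(lam))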
|
[
"scipy.stats.beta.rvs",
"torch.from_numpy",
"numpy.random.randn",
"math.ceil",
"numpy.fft.irfftn",
"openmixup.models.utils.batch_shuffle_ddp",
"math.floor",
"numpy.expand_dims",
"numpy.ones",
"random.random",
"numpy.fft.fftfreq",
"numpy.linspace",
"numpy.random.permutation",
"torch.no_grad",
"numpy.sqrt"
] |
[((5484, 5499), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5497, 5499), False, 'import torch\n'), ((418, 435), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['h'], {}), '(h)\n', (432, 435), True, 'import numpy as np\n'), ((827, 863), 'numpy.sqrt', 'np.sqrt', (['(fx * fx + fy * fy + fz * fz)'], {}), '(fx * fx + fy * fy + fz * fz)\n', (834, 863), True, 'import numpy as np\n'), ((1508, 1536), 'numpy.random.randn', 'np.random.randn', (['*param_size'], {}), '(*param_size)\n', (1523, 1536), True, 'import numpy as np\n'), ((3676, 3713), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(num_high - num_low)'], {}), '(1, 0, num_high - num_low)\n', (3687, 3713), True, 'import numpy as np\n'), ((5370, 5403), 'numpy.random.permutation', 'np.random.permutation', (['x.shape[0]'], {}), '(x.shape[0])\n', (5391, 5403), True, 'import numpy as np\n'), ((472, 494), 'numpy.expand_dims', 'np.expand_dims', (['fy', '(-1)'], {}), '(fy, -1)\n', (486, 494), True, 'import numpy as np\n'), ((667, 689), 'numpy.expand_dims', 'np.expand_dims', (['fy', '(-1)'], {}), '(fy, -1)\n', (681, 689), True, 'import numpy as np\n'), ((1353, 1363), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (1360, 1363), True, 'import numpy as np\n'), ((1550, 1575), 'numpy.expand_dims', 'np.expand_dims', (['scale', '(-1)'], {}), '(scale, -1)\n', (1564, 1575), True, 'import numpy as np\n'), ((2089, 2119), 'numpy.fft.irfftn', 'np.fft.irfftn', (['spectrum', 'shape'], {}), '(spectrum, shape)\n', (2102, 2119), True, 'import numpy as np\n'), ((2705, 2731), 'scipy.stats.beta.rvs', 'beta.rvs', (['(alpha + 1)', 'alpha'], {}), '(alpha + 1, alpha)\n', (2713, 2731), False, 'from scipy.stats import beta\n'), ((2756, 2778), 'scipy.stats.beta.rvs', 'beta.rvs', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (2764, 2778), False, 'from scipy.stats import beta\n'), ((3279, 3305), 'math.ceil', 'math.ceil', (['(lam * mask.size)'], {}), '(lam * mask.size)\n', (3288, 3305), False, 'import math\n'), ((3352, 3379), 'math.floor', 'math.floor', (['(lam * mask.size)'], {}), '(lam * mask.size)\n', (3362, 3379), False, 'import math\n'), ((3317, 3332), 'random.random', 'random.random', ([], {}), '()\n', (3330, 3332), False, 'import random\n'), ((8309, 8377), 'openmixup.models.utils.batch_shuffle_ddp', 'batch_shuffle_ddp', (['gt_label'], {'idx_shuffle': 'idx_shuffle', 'no_repeat': '(True)'}), '(gt_label, idx_shuffle=idx_shuffle, no_repeat=True)\n', (8326, 8377), False, 'from openmixup.models.utils import batch_shuffle_ddp\n'), ((536, 553), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['w'], {}), '(w)\n', (550, 553), True, 'import numpy as np\n'), ((599, 616), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['w'], {}), '(w)\n', (613, 616), True, 'import numpy as np\n'), ((730, 747), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['z'], {}), '(z)\n', (744, 747), True, 'import numpy as np\n'), ((788, 805), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['z'], {}), '(z)\n', (802, 805), True, 'import numpy as np\n'), ((6744, 6766), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (6760, 6766), False, 'import torch\n')]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import cv2
import mmcv
import numpy as np
try:
import imageio
except ImportError:
imageio = None
def parse_args():
parser = argparse.ArgumentParser(
description='Merge images and visualized flow')
parser.add_argument(
'--img_dir', type=str, default=None, help='directory of images')
parser.add_argument(
'--flow_dir',
type=str,
default=None,
help='directory of visualized flow')
parser.add_argument(
'--resize_factor',
type=float,
default=0.5,
help='resize factor for gif')
parser.add_argument(
'--out_dir',
type=str,
default=None,
help='directory to save merged results')
args = parser.parse_args()
return args
def merge_imgs_flow(img_dir: str, flow_dir: str, out_dir: str) -> None:
"""Load images and visualized flow maps and merge them.
Args:
        img_dir (str): The directory of images.
flow_dir (str): The directory of flow maps.
out_dir (str): The directory to save the frames
"""
img_files = list(mmcv.scandir(img_dir))
flow_files = list(mmcv.scandir(flow_dir))
img_files.sort()
flow_files.sort()
# img is longer than flow
for i in range(len(img_files) - 1):
img = mmcv.imread(osp.join(img_dir, img_files[i]))
flow = mmcv.imread(osp.join(flow_dir, flow_files[i]))
frame = np.concatenate((img, flow), axis=1)
cv2.imwrite(osp.join(out_dir, flow_files[i]), frame)
def main():
args = parse_args()
merge_imgs_flow(args.img_dir, args.flow_dir, args.out_dir)
if __name__ == '__main__':
main()
|
[
"os.path.join",
"numpy.concatenate",
"argparse.ArgumentParser",
"mmcv.scandir"
] |
[((226, 297), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Merge images and visualized flow"""'}), "(description='Merge images and visualized flow')\n", (249, 297), False, 'import argparse\n'), ((1182, 1203), 'mmcv.scandir', 'mmcv.scandir', (['img_dir'], {}), '(img_dir)\n', (1194, 1203), False, 'import mmcv\n'), ((1227, 1249), 'mmcv.scandir', 'mmcv.scandir', (['flow_dir'], {}), '(flow_dir)\n', (1239, 1249), False, 'import mmcv\n'), ((1501, 1536), 'numpy.concatenate', 'np.concatenate', (['(img, flow)'], {'axis': '(1)'}), '((img, flow), axis=1)\n', (1515, 1536), True, 'import numpy as np\n'), ((1390, 1421), 'os.path.join', 'osp.join', (['img_dir', 'img_files[i]'], {}), '(img_dir, img_files[i])\n', (1398, 1421), True, 'import os.path as osp\n'), ((1450, 1483), 'os.path.join', 'osp.join', (['flow_dir', 'flow_files[i]'], {}), '(flow_dir, flow_files[i])\n', (1458, 1483), True, 'import os.path as osp\n'), ((1558, 1590), 'os.path.join', 'osp.join', (['out_dir', 'flow_files[i]'], {}), '(out_dir, flow_files[i])\n', (1566, 1590), True, 'import os.path as osp\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: tshzzz
"""
import numpy as np
import torch
from src.utils import py_cpu_nms,bbox_iou
def gen_yolo_box(featmaps,anchor_wh):
    # featmaps = (h, w): spatial size of the feature-map grid
output = np.zeros((featmaps[0], featmaps[1], len(anchor_wh), 4))
for i in range(featmaps[0]):
for j in range(featmaps[1]):
cx = (j ) #/ featmaps[0]
cy = (i ) #/ featmaps[1]
for k,(w,h) in enumerate(anchor_wh):
output[i,j,k,:] = [cx, cy, w , h ]
return output
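# Hedged usage sketch (editor addition): for a 13x13 feature map and two illustrative anchors,
# the grid holds (cx, cy, w, h) for every cell and anchor.
#
#   grid = gen_yolo_box((13, 13), [(1.0, 1.0), (3.0, 3.0)])
#   grid.shape  # (13, 13, 2, 4)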
class yolo_box_encoder(object):
def __init__(self,anchor,class_num,featmap_size):
# anchor B,13,13,5
self.anchor = gen_yolo_box(featmap_size,anchor)
self.class_num = class_num
self.featmap_size = featmap_size
self.boxes_num = len(anchor)
def __call__(self,bs):
#global tw_a,tw_b
# b,c,h,w -> b,c,x,y
bb_class = np.zeros((self.featmap_size[0],self.featmap_size[1],self.boxes_num,self.class_num))
bb_boxes = np.zeros((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 4))
bb_conf = np.zeros((self.featmap_size[0],self.featmap_size[1],self.boxes_num,1))
for i in range(bs.shape[0]):
local_x = int(min(0.999, max(0, bs[i, 0] + bs[i, 2] / 2)) * (self.featmap_size[0]) )
local_y = int(min(0.999, max(0, bs[i, 1] + bs[i, 3] / 2)) * (self.featmap_size[1]) )
ious = []
for k in range(self.boxes_num):
temp_x,temp_y,temp_w,temp_h = self.anchor[local_y,local_x,k,:]
temp_w = temp_w / self.featmap_size[0]
temp_h = temp_h / self.featmap_size[1]
anchor_ = np.array([[0,0,temp_w,temp_h]])
gt = np.array([[0,0,bs[i,2],bs[i,3]]])
ious.append(bbox_iou(anchor_, gt)[0])
selected_ = np.argsort(ious)[::-1]
for kk,selected_anchor in enumerate(selected_):
if bb_conf[local_y,local_x, selected_anchor,0] == 0 and bs[i,2]>0.02 and bs[i,3]>0.02 :
tx = (bs[i, 0] + bs[i, 2] / 2) * self.featmap_size[0] \
- (self.anchor[local_y,local_x,selected_anchor,0] )
ty = (bs[i, 1] + bs[i, 3] / 2) * self.featmap_size[1] \
- (self.anchor[local_y,local_x,selected_anchor,1] )
tw = np.log(max(0.01,bs[i,2]* self.featmap_size[0] / self.anchor[local_y,local_x,selected_anchor,2]) )
th = np.log(max(0.01,bs[i,3]* self.featmap_size[1] / self.anchor[local_y,local_x,selected_anchor,3]) )
bb_boxes[local_y,local_x, selected_anchor,:] = np.array([tx,ty,tw,th])
                    # consider background; use softmax
#bb_class[local_x, local_y, selected_anchor,:] = 0
bb_class[local_y, local_x, selected_anchor, int(bs[i, 4])] = 1
bb_conf[local_y,local_x, selected_anchor,0] = 1
break
target = (bb_class,bb_conf,bb_boxes)
return target
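# Hedged usage sketch (editor addition): encoding one normalised ground-truth box of the form
# (x_min, y_min, w, h, class_id); the anchor sizes, class count, and box values are illustrative only.
#
#   encoder = yolo_box_encoder(anchor=[(1.0, 1.0), (3.0, 3.0)], class_num=20, featmap_size=(13, 13))
#   bb_class, bb_conf, bb_boxes = encoder(np.array([[0.1, 0.2, 0.3, 0.4, 5]]))
#   # shapes: (13, 13, 2, 20), (13, 13, 2, 1), (13, 13, 2, 4)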
class yolo_box_decoder(object):
def __init__(self, anchor, class_num,featmap_size,conf=0.05,nms_thresh=0.5):
self.class_num = class_num#
self.anchor = torch.from_numpy(gen_yolo_box(featmap_size, anchor)).float()
self.boxes_num = len(anchor)
self.featmap_size = featmap_size
self.conf_thresh = conf
self.nms_thresh = nms_thresh
def __call__(self, pred):
boxes = []
classes = []
pred_cls, pred_conf, pred_bboxes = pred
featmap_size = torch.Tensor([pred_cls.shape[1], pred_cls.shape[2]])
pred_cls = pred_cls.cpu().float().view(-1,self.class_num)
pred_conf = pred_conf.cpu().float().view(-1,1)
pred_bboxes = pred_bboxes.cpu().float().view(-1,4)
anchor = self.anchor.repeat(1, 1, 1, 1, 1).cpu().view(-1,4)
        # find the anchors with the highest confidence
pred_mask = (pred_conf>self.conf_thresh).view(-1)
pred_bboxes = pred_bboxes[pred_mask]
pred_conf = pred_conf[pred_mask]
pred_cls = pred_cls[pred_mask]
anchor = anchor[pred_mask]
for cls in range(self.class_num):
cls_prob = pred_cls[:, cls].float() * pred_conf[:, 0]
mask_a = cls_prob.gt(self.conf_thresh)
bbox = pred_bboxes[mask_a]
anchor_ = anchor[mask_a]
cls_prob = cls_prob[mask_a]
if bbox.shape[0] > 0:
bbox[:, 2:4] = torch.exp(bbox[:, 2:4]) * anchor_[:, 2:4] / (featmap_size[0:2])
bbox[:, 0:2] = (bbox[:, 0:2] + (anchor_[:, 0:2]))/ (featmap_size[0:2]) - bbox[:, 2:4] / 2
#bbox[:, 0:2] = (bbox[:, 0:2] + (anchor_[:, 0:2])) - bbox[:, 2:4] / 2
pre_cls_box = bbox.data.numpy()
pre_cls_score = cls_prob.data.view(-1).numpy()
keep = py_cpu_nms(pre_cls_box, pre_cls_score, thresh=self.nms_thresh)
for conf_keep, loc_keep in zip(pre_cls_score[keep], pre_cls_box[keep]):
boxes.append(loc_keep)
classes.append([cls, conf_keep])
boxes = np.array(boxes)
classes = np.array(classes)
return boxes,classes
class single_decoder(object):
def __init__(self, anchor, class_num, featmap_size, conf=0.01):
self.class_num = class_num
self.anchor = torch.from_numpy(gen_yolo_box(featmap_size, anchor)).float()
self.boxes_num = len(anchor)
self.featmap_size = featmap_size
self.conf_thresh = conf
def __call__(self, pred):
pred_cls, pred_conf, pred_bboxes = pred
featmap_size = torch.Tensor([pred_cls.shape[1], pred_cls.shape[2]])
pred_cls = pred_cls.cpu().float().view(-1, self.class_num)
pred_conf = pred_conf.cpu().float().view(-1, 1)
pred_bboxes = pred_bboxes.cpu().float().view(-1, 4)
anchor = self.anchor.repeat(1, 1, 1, 1, 1).cpu().view(-1, 4)
        # find the anchors with the highest confidence
pred_mask = (pred_conf > self.conf_thresh).view(-1)
pred_bboxes = pred_bboxes[pred_mask]
pred_conf = pred_conf[pred_mask]
pred_cls = pred_cls[pred_mask]
anchor = anchor[pred_mask]
pred_bboxes[:, 2:4] = torch.exp(pred_bboxes[:, 2:4]) * anchor[:, 2:4] / (featmap_size[0:2])
pred_bboxes[:, 0:2] = (pred_bboxes[:, 0:2] + (anchor[:, 0:2]))/ (featmap_size[0:2]) - pred_bboxes[:, 2:4] / 2
return pred_cls, pred_conf, pred_bboxes
class group_decoder(object):
def __init__(self, anchor, class_num, featmap_size, conf=0.01, nms_thresh=0.5):
self.decoder = []
for i in range(len(anchor)):
self.decoder.append(single_decoder(anchor[i], class_num, featmap_size[i], conf))
self.class_num = class_num
self.conf_thresh = conf
self.nms_thresh = nms_thresh
def __call__(self, preds):
pred_cls = []
pred_conf = []
pred_bboxes = []
for pred,decoder in zip(preds,self.decoder):
cls,conf,bbox = decoder(pred)
pred_cls.append(cls)
pred_conf.append(conf)
pred_bboxes.append(bbox)
pred_cls = torch.cat([cls for cls in pred_cls])
pred_bboxes = torch.cat([bbox for bbox in pred_bboxes])
pred_conf = torch.cat([conf for conf in pred_conf])
boxes = []
classes = []
for cls in range(self.class_num):
cls_prob = pred_cls[:, cls].float() * pred_conf[:, 0]
mask_a = cls_prob.gt(self.conf_thresh)
bbox = pred_bboxes[mask_a]
cls_prob = cls_prob[mask_a]
iou_prob = pred_conf[mask_a]
if bbox.shape[0] > 0:
pre_cls_box = bbox.data.numpy()
pre_cls_score = cls_prob.data.view(-1).numpy()
iou_prob = iou_prob.data.view(-1).numpy()
keep = py_cpu_nms(pre_cls_box, pre_cls_score, thresh=self.nms_thresh)
for conf_keep, loc_keep in zip(pre_cls_score[keep], pre_cls_box[keep]):
boxes.append(loc_keep)
classes.append([cls, conf_keep])
boxes = np.array(boxes)
classes = np.array(classes)
return boxes, classes
class single_encoder(object):
def __init__(self, anchor, class_num, featmap_size):
# anchor B,13,13,5
self.anchor = gen_yolo_box(featmap_size, anchor)
self.class_num = class_num
self.featmap_size = featmap_size
self.boxes_num = len(anchor)
self.bb_class = np.zeros((self.featmap_size[0], self.featmap_size[1], self.boxes_num, self.class_num))
self.bb_boxes = np.zeros((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 4))
self.bb_conf = np.zeros((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 1))
def get_target(self):
return (self.bb_class,self.bb_conf,self.bb_boxes)
def clean_target(self):
self.bb_class = np.zeros((self.featmap_size[0], self.featmap_size[1], self.boxes_num, self.class_num))
self.bb_boxes = np.zeros((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 4))
self.bb_conf = np.zeros((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 1))
return
def __call__(self, bs):
local_x = int(min(0.999, max(0, bs[0] + bs[2] / 2)) * (self.featmap_size[0]))
local_y = int(min(0.999, max(0, bs[1] + bs[3] / 2)) * (self.featmap_size[1]))
ious = []
for k in range(self.boxes_num):
temp_x, temp_y, temp_w, temp_h = self.anchor[local_y, local_x, k, :]
temp_w = temp_w / self.featmap_size[0]
temp_h = temp_h / self.featmap_size[1]
anchor_ = np.array([[0, 0, temp_w, temp_h]])
gt = np.array([[0, 0, bs[2], bs[3]]])
ious.append(bbox_iou(anchor_, gt)[0])
selected_ = np.argsort(ious)[::-1]
for kk, selected_anchor in enumerate(selected_):
if self.bb_conf[local_y, local_x, selected_anchor, 0] == 0 and bs[2] > 0.02 and bs[3] > 0.02:
tx = (bs[0] + bs[2] / 2) * self.featmap_size[0] - (self.anchor[local_y, local_x, selected_anchor, 0])
ty = (bs[1] + bs[3] / 2) * self.featmap_size[1] - (self.anchor[local_y, local_x, selected_anchor, 1])
tw = np.log(max(0.01, bs[2] * self.featmap_size[0] / self.anchor[local_y, local_x, selected_anchor, 2]))
th = np.log(max(0.01, bs[3] * self.featmap_size[1] / self.anchor[local_y, local_x, selected_anchor, 3]))
self.bb_boxes[local_y, local_x, selected_anchor, :] = np.array([tx, ty, tw, th])
                # consider background; use softmax
self.bb_class[local_y, local_x, selected_anchor, int(bs[4])] = 1
self.bb_conf[local_y, local_x, selected_anchor, 0] = 1
break
return
class group_encoder(object):
def __init__(self, anchor, class_num, featmap_size):
# anchor B,13,13,5
self.anchor = anchor
self.class_num = class_num
self.featmap_size = featmap_size
self.boxes_num = len(anchor)
self.featmap_num = len(featmap_size)
self.encoder = []
for i in range(len(anchor)):
self.encoder.append(single_encoder(anchor[i], class_num, featmap_size[i]))
def __call__(self, bs):
# global tw_a,tw_b
# b,c,h,w -> b,c,x,y
for i in range(bs.shape[0]):
for encoder in self.encoder:
encoder(bs[i])
target = []
for encoder in self.encoder:
target.append(encoder.get_target())
for encoder in self.encoder:
encoder.clean_target()
return target
|
[
"numpy.zeros",
"torch.cat",
"numpy.argsort",
"torch.exp",
"torch.Tensor",
"numpy.array",
"src.utils.py_cpu_nms",
"src.utils.bbox_iou"
] |
[((936, 1027), 'numpy.zeros', 'np.zeros', (['(self.featmap_size[0], self.featmap_size[1], self.boxes_num, self.class_num)'], {}), '((self.featmap_size[0], self.featmap_size[1], self.boxes_num, self.\n    class_num))\n', (944, 1027), True, 'import numpy as np\n'), ((1039, 1112), 'numpy.zeros', 'np.zeros', (['(self.featmap_size[0], self.featmap_size[1], self.boxes_num, 4)'], {}), '((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 4))\n', (1047, 1112), True, 'import numpy as np\n'), ((1131, 1204), 'numpy.zeros', 'np.zeros', (['(self.featmap_size[0], self.featmap_size[1], self.boxes_num, 1)'], {}), '((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 1))\n', (1139, 1204), True, 'import numpy as np\n'), ((3620, 3672), 'torch.Tensor', 'torch.Tensor', (['[pred_cls.shape[1], pred_cls.shape[2]]'], {}), '([pred_cls.shape[1], pred_cls.shape[2]])\n', (3632, 3672), False, 'import torch\n'), ((5170, 5185), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (5178, 5185), True, 'import numpy as np\n'), ((5204, 5221), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (5212, 5221), True, 'import numpy as np\n'), ((5683, 5735), 'torch.Tensor', 'torch.Tensor', (['[pred_cls.shape[1], pred_cls.shape[2]]'], {}), '([pred_cls.shape[1], pred_cls.shape[2]])\n', (5695, 5735), False, 'import torch\n'), ((7209, 7245), 'torch.cat', 'torch.cat', (['[cls for cls in pred_cls]'], {}), '([cls for cls in pred_cls])\n', (7218, 7245), False, 'import torch\n'), ((7268, 7309), 'torch.cat', 'torch.cat', (['[bbox for bbox in pred_bboxes]'], {}), '([bbox for bbox in pred_bboxes])\n', (7277, 7309), False, 'import torch\n'), ((7330, 7369), 'torch.cat', 'torch.cat', (['[conf for conf in pred_conf]'], {}), '([conf for conf in pred_conf])\n', (7339, 7369), False, 'import torch\n'), ((8186, 8201), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (8194, 8201), True, 'import numpy as np\n'), ((8220, 8237), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (8228, 8237), True, 'import numpy as np\n'), ((8584, 8675), 'numpy.zeros', 'np.zeros', (['(self.featmap_size[0], self.featmap_size[1], self.boxes_num, self.class_num)'], {}), '((self.featmap_size[0], self.featmap_size[1], self.boxes_num, self.\n    class_num))\n', (8592, 8675), True, 'import numpy as np\n'), ((8695, 8768), 'numpy.zeros', 'np.zeros', (['(self.featmap_size[0], self.featmap_size[1], self.boxes_num, 4)'], {}), '((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 4))\n', (8703, 8768), True, 'import numpy as np\n'), ((8792, 8865), 'numpy.zeros', 'np.zeros', (['(self.featmap_size[0], self.featmap_size[1], self.boxes_num, 1)'], {}), '((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 1))\n', (8800, 8865), True, 'import numpy as np\n'), ((9005, 9096), 'numpy.zeros', 'np.zeros', (['(self.featmap_size[0], self.featmap_size[1], self.boxes_num, self.class_num)'], {}), '((self.featmap_size[0], self.featmap_size[1], self.boxes_num, self.\n    class_num))\n', (9013, 9096), True, 'import numpy as np\n'), ((9116, 9189), 'numpy.zeros', 'np.zeros', (['(self.featmap_size[0], self.featmap_size[1], self.boxes_num, 4)'], {}), '((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 4))\n', (9124, 9189), True, 'import numpy as np\n'), ((9213, 9286), 'numpy.zeros', 'np.zeros', (['(self.featmap_size[0], self.featmap_size[1], self.boxes_num, 1)'], {}), '((self.featmap_size[0], self.featmap_size[1], self.boxes_num, 1))\n', (9221, 9286), True, 'import numpy as np\n'), ((9768, 9802), 'numpy.array', 'np.array', (['[[0, 0, temp_w, temp_h]]'], {}), '([[0, 0, temp_w, temp_h]])\n', (9776, 9802), True, 'import numpy as np\n'), ((9820, 9852), 'numpy.array', 'np.array', (['[[0, 0, bs[2], bs[3]]]'], {}), '([[0, 0, bs[2], bs[3]]])\n', (9828, 9852), True, 'import numpy as np\n'), ((9924, 9940), 'numpy.argsort', 'np.argsort', (['ious'], {}), '(ious)\n', (9934, 9940), True, 'import numpy as np\n'), ((1717, 1751), 'numpy.array', 'np.array', (['[[0, 0, temp_w, temp_h]]'], {}), '([[0, 0, temp_w, temp_h]])\n', (1725, 1751), True, 'import numpy as np\n'), ((1770, 1808), 'numpy.array', 'np.array', (['[[0, 0, bs[i, 2], bs[i, 3]]]'], {}), '([[0, 0, bs[i, 2], bs[i, 3]]])\n', (1778, 1808), True, 'import numpy as np\n'), ((1884, 1900), 'numpy.argsort', 'np.argsort', (['ious'], {}), '(ious)\n', (1894, 1900), True, 'import numpy as np\n'), ((4906, 4968), 'src.utils.py_cpu_nms', 'py_cpu_nms', (['pre_cls_box', 'pre_cls_score'], {'thresh': 'self.nms_thresh'}), '(pre_cls_box, pre_cls_score, thresh=self.nms_thresh)\n', (4916, 4968), False, 'from src.utils import py_cpu_nms, bbox_iou\n'), ((6270, 6300), 'torch.exp', 'torch.exp', (['pred_bboxes[:, 2:4]'], {}), '(pred_bboxes[:, 2:4])\n', (6279, 6300), False, 'import torch\n'), ((7922, 7984), 'src.utils.py_cpu_nms', 'py_cpu_nms', (['pre_cls_box', 'pre_cls_score'], {'thresh': 'self.nms_thresh'}), '(pre_cls_box, pre_cls_score, thresh=self.nms_thresh)\n', (7932, 7984), False, 'from src.utils import py_cpu_nms, bbox_iou\n'), ((10663, 10689), 'numpy.array', 'np.array', (['[tx, ty, tw, th]'], {}), '([tx, ty, tw, th])\n', (10671, 10689), True, 'import numpy as np\n'), ((2718, 2744), 'numpy.array', 'np.array', (['[tx, ty, tw, th]'], {}), '([tx, ty, tw, th])\n', (2726, 2744), True, 'import numpy as np\n'), ((9877, 9898), 'src.utils.bbox_iou', 'bbox_iou', (['anchor_', 'gt'], {}), '(anchor_, gt)\n', (9885, 9898), False, 'from src.utils import py_cpu_nms, bbox_iou\n'), ((1832, 1853), 'src.utils.bbox_iou', 'bbox_iou', (['anchor_', 'gt'], {}), '(anchor_, gt)\n', (1840, 1853), False, 'from src.utils import py_cpu_nms, bbox_iou\n'), ((4514, 4537), 'torch.exp', 'torch.exp', (['bbox[:, 2:4]'], {}), '(bbox[:, 2:4])\n', (4523, 4537), False, 'import torch\n')]
|
#plots.py
import os
import pandas
import numpy as np
import matplotlib.pyplot as plt
#plots.py
# . . .
def plot_lines(df, linewidth = 1, figsize = (40,20),secondary_y = None, legend=True, pp = None, save_fig = False):
fig, ax = plt.subplots(figsize = figsize)
# If no secondary_y (axis), plot all variables at once
df.dropna(axis=0, how = "all").plot.line(linewidth = linewidth, ax = ax, secondary_y=secondary_y, legend = legend)
# Turn the text on the x-axis so that it reads vertically
ax.tick_params(axis='x', rotation=90)
# Get rid of tick lines perpendicular to the axis for aesthetic
ax.tick_params('both', length=0, which='both')
# transform y-axis values from sci notation to integers
vals = ax.get_yticks()
ax.set_yticklabels([round(x,2) for x in vals])
# format image filename
remove_chars = "[]:$'\\"
filename = str(list(df.keys()))
for char in remove_chars:
filename = filename.replace(char, "")
if save_fig:
try:
os.mkdir("plots")
except:
pass
plt.savefig("plots/" + filename[:50] + " line.png",
bbox_inches = "tight")
#[:50] + " line.png"
# save image if PdfPages object was passed
if pp != None: pp.savefig(fig, bbox_inches = "tight")
def plot_scatter(data, s = 75, figsize = (40, 20), save_fig = False, pp = None):
# Create plot for every unique pair of variables
df = data.copy()
for var1 in df:
for var2 in df:
if var1 != var2:
fig, ax = plt.subplots(figsize = figsize)
# Create list of years from index
# Year will be represented by color
if "Year" not in df.keys():
df["Year"] = [int(str(ind)[:4]) for ind in df.index]
df.plot.scatter(x = var1, y = var2, s = s, ax = ax,
c = "Year", cmap = "viridis")
# Turn the text on the x-axis so that it reads vertically
ax.tick_params(axis='x', rotation=90)
# Get rid of tick lines perpendicular to the axis for aesthetic
ax.tick_params('both', length=0, which='both')
# save image if PdfPages object was passed
if save_fig:
try:
os.mkdir("plots")
except:
pass
plt.savefig("plots/" + str(list(df.keys())).replace("[", "").replace("]","")[:40] + " scatter.png",
bbox_inches = "tight")
if pp != None: pp.savefig(fig, bbox_inches = "tight")
def corr_matrix_heatmap(df, save_fig = False, pp = None, title = "Correlation"):
#Create a figure to visualize a corr matrix
fig, ax = plt.subplots(figsize=(20,20))
# use ax.imshow() to create a heatmap of correlation values
# seismic mapping shows negative values as blue and positive values as red
im = ax.imshow(df, norm = plt.cm.colors.Normalize(-1,1), cmap = "seismic")
# create a list of labels, stacking each word in a label by replacing " "
# with "\n"
labels = df.keys()
num_vars = len(labels)
tick_labels = [lab.replace(" ", "\n") for lab in labels]
# adjust font size according to the number of variables visualized
tick_font_size = 120 / num_vars
val_font_size = 200 / num_vars
plt.rcParams.update({'font.size': tick_font_size})
# prepare space for label of each column
x_ticks = np.arange(num_vars)
# select labels and rotate them 90 degrees so that they are vertical
plt.xticks(x_ticks, tick_labels, fontsize = tick_font_size, rotation = 90)
# prepare space for label of each row
y_ticks = np.arange(len(labels))
# select labels
plt.yticks(y_ticks, tick_labels, fontsize = tick_font_size)
# show values in each tile of the heatmap
for i in range(len(labels)):
for j in range(len(labels)):
text = ax.text(i, j, str(round(df.values[i][j],2)),
fontsize= val_font_size, ha="center",
va="center", color = "w")
#Create title with Times New Roman Font
title_font = {"fontname":"Times New Roman"}
plt.title(title, fontsize = 50, **title_font)
#Call scale to show value of colors
cbar = fig.colorbar(im)
plt.show()
if save_fig:
try:
os.mkdir("plots")
except:
pass
plt.savefig("plots/" + str(list(df.keys())).replace("[", "").replace("]","")[:40] + " corrMatrix.png",
bbox_inches = "tight")
if pp != None: pp.savefig(fig, bbox_inches="tight")
plt.close()
def plot_stacked_lines(df, plot_vars, linewidth = 1,
figsize = (40, 20),
pp = None, total_var = False,
title = False):
fig, ax = plt.subplots(figsize = figsize)
# df.plot.area() created a stacked plot
df[plot_vars].plot.area(stacked = True, linewidth = linewidth,
ax = ax)
if total_var != False:
df[total_var].plot.line(linewidth = linewidth, ax = ax,
c = "k",label = total_var,
ls = "--")
# place legend in top left corner of plot
# format legend so that there are two columns of names
ax.legend(loc = 2, ncol = 2)
if title != False:
plt.title(title)
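# Hedged usage sketch (editor addition): exercising plot_lines and corr_matrix_heatmap on a
# small random DataFrame; the column names and figure size are illustrative only.
if __name__ == "__main__":
    dates = pandas.date_range("2020-01-01", periods=50)
    demo = pandas.DataFrame(np.random.randn(50, 3).cumsum(axis=0),
                         index=dates, columns=["GDP", "CPI", "M2"])
    plot_lines(demo, linewidth=2, figsize=(10, 5))
    corr_matrix_heatmap(demo.corr(), title="Demo correlation")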
|
[
"matplotlib.pyplot.title",
"os.mkdir",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.rcParams.update",
"numpy.arange",
"matplotlib.pyplot.cm.colors.Normalize",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((239, 268), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (251, 268), True, 'import matplotlib.pyplot as plt\n'), ((2829, 2859), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (2841, 2859), True, 'import matplotlib.pyplot as plt\n'), ((3456, 3506), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': tick_font_size}"], {}), "({'font.size': tick_font_size})\n", (3475, 3506), True, 'import matplotlib.pyplot as plt\n'), ((3569, 3588), 'numpy.arange', 'np.arange', (['num_vars'], {}), '(num_vars)\n', (3578, 3588), True, 'import numpy as np\n'), ((3670, 3740), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_ticks', 'tick_labels'], {'fontsize': 'tick_font_size', 'rotation': '(90)'}), '(x_ticks, tick_labels, fontsize=tick_font_size, rotation=90)\n', (3680, 3740), True, 'import matplotlib.pyplot as plt\n'), ((3856, 3913), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y_ticks', 'tick_labels'], {'fontsize': 'tick_font_size'}), '(y_ticks, tick_labels, fontsize=tick_font_size)\n', (3866, 3913), True, 'import matplotlib.pyplot as plt\n'), ((4329, 4372), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(50)'}), '(title, fontsize=50, **title_font)\n', (4338, 4372), True, 'import matplotlib.pyplot as plt\n'), ((4450, 4460), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4458, 4460), True, 'import matplotlib.pyplot as plt\n'), ((4765, 4776), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4774, 4776), True, 'import matplotlib.pyplot as plt\n'), ((4980, 5009), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4992, 5009), True, 'import matplotlib.pyplot as plt\n'), ((1093, 1165), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + filename[:50] + ' line.png')"], {'bbox_inches': '"""tight"""'}), "('plots/' + filename[:50] + ' line.png', bbox_inches='tight')\n", (1104, 1165), True, 'import matplotlib.pyplot as plt\n'), ((5523, 5539), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5532, 5539), True, 'import matplotlib.pyplot as plt\n'), ((1034, 1051), 'os.mkdir', 'os.mkdir', (['"""plots"""'], {}), "('plots')\n", (1042, 1051), False, 'import os\n'), ((3038, 3068), 'matplotlib.pyplot.cm.colors.Normalize', 'plt.cm.colors.Normalize', (['(-1)', '(1)'], {}), '(-1, 1)\n', (3061, 3068), True, 'import matplotlib.pyplot as plt\n'), ((4503, 4520), 'os.mkdir', 'os.mkdir', (['"""plots"""'], {}), "('plots')\n", (4511, 4520), False, 'import os\n'), ((1570, 1599), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1582, 1599), True, 'import matplotlib.pyplot as plt\n'), ((2361, 2378), 'os.mkdir', 'os.mkdir', (['"""plots"""'], {}), "('plots')\n", (2369, 2378), False, 'import os\n')]
|
import numpy as np
import pytest
import astropy
import astropy.units as u
from astropy.tests.helper import quantity_allclose, assert_quantity_allclose
from astropy.coordinates import (SkyCoord, get_body_barycentric, Angle,
ConvertError, Longitude, CartesianRepresentation,
get_body_barycentric_posvel,
CartesianDifferential, SphericalDifferential)
# Versions of Astropy that do not have HeliocentricMeanEcliptic have the same frame
# with the misleading name HeliocentricTrueEcliptic
try:
from astropy.coordinates import HeliocentricMeanEcliptic
except ImportError:
from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic
from astropy.time import Time
from sunpy.coordinates import (Helioprojective, HeliographicStonyhurst,
HeliographicCarrington, Heliocentric,
HeliocentricEarthEcliptic, GeocentricSolarEcliptic,
HeliocentricInertial, GeocentricEarthEquatorial,
get_earth)
from sunpy.coordinates import sun
from sunpy.coordinates.frames import _J2000
from sunpy.coordinates.transformations import transform_with_sun_center
from sunpy.time import parse_time
def test_hcc_to_hgs():
'''
Check that a coordinate pointing to the observer in Heliocentric
    coordinates maps to the latitude/longitude of the observer in
HeliographicStonyhurst coordinates.
'''
lat = 10 * u.deg
lon = 20 * u.deg
observer = HeliographicStonyhurst(lat=lat, lon=lon)
hcc_in = Heliocentric(x=0*u.km, y=0*u.km, z=1*u.km, observer=observer)
hgs_out = hcc_in.transform_to(HeliographicStonyhurst)
assert_quantity_allclose(hgs_out.lat, lat)
assert_quantity_allclose(hgs_out.lon, lon)
def test_hpc_hpc():
# Use some unphysical values for solar parameters for testing, to make it
# easier to calculate expected results.
rsun = 1*u.m
D0 = 1*u.km
L0 = 1*u.deg
observer_in = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)
observer_out = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)
hpc_in = Helioprojective(0*u.arcsec, 0*u.arcsec, rsun=rsun, observer=observer_in)
hpc_out = Helioprojective(observer=observer_out, rsun=rsun)
hpc_new = hpc_in.transform_to(hpc_out)
assert hpc_new.observer == hpc_out.observer
# Calculate the distance subtended by an angle of L0 from the centre of the
# Sun.
dd = -1 * rsun * np.tan(L0)
# Calculate the angle corresponding to that distance as seen by the new
# observer.
theta = np.arctan2(dd, (D0 - rsun))
assert quantity_allclose(theta, hpc_new.Tx, rtol=1e-3)
def test_hpc_hpc_sc():
# Use some unphysical values for solar parameters for testing, to make it
# easier to calculate expected results.
rsun = 1*u.m
D0 = 1*u.km
L0 = 1*u.deg
observer_in = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)
observer_out = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)
sc_in = SkyCoord(0*u.arcsec, 0*u.arcsec, rsun=rsun, observer=observer_in,
frame='helioprojective')
hpc_out = Helioprojective(observer=observer_out, rsun=rsun)
hpc_new = sc_in.transform_to(hpc_out)
assert hpc_new.observer.lat == hpc_out.observer.lat
assert hpc_new.observer.lon == hpc_out.observer.lon
assert hpc_new.observer.radius == hpc_out.observer.radius
def test_hpc_hpc_null():
hpc_in = Helioprojective(0*u.arcsec, 0*u.arcsec)
hpc_out = Helioprojective()
hpc_new = hpc_in.transform_to(hpc_out)
assert hpc_new is not hpc_in
assert quantity_allclose(hpc_new.Tx, hpc_in.Tx)
assert quantity_allclose(hpc_new.Ty, hpc_in.Ty)
assert hpc_out.observer == hpc_new.observer
def test_hcrs_hgs():
# Get the current Earth location in HCRS
adate = parse_time('2015/05/01 01:13:00')
earth_hcrs = SkyCoord(get_body_barycentric('earth', adate), frame='icrs', obstime=adate).hcrs
# Convert from HCRS to HGS
earth_hgs = earth_hcrs.transform_to(HeliographicStonyhurst)
# The HGS longitude of the Earth should be zero within numerical error
# Due to an issue with wrapping at +-360, we shift it to pass the test.
assert quantity_allclose((earth_hgs.lon+1*u.deg) % (360*u.deg), 1*u.deg, atol=1e-12*u.deg)
# The HGS latitude and radius should be within valid ranges
assert quantity_allclose(earth_hgs.lat, 0*u.deg, atol=7.3*u.deg)
assert quantity_allclose(earth_hgs.radius, 1*u.AU, atol=0.017*u.AU)
def test_hcrs_hgs_array_obstime():
# Get the Earth location in HCRS at two times
times = Time(['2017-01-01', '2017-06-01'])
earth_hcrs = SkyCoord(get_body_barycentric('earth', times), frame='icrs', obstime=times).hcrs
# Transform each time in separate calls (uses scalar obstime)
earth_hgs_0 = earth_hcrs[0].transform_to(HeliographicStonyhurst)
earth_hgs_1 = earth_hcrs[1].transform_to(HeliographicStonyhurst)
# Transform both times in one call (uses array obstime)
earth_hgs = earth_hcrs.transform_to(HeliographicStonyhurst)
# Confirm that the two approaches produce the same results
assert quantity_allclose(earth_hgs_0.lon, earth_hgs[0].lon, atol=1e-12*u.deg)
assert quantity_allclose(earth_hgs_0.lat, earth_hgs[0].lat, rtol=1e-10)
assert quantity_allclose(earth_hgs_0.radius, earth_hgs[0].radius, rtol=1e-10)
assert quantity_allclose(earth_hgs_1.lon, earth_hgs[1].lon, atol=1e-12*u.deg)
assert quantity_allclose(earth_hgs_1.lat, earth_hgs[1].lat, rtol=1e-10)
assert quantity_allclose(earth_hgs_1.radius, earth_hgs[1].radius, rtol=1e-10)
def test_hgs_hcrs():
# This test checks the HGS->HCRS transformation by transforming from HGS to
# HeliocentricMeanEcliptic (HME). It will fail if there are errors in Astropy's
# HCRS->ICRS or ICRS->HME transformations.
# Use published HGS coordinates in the Astronomical Almanac (2013), pages C6-C7
obstime = Time('2013-01-28')
earth_hgs = SkyCoord(0*u.deg, -5.73*u.deg, 0.9848139*u.AU, frame=HeliographicStonyhurst,
obstime=obstime)
# Transform to HME at observation-time equinox
earth_hme = earth_hgs.transform_to(HeliocentricMeanEcliptic(equinox=obstime))
# Validate against published values from the Astronomical Almanac (2013), page C6 per page E2
# The dominant source of inaccuracy is the limited precision of the published B0 used above
assert quantity_allclose(earth_hme.lon, Angle('308d13m30.51s') - 180*u.deg, atol=5*u.arcsec)
assert quantity_allclose(earth_hme.lat, -Angle('-0.27s'), atol=10*u.arcsec)
assert quantity_allclose(earth_hme.distance, 0.9848139*u.AU, atol=5e-7*u.AU)
def test_hgs_hgc_roundtrip():
obstime = "2011-01-01"
hgsin = HeliographicStonyhurst(lat=10*u.deg, lon=20*u.deg, obstime=obstime)
hgcout = hgsin.transform_to(HeliographicCarrington(obstime=obstime))
assert_quantity_allclose(hgsin.lat, hgcout.lat)
assert_quantity_allclose(hgsin.lon + sun.L0(obstime), hgcout.lon)
hgsout = hgcout.transform_to(HeliographicStonyhurst(obstime=obstime))
assert_quantity_allclose(hgsout.lat, hgsin.lat)
assert_quantity_allclose(hgsout.lon, hgsin.lon)
def test_hgs_cartesian_rep_to_hpc():
# This test checks transformation HGS->HPC when the coordinate is in a Cartesian
# representation and that it is the same as a transformation from an HGS frame with a
# spherical representation
obstime = "2011-01-01"
hgscoord_cart = SkyCoord(x=1*u.km, y=0.*u.km, z=0.*u.km,
frame=HeliographicStonyhurst(obstime=obstime),
representation_type='cartesian')
hpc_frame = Helioprojective(observer='earth', obstime=obstime)
hgscoord_sph = hgscoord_cart.copy()
hgscoord_sph.representation_type = 'spherical'
hpccoord_cart = hgscoord_cart.transform_to(hpc_frame)
hpccoord_sph = hgscoord_sph.transform_to(hpc_frame)
assert_quantity_allclose(hpccoord_cart.Tx, hpccoord_sph.Tx)
assert_quantity_allclose(hpccoord_cart.Ty, hpccoord_sph.Ty)
assert_quantity_allclose(hpccoord_cart.distance, hpccoord_sph.distance)
def test_hgs_cartesian_rep_to_hcc():
# This test checks transformation HGS->HCC when the coordinate is in a Cartesian
# representation and that it is the same as a transformation from an HGS frame with a
# spherical representation
obstime = "2011-01-01"
hgscoord_cart = SkyCoord(x=1*u.km, y=0.*u.km, z=0.*u.km,
frame=HeliographicStonyhurst(obstime=obstime),
representation_type='cartesian')
hcc_frame = Heliocentric(observer='earth', obstime=obstime)
hgscoord_sph = hgscoord_cart.copy()
hgscoord_sph.representation_type = 'spherical'
hcccoord_cart = hgscoord_cart.transform_to(hcc_frame)
hcccoord_sph = hgscoord_sph.transform_to(hcc_frame)
assert_quantity_allclose(hcccoord_cart.x, hcccoord_sph.x)
assert_quantity_allclose(hcccoord_cart.y, hcccoord_sph.y)
assert_quantity_allclose(hcccoord_cart.z, hcccoord_sph.z)
def test_hgs_cartesian_rep_to_hgc():
# This test checks transformation HGS->HCC when the coordinate is in a Cartesian
# representation and that it is the same as a transformation from an HGS frame with a
# spherical representation
obstime = "2011-01-01"
hgscoord_cart = SkyCoord(x=1*u.km, y=0.*u.km, z=0.*u.km,
frame=HeliographicStonyhurst(obstime=obstime),
representation_type='cartesian')
hgscoord_sph = hgscoord_cart.copy()
hgscoord_sph.representation_type = 'spherical'
# HGC
hgccoord_cart = hgscoord_cart.transform_to(HeliographicCarrington(obstime=obstime))
hgccoord_sph = hgscoord_sph.transform_to(HeliographicCarrington(obstime=obstime))
assert_quantity_allclose(hgccoord_cart.lat, hgccoord_sph.lat)
assert_quantity_allclose(hgccoord_cart.lon, hgccoord_sph.lon)
assert_quantity_allclose(hgccoord_cart.radius, hgccoord_sph.radius)
def test_hcc_to_hpc_different_observer():
# This test checks transformation HCC->HPC in the case where the HCC and HPC frames are
# defined by different observers.
rsun = 1*u.m
D0 = 1*u.km
L0 = 1*u.deg
observer_1 = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)
observer_2 = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)
hcc_frame = Heliocentric(observer=observer_1)
hpc_frame = Helioprojective(observer=observer_2)
hcccoord = SkyCoord(x=rsun, y=rsun, z=rsun, frame=hcc_frame)
hpccoord_out = hcccoord.transform_to(hpc_frame)
hpccoord_expected = hcccoord.transform_to(HeliographicStonyhurst).transform_to(hpc_frame)
assert_quantity_allclose(hpccoord_out.Tx, hpccoord_expected.Tx)
assert_quantity_allclose(hpccoord_out.Ty, hpccoord_expected.Ty)
assert_quantity_allclose(hpccoord_out.distance, hpccoord_expected.distance)
def test_hpc_to_hcc_different_observer():
# This test checks transformation HPC->HCC in the case where the HCC and HPC frames are
# defined by different observers.
rsun = 1*u.m
D0 = 1*u.km
L0 = 1*u.deg
observer_1 = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)
observer_2 = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)
hcc_frame = Heliocentric(observer=observer_1)
hpc_frame = Helioprojective(observer=observer_2, rsun=rsun)
hpccoord = SkyCoord(Tx=0*u.arcsec, Ty=0*u.arcsec, frame=hpc_frame)
hcccoord_out = hpccoord.transform_to(hcc_frame)
hcccoord_expected = hpccoord.transform_to(HeliographicStonyhurst).transform_to(hcc_frame)
assert_quantity_allclose(hcccoord_out.x, hcccoord_expected.x)
assert_quantity_allclose(hcccoord_out.y, hcccoord_expected.y)
assert_quantity_allclose(hcccoord_out.z, hcccoord_expected.z)
def test_hcc_to_hpc_same_observer():
# This test checks transformation HCC->HPC in the case of same observer
rsun = 1*u.m
D0 = 1*u.km
observer = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)
hcc_frame = Heliocentric(observer=observer)
hpc_frame = Helioprojective(observer=observer, rsun=rsun)
hcccoord = SkyCoord(x=rsun, y=rsun, z=rsun, frame=hcc_frame)
hpccoord_out = hcccoord.transform_to(hpc_frame)
hpccoord_expected = hcccoord.transform_to(HeliographicStonyhurst).transform_to(hpc_frame)
assert_quantity_allclose(hpccoord_out.Tx, hpccoord_expected.Tx)
assert_quantity_allclose(hpccoord_out.Ty, hpccoord_expected.Ty)
assert_quantity_allclose(hpccoord_out.distance, hpccoord_expected.distance)
def test_hpc_to_hcc_same_observer():
# This test checks transformation HPC->HCC in the case of same observer
rsun = 1*u.m
D0 = 1 * u.km
observer = HeliographicStonyhurst(lat=0 * u.deg, lon=0 * u.deg, radius=D0)
hcc_frame = Heliocentric(observer=observer)
hpc_frame = Helioprojective(observer=observer, rsun=rsun)
hpccoord = SkyCoord(Tx=0 * u.arcsec, Ty=0 * u.arcsec, frame=hpc_frame)
hcccoord_out = hpccoord.transform_to(hcc_frame)
hcccoord_expected = hpccoord.transform_to(HeliographicStonyhurst).transform_to(hcc_frame)
assert_quantity_allclose(hcccoord_out.x, hcccoord_expected.x)
assert_quantity_allclose(hcccoord_out.y, hcccoord_expected.y)
assert_quantity_allclose(hcccoord_out.z, hcccoord_expected.z)
def test_hpc_hcc_different_observer_radius():
# Tests HPC->HCC with a change in observer at different distances from the Sun
observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU)
hpc = Helioprojective(0*u.arcsec, 0*u.arcsec, 0.5*u.AU, observer=observer1)
observer2 = HeliographicStonyhurst(90*u.deg, 0*u.deg, 0.75*u.AU)
hcc = hpc.transform_to(Heliocentric(observer=observer2))
assert_quantity_allclose(hcc.x, -0.5*u.AU)
assert_quantity_allclose(hcc.y, 0*u.AU, atol=1e-10*u.AU)
assert_quantity_allclose(hcc.z, 0*u.AU, atol=1e-10*u.AU)
def test_hgs_hgs():
# Test HGS loopback transformation
obstime = Time('2001-01-01')
old = SkyCoord(90*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime=obstime))
new = old.transform_to(HeliographicStonyhurst(obstime=obstime + 1*u.day))
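    # The coordinate stays fixed in inertial space while the Stonyhurst prime meridian follows
    # the Earth, which advances roughly 1 degree per day along its orbit, so the longitude
    # should drop by about 1 degree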
assert_quantity_allclose(new.lon, old.lon - 1*u.deg, atol=0.1*u.deg) # due to Earth motion
assert_quantity_allclose(new.lat, old.lat, atol=1e-3*u.deg)
assert_quantity_allclose(new.radius, old.radius, atol=1e-5*u.AU)
def test_hgc_hgc():
# Test HGC loopback transformation
obstime = Time('2001-01-01')
old = SkyCoord(90*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicCarrington(obstime=obstime))
new = old.transform_to(HeliographicCarrington(obstime=obstime + 1*u.day))
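    # The coordinate stays fixed in inertial space while the Carrington frame rotates at
    # roughly the sidereal rate (~14.18 deg/day), so 90 deg should drop to about 75.8 deg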
assert_quantity_allclose(new.lon, 75.815607 * u.deg, atol=1e-7*u.deg) # solar rotation
# These are not equal to the old values, because the coordinates stay fixed
# in inertial space, whilst the frame (fixed to the center of the Sun)
# moves slightly.
assert_quantity_allclose(new.lat, 9.999963 * u.deg, atol=1e-7*u.deg)
assert_quantity_allclose(new.radius, 1.000009 * u.AU, atol=1e-7*u.AU)
def test_hcc_hcc():
# Test same observer and changing obstime
observer = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-02-01')
from_hcc = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer, obstime='2001-01-01')
to_hcc = from_hcc.transform_to(Heliocentric(observer=observer, obstime='2001-03-31'))
# Since the observer is the same, the coordinates should be nearly the same but not exactly
# equal due to motion of the origin (the Sun)
assert np.all(from_hcc.cartesian.xyz != to_hcc.cartesian.xyz)
assert_quantity_allclose(from_hcc.cartesian.xyz, to_hcc.cartesian.xyz, rtol=2e-3)
# Test changing observer and same obstime
observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-01-01')
observer2 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-03-31')
from_hcc = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer1, obstime='2001-02-01')
to_hcc = from_hcc.transform_to(Heliocentric(observer=observer2, obstime='2001-02-01'))
# This change in observer is approximately a 90-degree rotation about the Y axis
assert_quantity_allclose(to_hcc.x, -from_hcc.z, rtol=2e-3)
assert_quantity_allclose(to_hcc.y, from_hcc.y, rtol=2e-3)
assert_quantity_allclose(to_hcc.z, from_hcc.x, rtol=2e-3)
def test_hcc_hgs_observer_mismatch():
# Test whether the transformation gives the same answer regardless of what obstime the observer
# coordinate is represented in
observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-01-01')
observer2 = observer1.transform_to(HeliographicStonyhurst(obstime='2001-03-31'))
hcc1 = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer1, obstime=observer1.obstime)
hgs1 = hcc1.transform_to(HeliographicStonyhurst(obstime=hcc1.obstime))
hcc2 = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer2, obstime=observer1.obstime)
hgs2 = hcc2.transform_to(HeliographicStonyhurst(obstime=hcc2.obstime))
assert_quantity_allclose(hgs1.lon, hgs2.lon)
assert_quantity_allclose(hgs1.lat, hgs2.lat)
assert_quantity_allclose(hgs1.radius, hgs2.radius)
def test_hgs_hcc_observer_mismatch():
# Test whether the transformation gives the same answer regardless of what obstime the observer
# coordinate is represented in
observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-01-01')
observer2 = observer1.transform_to(HeliographicStonyhurst(obstime='2001-03-31'))
hgs = HeliographicStonyhurst(20*u.deg, 40*u.deg, 0.5*u.AU, obstime=observer1.obstime)
hcc1 = hgs.transform_to(Heliocentric(observer=observer1, obstime=hgs.obstime))
hcc2 = hgs.transform_to(Heliocentric(observer=observer2, obstime=hgs.obstime))
assert_quantity_allclose(hcc1.cartesian.xyz, hcc2.cartesian.xyz)
def test_hgs_hcrs_sunspice():
# Compare our HGS->HCRS transformation against SunSPICE by transforming beyond it
# "HEQ" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst
# "HAE" is equivalent to Astropy's Heliocentric Mean Ecliptic, and defaults to J2000.0
#
# IDL> coord = [1.d, 0.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'HAE', /au, /degrees
# IDL> print, coord
# 1.0000000 -108.65371 10.642778
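    # The SunSPICE output vector above is [distance (AU), longitude (deg), latitude (deg)],
    # matching the asserts below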
old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime='2019-06-01'))
new = old.transform_to(HeliocentricMeanEcliptic)
assert_quantity_allclose(new.lon, Longitude(-108.65371*u.deg), atol=0.1*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 10.642778*u.deg, atol=0.1*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, old.radius)
# Transform to HAE precessed to the mean ecliptic of date instead of J2000.0
# IDL> coord = [1.d, 0.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'HAE', /precess, /au, /degrees
# IDL> print, coord
# 1.0000000 -108.38240 10.640314
new = old.transform_to(HeliocentricMeanEcliptic(equinox='2019-06-01'))
assert_quantity_allclose(new.lon, Longitude(-108.38240*u.deg), atol=0.1*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 10.640314*u.deg, atol=0.1*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, old.radius)
def test_hgs_hgc_sunspice():
# Compare our HGS->HGC transformation against SunSPICE
# "HEQ" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst
# "Carrington" is offset by 0.076 degrees in longitude from our Heliographic Carrington (HGC)
# because "Carrington" does not include light travel time to the observer, while our
# HGC includes the light travel time to Earth (see Seidelmann et al. 2007).
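    # (Roughly: ~499 s of light travel time at the ~13.2 deg/day synodic rotation rate
    # comes to ~0.076 deg.)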
#
# IDL> coord = [1.d, 0.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'Carrington', /au, /degrees
# IDL> print, coord
# 1.0000000 16.688242 10.000000
old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime='2019-06-01'))
new = old.heliographic_carrington
assert_quantity_allclose(new.lon, 16.688242*u.deg + 0.076*u.deg, atol=1e-2*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, old.lat)
assert_quantity_allclose(new.radius, old.radius)
def test_hgs_hcc_sunspice():
# Compare our HGS->HCC transformation against SunSPICE
# "HEQ" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst
# "HGRTN" is equivalent to our Heliocentric, but with the axes permuted
# SunSPICE, like us, assumes an Earth observer if not explicitly specified
#
# IDL> coord = [7d5, 8d5, 9d5]
# IDL> convert_sunspice_coord, '2019-06-01', coord, 'HEQ', 'HGRTN'
# Assuming Earth observation
# IDL> print, coord
# 688539.32 800000.00 908797.89
old = SkyCoord(CartesianRepresentation([7e5, 8e5, 9e5]*u.km),
frame=HeliographicStonyhurst(obstime='2019-06-01'))
new = old.transform_to(Heliocentric(observer='earth'))
assert_quantity_allclose(new.x, 800000.00*u.km, atol=1e-2*u.km)
assert_quantity_allclose(new.y, 908797.89*u.km, atol=1e-2*u.km)
assert_quantity_allclose(new.z, 688539.32*u.km, atol=1e-2*u.km)
def test_hpc_hgs_implicit_hcc():
# An HPC->HGS transformation should give the same answer whether the transformation step
# through HCC is implicit or explicit
start = SkyCoord(0*u.arcsec, 0*u.arcsec, 0.5*u.AU,
frame=Helioprojective(obstime='2019-06-01', observer='earth'))
frame = HeliographicStonyhurst(obstime='2019-12-01')
implicit = start.transform_to(frame)
explicit1 = start.transform_to(Heliocentric(obstime=start.obstime, observer='earth')).\
transform_to(frame)
explicit2 = start.transform_to(Heliocentric(obstime=frame.obstime, observer='earth')).\
transform_to(frame)
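    # The obstime chosen for the intermediate HCC frame should not affect the result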
assert_quantity_allclose(implicit.separation_3d(explicit1), 0*u.AU, atol=1e-10*u.AU)
assert_quantity_allclose(implicit.separation_3d(explicit2), 0*u.AU, atol=1e-10*u.AU)
@pytest.mark.skipif(astropy.__version__ < '3.2.0', reason="Not supported by Astropy <3.2")
def test_velocity_hcrs_hgs():
# Obtain the position/velocity of Earth in ICRS
obstime = Time(['2019-01-01', '2019-04-01', '2019-07-01', '2019-10-01'])
pos, vel = get_body_barycentric_posvel('earth', obstime)
loc = pos.with_differentials(vel.represent_as(CartesianDifferential))
earth = SkyCoord(loc, frame='icrs', obstime=obstime)
# The velocity of Earth in HGS should be very close to zero. The velocity in the HGS Y
# direction is slightly further away from zero because there is true latitudinal motion.
new = earth.heliographic_stonyhurst
assert_quantity_allclose(new.velocity.d_x, 0*u.km/u.s, atol=1e-15*u.km/u.s)
assert_quantity_allclose(new.velocity.d_y, 0*u.km/u.s, atol=1e-14*u.km/u.s)
    assert_quantity_allclose(new.velocity.d_z, 0*u.km/u.s, atol=1e-14*u.km/u.s)
# Test the loopback to ICRS
newer = new.icrs
assert_quantity_allclose(newer.velocity.d_x, vel.x)
assert_quantity_allclose(newer.velocity.d_y, vel.y)
assert_quantity_allclose(newer.velocity.d_z, vel.z)
def test_velocity_hgs_hgc():
# Construct a simple HGS coordinate with zero velocity
obstime = Time(['2019-01-01', '2019-04-01', '2019-07-01', '2019-10-01'])
pos = CartesianRepresentation(1, 0, 0)*u.AU
vel = CartesianDifferential(0, 0, 0)*u.km/u.s
loc = (pos.with_differentials(vel))._apply('repeat', obstime.size)
coord = SkyCoord(HeliographicStonyhurst(loc, obstime=obstime))
# The induced velocity in HGC should be entirely longitudinal, and approximately equal to one
# full rotation every mean synodic period (27.2753 days)
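    # i.e. d_lon should be roughly -360 deg / 27.2753 day ~= -13.2 deg/day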
new = coord.heliographic_carrington
new_vel = new.data.differentials['s'].represent_as(SphericalDifferential, new.data)
assert_quantity_allclose(new_vel.d_lon, -360*u.deg / (27.27253*u.day), rtol=1e-2)
assert_quantity_allclose(new_vel.d_lat, 0*u.deg/u.s)
assert_quantity_allclose(new_vel.d_distance, 0*u.km/u.s, atol=1e-7*u.km/u.s)
def test_hme_hee_sunspice():
# Compare our HME->HEE transformation against SunSPICE
# "HAE" is equivalent to Astropy's Heliocentric Mean Ecliptic, and defaults to J2000.0
#
# IDL> coord = [1.d, 0.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'HEE', /au, /degrees
# IDL> print, coord
# 1.0000000 110.01610 10.000300
old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliocentricMeanEcliptic(obstime='2019-06-01'))
new = old.transform_to(HeliocentricEarthEcliptic)
assert_quantity_allclose(new.lon, Longitude(110.01610*u.deg), atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 10.000300*u.deg, atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, old.distance)
# Transform from HAE precessed to the mean ecliptic of date instead of J2000.0
# IDL> coord = [1.d, 0.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'HEE', /au, /degrees, /precess
# IDL> print, coord
# 1.0000000 109.74535 10.000070
old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliocentricMeanEcliptic(obstime='2019-06-01',
equinox='2019-06-01'))
new = old.transform_to(HeliocentricEarthEcliptic)
assert_quantity_allclose(new.lon, Longitude(109.74535*u.deg), atol=0.05*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 10.000070*u.deg, atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, old.distance)
def test_hee_hee():
# Test HEE loopback transformation
obstime = Time('2001-01-01')
old = SkyCoord(90*u.deg, 10*u.deg, 1*u.AU, frame=HeliocentricEarthEcliptic(obstime=obstime))
new = old.transform_to(HeliocentricEarthEcliptic)
assert_quantity_allclose(new.lon, old.lon)
assert_quantity_allclose(new.lat, old.lat)
assert_quantity_allclose(new.distance, old.distance)
new = old.transform_to(HeliocentricEarthEcliptic(obstime=obstime + 1*u.day))
assert_quantity_allclose(new.lon, old.lon - 1*u.deg, atol=0.1*u.deg) # due to Earth motion
assert_quantity_allclose(new.lat, old.lat, atol=0.5*u.arcsec)
assert_quantity_allclose(new.distance, old.distance, rtol=1e-5)
def test_hee_gse_sunspice():
# Compare our HEE->GSE transformation against SunSPICE
#
# IDL> coord = [0.7d, -20.d, 10.d]
# IDL> convert_sunspice_coord, '2019-06-01', coord, 'HEE', 'GSE', /au, /degrees
# IDL> print, coord
# 0.45215884 32.777377 15.594639
old = SkyCoord(-20*u.deg, 10*u.deg, 0.7*u.AU,
frame=HeliocentricEarthEcliptic(obstime='2019-06-01'))
new = old.geocentricsolarecliptic
assert_quantity_allclose(new.lon, 32.777377*u.deg, atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 15.594639*u.deg, atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, 0.45215884*u.AU)
def test_gse_gse():
# Test GSE loopback transformation
old = SkyCoord(90*u.deg, 10*u.deg, 0.7*u.AU,
frame=GeocentricSolarEcliptic(obstime='2001-01-01'))
new = old.transform_to(GeocentricSolarEcliptic)
assert_quantity_allclose(new.lon, old.lon)
assert_quantity_allclose(new.lat, old.lat)
assert_quantity_allclose(new.distance, old.distance)
def test_hgs_hci_sunspice():
# Compare our HGS->HCI transformation against SunSPICE
# "HEQ" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst
#
# IDL> coord = [1.d, 120.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'HCI', /au, /degrees
# IDL> print, coord
# 1.0000000 -65.736793 10.000000
old = SkyCoord(120*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime='2019-06-01'))
new = old.transform_to(HeliocentricInertial)
assert_quantity_allclose(new.lon, -65.736793*u.deg, atol=0.5*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, old.lat)
assert_quantity_allclose(new.distance, old.radius)
def test_hci_hci():
# Test HCI loopback transformation
obstime = Time('2001-01-01')
old = SkyCoord(90*u.deg, 10*u.deg, 0.7*u.AU, frame=HeliocentricInertial(obstime=obstime))
new = old.transform_to(HeliocentricInertial)
assert_quantity_allclose(new.lon, old.lon)
assert_quantity_allclose(new.lat, old.lat)
assert_quantity_allclose(new.distance, old.distance)
new = old.transform_to(HeliocentricInertial(obstime=obstime + 1*u.day))
assert_quantity_allclose(new.lon, old.lon, atol=0.1*u.deg) # due to Earth motion
assert_quantity_allclose(new.lat, old.lat, atol=1e-3*u.deg)
assert_quantity_allclose(new.distance, old.distance, atol=1e-5*u.AU)
def test_hme_gei_sunspice():
# Compare our HME->GEI transformation against SunSPICE
# "HAE" is equivalent to Astropy's Heliocentric Mean Ecliptic, and defaults to J2000.0
#
# IDL> coord = [1.d, 120.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'GEI', /au, /degrees
# IDL> print, coord
# 1.8197210 95.230617 28.830109
old = SkyCoord(120*u.deg, 10*u.deg, 1*u.AU,
frame=HeliocentricMeanEcliptic(obstime='2019-06-01'))
new = old.transform_to(GeocentricEarthEquatorial)
assert_quantity_allclose(new.lon, Longitude(95.230617*u.deg), atol=0.01*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 28.830109*u.deg, atol=0.05*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, 1.8197210*u.AU)
# Transform from HAE precessed to the mean ecliptic of date instead of J2000.0
# IDL> coord = [1.d, 120.d, 10.d]
# IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'GEI', /au, /degrees, /precess
# IDL> print, coord
# 1.8217103 95.079030 28.827750
old = SkyCoord(120*u.deg, 10*u.deg, 1*u.AU,
frame=HeliocentricMeanEcliptic(obstime='2019-06-01', equinox='2019-06-01'))
new = old.transform_to(GeocentricEarthEquatorial(equinox=_J2000))
assert_quantity_allclose(new.lon, Longitude(95.079030*u.deg), atol=0.05*u.arcsec, rtol=0)
assert_quantity_allclose(new.lat, 28.827750*u.deg, atol=0.05*u.arcsec, rtol=0)
assert_quantity_allclose(new.distance, 1.8217103*u.AU)
def test_gei_gei():
# Test GEI loopback transformation using the 2017 revision to Franz & Harper 2002
t = Time('1996-08-28 16:46:00', scale='tt')
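    # Reference state vectors, apparently from the Franz & Harper worked example for this
    # epoch, expressed in Earth radii (6378.14 km)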
gei_j2000 = CartesianRepresentation([-5.7840451, -4.1082375, 1.9146822] * (6378.14*u.km))
gei_d = CartesianRepresentation([-5.7864918, -4.1039136, 1.9165612] * (6378.14*u.km))
old = SkyCoord(gei_j2000, frame=GeocentricEarthEquatorial(obstime=t))
new = old.transform_to(GeocentricEarthEquatorial(equinox=t, obstime=t)).cartesian
assert_quantity_allclose(new.xyz, gei_d.xyz)
def test_no_observer():
# Tests transformations to and from observer-based frames with no observer defined
frames_in = [Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=None),
Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=None, obstime='2001-01-01'),
Helioprojective(0*u.deg, 0*u.deg, observer=None),
Helioprojective(0*u.deg, 0*u.deg, observer=None, obstime='2001-01-01')]
frames_out = frames_in + [
HeliographicStonyhurst(0*u.deg, 0*u.deg, obstime=None),
HeliographicStonyhurst(0*u.deg, 0*u.deg, obstime='2001-01-01'),
Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=None, obstime='2012-12-12'),
Heliocentric(0*u.km, 0*u.km, 0*u.km, observer="earth", obstime=None),
Heliocentric(0*u.km, 0*u.km, 0*u.km, observer="earth", obstime='2001-01-01'),
Helioprojective(0*u.deg, 0*u.deg, observer=None, obstime='2012-12-12'),
Helioprojective(0*u.deg, 0*u.deg, observer="earth", obstime=None),
Helioprojective(0*u.deg, 0*u.deg, observer="earth", obstime='2001-01-01')]
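    # Every frame in frames_in lacks an observer, so transforming to or from a different
    # frame cannot be completed and should raise ConvertError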
# Self-transformations should succeed
for f in frames_in:
f.transform_to(f.replicate_without_data())
# All other transformations should error
for i, f1 in enumerate(frames_in):
for f2 in frames_out[i + 1:]:
with pytest.raises(ConvertError):
f1.transform_to(f2)
with pytest.raises(ConvertError):
f2.transform_to(f1)
def test_array_obstime():
    # Validate that you can transform from an array of obstimes to a frame with no obstime,
    # or to a frame with different obstimes.
a = SkyCoord([10]*2, [10]*2, unit=u.deg,
observer="earth",
obstime=["2019-01-01", "2019-01-02"],
frame="heliographic_carrington")
t = a.transform_to(Helioprojective)
assert isinstance(t.frame, Helioprojective)
t2 = a.transform_to(Helioprojective(obstime=["2019-01-03", "2019-01-04"]))
assert isinstance(t2.frame, Helioprojective)
_frameset1 = [HeliographicStonyhurst, HeliographicCarrington, HeliocentricInertial]
_frameset2 = [Heliocentric, Helioprojective]
@pytest.mark.parametrize("start_class", _frameset1 + _frameset2)
@pytest.mark.parametrize("end_class", _frameset1)
def test_no_obstime_on_one_end(start_class, end_class):
start_obstime = Time("2001-01-01")
if hasattr(start_class, 'observer'):
coord = start_class(CartesianRepresentation(0, 0, 0)*u.km,
obstime=start_obstime, observer="earth")
else:
coord = start_class(CartesianRepresentation(0, 0, 0)*u.km, obstime=start_obstime)
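    # The transformation to a frame class with no obstime should succeed and carry the
    # obstime over from the starting coordinate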
result = coord.transform_to(end_class)
assert result.obstime == start_obstime
def test_transform_with_sun_center():
sun_center = SkyCoord(0*u.deg, 0*u.deg, 0*u.AU,
frame=HeliographicStonyhurst(obstime="2001-01-01"))
with transform_with_sun_center():
result1 = sun_center.transform_to(HeliographicStonyhurst(obstime="2001-02-01"))
# The coordinate should stay pointing at Sun center
assert_quantity_allclose(result1.lon, sun_center.lon)
assert_quantity_allclose(result1.lat, sun_center.lat)
assert_quantity_allclose(result1.radius, sun_center.radius)
other = SkyCoord(10*u.deg, 20*u.deg, 1*u.AU,
frame=HeliographicStonyhurst(obstime="2001-01-01"))
with transform_with_sun_center():
result2 = other.transform_to(HeliographicCarrington(obstime="2001-02-01"))
# The coordinate should stay at the same latitude and the same distance from Sun center
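    # (longitude is not compared because the output frame is Carrington at a later obstime,
    # so the longitude value changes even though the point is fixed relative to Sun center)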
assert_quantity_allclose(result2.lat, other.lat)
assert_quantity_allclose(result2.radius, other.radius)
def test_transform_with_sun_center_reset():
# This test sequence ensures that the context manager does not change anything permanently
sun_center = SkyCoord(0*u.deg, 0*u.deg, 0*u.AU,
frame=HeliographicStonyhurst(obstime="2001-01-01"))
end_frame = HeliocentricInertial(obstime="2001-02-01")
# Without the context manager, the coordinate should not point at Sun center
result1 = sun_center.transform_to(end_frame)
assert result1.lon != sun_center.lon
assert result1.lat != sun_center.lat
assert result1.distance != sun_center.radius
# Using the context manager, the coordinate should point at Sun center
with transform_with_sun_center():
result2 = sun_center.transform_to(end_frame)
assert_quantity_allclose(result2.lon, sun_center.lon)
assert_quantity_allclose(result2.lat, sun_center.lat)
assert_quantity_allclose(result2.distance, sun_center.radius)
# After the context manager, the coordinate should have the same result as the first transform
result3 = sun_center.transform_to(end_frame)
assert_quantity_allclose(result3.lon, result1.lon)
assert_quantity_allclose(result3.lat, result1.lat)
assert_quantity_allclose(result3.distance, result1.distance)
[
"astropy.coordinates.Longitude",
"numpy.arctan2",
"astropy.coordinates.get_body_barycentric_posvel",
"sunpy.coordinates.HeliocentricInertial",
"sunpy.coordinates.Heliocentric",
"pytest.mark.skipif",
"astropy.coordinates.CartesianRepresentation",
"sunpy.coordinates.transformations.transform_with_sun_center",
"pytest.mark.parametrize",
"astropy.coordinates.Angle",
"astropy.coordinates.HeliocentricTrueEcliptic",
"pytest.raises",
"numpy.tan",
"sunpy.coordinates.GeocentricEarthEquatorial",
"astropy.tests.helper.quantity_allclose",
"sunpy.time.parse_time",
"astropy.coordinates.get_body_barycentric",
"astropy.time.Time",
"astropy.tests.helper.assert_quantity_allclose",
"sunpy.coordinates.HeliocentricEarthEcliptic",
"sunpy.coordinates.GeocentricSolarEcliptic",
"sunpy.coordinates.sun.L0",
"numpy.all",
"astropy.coordinates.CartesianDifferential",
"sunpy.coordinates.HeliographicCarrington",
"sunpy.coordinates.Helioprojective",
"sunpy.coordinates.HeliographicStonyhurst",
"astropy.coordinates.SkyCoord"
]
'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new_vel.d_distance', '(0 * u.km / u.s)'], {'atol': '(1e-07 * u.km / u.s)'}), '(new_vel.d_distance, 0 * u.km / u.s, atol=1e-07 * u\n .km / u.s)\n', (24348, 24414), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((25042, 25127), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', '(10.0003 * u.deg)'], {'atol': '(0.01 * u.arcsec)', 'rtol': '(0)'}), '(new.lat, 10.0003 * u.deg, atol=0.01 * u.arcsec, rtol=0\n )\n', (25066, 25127), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((25125, 25177), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', 'old.distance'], {}), '(new.distance, old.distance)\n', (25149, 25177), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((25825, 25910), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', '(10.00007 * u.deg)'], {'atol': '(0.01 * u.arcsec)', 'rtol': '(0)'}), '(new.lat, 10.00007 * u.deg, atol=0.01 * u.arcsec,\n rtol=0)\n', (25849, 25910), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((25908, 25960), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', 'old.distance'], {}), '(new.distance, old.distance)\n', (25932, 25960), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((26036, 26054), 'astropy.time.Time', 'Time', (['"""2001-01-01"""'], {}), "('2001-01-01')\n", (26040, 26054), False, 'from astropy.time import Time\n'), ((26212, 26254), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lon', 'old.lon'], {}), '(new.lon, old.lon)\n', (26236, 26254), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((26259, 26301), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', 'old.lat'], {}), '(new.lat, old.lat)\n', (26283, 26301), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((26306, 26358), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', 'old.distance'], {}), '(new.distance, old.distance)\n', (26330, 26358), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((26446, 26518), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lon', '(old.lon - 1 * u.deg)'], {'atol': '(0.1 * u.deg)'}), '(new.lon, old.lon - 1 * u.deg, atol=0.1 * u.deg)\n', (26470, 26518), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((26542, 26605), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', 'old.lat'], {'atol': '(0.5 * u.arcsec)'}), '(new.lat, old.lat, atol=0.5 * u.arcsec)\n', (26566, 26605), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((26608, 26672), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', 'old.distance'], {'rtol': '(1e-05)'}), '(new.distance, old.distance, rtol=1e-05)\n', (26632, 26672), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((27138, 27224), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lon', '(32.777377 
* u.deg)'], {'atol': '(0.01 * u.arcsec)', 'rtol': '(0)'}), '(new.lon, 32.777377 * u.deg, atol=0.01 * u.arcsec,\n rtol=0)\n', (27162, 27224), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((27221, 27307), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', '(15.594639 * u.deg)'], {'atol': '(0.01 * u.arcsec)', 'rtol': '(0)'}), '(new.lat, 15.594639 * u.deg, atol=0.01 * u.arcsec,\n rtol=0)\n', (27245, 27307), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((27304, 27361), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', '(0.45215884 * u.AU)'], {}), '(new.distance, 0.45215884 * u.AU)\n', (27328, 27361), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((27599, 27641), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lon', 'old.lon'], {}), '(new.lon, old.lon)\n', (27623, 27641), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((27646, 27688), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', 'old.lat'], {}), '(new.lat, old.lat)\n', (27670, 27688), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((27693, 27745), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', 'old.distance'], {}), '(new.distance, old.distance)\n', (27717, 27745), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((28284, 28370), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lon', '(-65.736793 * u.deg)'], {'atol': '(0.5 * u.arcsec)', 'rtol': '(0)'}), '(new.lon, -65.736793 * u.deg, atol=0.5 * u.arcsec,\n rtol=0)\n', (28308, 28370), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((28367, 28409), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', 'old.lat'], {}), '(new.lat, old.lat)\n', (28391, 28409), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((28414, 28464), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', 'old.radius'], {}), '(new.distance, old.radius)\n', (28438, 28464), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((28540, 28558), 'astropy.time.Time', 'Time', (['"""2001-01-01"""'], {}), "('2001-01-01')\n", (28544, 28558), False, 'from astropy.time import Time\n'), ((28707, 28749), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lon', 'old.lon'], {}), '(new.lon, old.lon)\n', (28731, 28749), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((28754, 28796), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', 'old.lat'], {}), '(new.lat, old.lat)\n', (28778, 28796), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((28801, 28853), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', 'old.distance'], {}), '(new.distance, old.distance)\n', (28825, 28853), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((28936, 28996), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', 
(['new.lon', 'old.lon'], {'atol': '(0.1 * u.deg)'}), '(new.lon, old.lon, atol=0.1 * u.deg)\n', (28960, 28996), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((29022, 29084), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', 'old.lat'], {'atol': '(0.001 * u.deg)'}), '(new.lat, old.lat, atol=0.001 * u.deg)\n', (29046, 29084), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((29086, 29157), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', 'old.distance'], {'atol': '(1e-05 * u.AU)'}), '(new.distance, old.distance, atol=1e-05 * u.AU)\n', (29110, 29157), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((29819, 29905), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', '(28.830109 * u.deg)'], {'atol': '(0.05 * u.arcsec)', 'rtol': '(0)'}), '(new.lat, 28.830109 * u.deg, atol=0.05 * u.arcsec,\n rtol=0)\n', (29843, 29905), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((29902, 29957), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', '(1.819721 * u.AU)'], {}), '(new.distance, 1.819721 * u.AU)\n', (29926, 29957), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((30566, 30651), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.lat', '(28.82775 * u.deg)'], {'atol': '(0.05 * u.arcsec)', 'rtol': '(0)'}), '(new.lat, 28.82775 * u.deg, atol=0.05 * u.arcsec,\n rtol=0)\n', (30590, 30651), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((30649, 30705), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.distance', '(1.8217103 * u.AU)'], {}), '(new.distance, 1.8217103 * u.AU)\n', (30673, 30705), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((30820, 30859), 'astropy.time.Time', 'Time', (['"""1996-08-28 16:46:00"""'], {'scale': '"""tt"""'}), "('1996-08-28 16:46:00', scale='tt')\n", (30824, 30859), False, 'from astropy.time import Time\n'), ((30876, 30955), 'astropy.coordinates.CartesianRepresentation', 'CartesianRepresentation', (['([-5.7840451, -4.1082375, 1.9146822] * (6378.14 * u.km))'], {}), '([-5.7840451, -4.1082375, 1.9146822] * (6378.14 * u.km))\n', (30899, 30955), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((30966, 31045), 'astropy.coordinates.CartesianRepresentation', 'CartesianRepresentation', (['([-5.7864918, -4.1039136, 1.9165612] * (6378.14 * u.km))'], {}), '([-5.7864918, -4.1039136, 1.9165612] * (6378.14 * u.km))\n', (30989, 31045), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((31210, 31254), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['new.xyz', 'gei_d.xyz'], {}), '(new.xyz, gei_d.xyz)\n', (31234, 31254), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((32888, 33022), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['([10] * 2)', '([10] * 2)'], {'unit': 'u.deg', 'observer': 
'"""earth"""', 'obstime': "['2019-01-01', '2019-01-02']", 'frame': '"""heliographic_carrington"""'}), "([10] * 2, [10] * 2, unit=u.deg, observer='earth', obstime=[\n '2019-01-01', '2019-01-02'], frame='heliographic_carrington')\n", (32896, 33022), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((33607, 33625), 'astropy.time.Time', 'Time', (['"""2001-01-01"""'], {}), "('2001-01-01')\n", (33611, 33625), False, 'from astropy.time import Time\n'), ((34349, 34402), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result1.lon', 'sun_center.lon'], {}), '(result1.lon, sun_center.lon)\n', (34373, 34402), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((34407, 34460), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result1.lat', 'sun_center.lat'], {}), '(result1.lat, sun_center.lat)\n', (34431, 34460), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((34465, 34524), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result1.radius', 'sun_center.radius'], {}), '(result1.radius, sun_center.radius)\n', (34489, 34524), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((34867, 34915), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result2.lat', 'other.lat'], {}), '(result2.lat, other.lat)\n', (34891, 34915), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((34920, 34974), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result2.radius', 'other.radius'], {}), '(result2.radius, other.radius)\n', (34944, 34974), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((35263, 35305), 'sunpy.coordinates.HeliocentricInertial', 'HeliocentricInertial', ([], {'obstime': '"""2001-02-01"""'}), "(obstime='2001-02-01')\n", (35283, 35305), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((35739, 35792), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result2.lon', 'sun_center.lon'], {}), '(result2.lon, sun_center.lon)\n', (35763, 35792), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((35797, 35850), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result2.lat', 'sun_center.lat'], {}), '(result2.lat, sun_center.lat)\n', (35821, 35850), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((35855, 35916), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result2.distance', 'sun_center.radius'], {}), '(result2.distance, sun_center.radius)\n', (35879, 35916), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((36070, 36120), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result3.lon', 'result1.lon'], {}), '(result3.lon, result1.lon)\n', (36094, 36120), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((36125, 36175), 
'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result3.lat', 'result1.lat'], {}), '(result3.lat, result1.lat)\n', (36149, 36175), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((36180, 36240), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['result3.distance', 'result1.distance'], {}), '(result3.distance, result1.distance)\n', (36204, 36240), False, 'from astropy.tests.helper import quantity_allclose, assert_quantity_allclose\n'), ((2567, 2577), 'numpy.tan', 'np.tan', (['L0'], {}), '(L0)\n', (2573, 2577), True, 'import numpy as np\n'), ((6314, 6355), 'astropy.coordinates.HeliocentricTrueEcliptic', 'HeliocentricMeanEcliptic', ([], {'equinox': 'obstime'}), '(equinox=obstime)\n', (6338, 6355), True, 'from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic\n'), ((6982, 7021), 'sunpy.coordinates.HeliographicCarrington', 'HeliographicCarrington', ([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (7004, 7021), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((7180, 7219), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (7202, 7219), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((9821, 9860), 'sunpy.coordinates.HeliographicCarrington', 'HeliographicCarrington', ([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (9843, 9860), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((9907, 9946), 'sunpy.coordinates.HeliographicCarrington', 'HeliographicCarrington', ([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (9929, 9946), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((13852, 13884), 'sunpy.coordinates.Heliocentric', 'Heliocentric', ([], {'observer': 'observer2'}), '(observer=observer2)\n', (13864, 13884), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((14271, 14322), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '(obstime + 1 * u.day)'}), '(obstime=obstime + 1 * u.day)\n', (14293, 14322), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((14767, 14818), 'sunpy.coordinates.HeliographicCarrington', 'HeliographicCarrington', ([], {'obstime': '(obstime + 1 * u.day)'}), '(obstime=obstime + 1 * u.day)\n', (14789, 14818), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, 
HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((15523, 15576), 'sunpy.coordinates.Heliocentric', 'Heliocentric', ([], {'observer': 'observer', 'obstime': '"""2001-03-31"""'}), "(observer=observer, obstime='2001-03-31')\n", (15535, 15576), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((16233, 16287), 'sunpy.coordinates.Heliocentric', 'Heliocentric', ([], {'observer': 'observer2', 'obstime': '"""2001-02-01"""'}), "(observer=observer2, obstime='2001-02-01')\n", (16245, 16287), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((16863, 16907), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '"""2001-03-31"""'}), "(obstime='2001-03-31')\n", (16885, 16907), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((17040, 17084), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': 'hcc1.obstime'}), '(obstime=hcc1.obstime)\n', (17062, 17084), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((17217, 17261), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': 'hcc2.obstime'}), '(obstime=hcc2.obstime)\n', (17239, 17261), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((17718, 17762), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '"""2001-03-31"""'}), "(obstime='2001-03-31')\n", (17740, 17762), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((17883, 17936), 'sunpy.coordinates.Heliocentric', 'Heliocentric', ([], {'observer': 'observer1', 'obstime': 'hgs.obstime'}), '(observer=observer1, obstime=hgs.obstime)\n', (17895, 17936), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((17966, 18019), 'sunpy.coordinates.Heliocentric', 'Heliocentric', ([], {'observer': 'observer2', 'obstime': 'hgs.obstime'}), '(observer=observer2, obstime=hgs.obstime)\n', (17978, 18019), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((18782, 18811), 'astropy.coordinates.Longitude', 'Longitude', (['(-108.65371 * u.deg)'], {}), '(-108.65371 * u.deg)\n', (18791, 18811), 
False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((19295, 19341), 'astropy.coordinates.HeliocentricTrueEcliptic', 'HeliocentricMeanEcliptic', ([], {'equinox': '"""2019-06-01"""'}), "(equinox='2019-06-01')\n", (19319, 19341), True, 'from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic\n'), ((19382, 19410), 'astropy.coordinates.Longitude', 'Longitude', (['(-108.3824 * u.deg)'], {}), '(-108.3824 * u.deg)\n', (19391, 19410), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((21143, 21205), 'astropy.coordinates.CartesianRepresentation', 'CartesianRepresentation', (['([700000.0, 800000.0, 900000.0] * u.km)'], {}), '([700000.0, 800000.0, 900000.0] * u.km)\n', (21166, 21205), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((21288, 21318), 'sunpy.coordinates.Heliocentric', 'Heliocentric', ([], {'observer': '"""earth"""'}), "(observer='earth')\n", (21300, 21318), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((23663, 23695), 'astropy.coordinates.CartesianRepresentation', 'CartesianRepresentation', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (23686, 23695), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((23843, 23887), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', (['loc'], {'obstime': 'obstime'}), '(loc, obstime=obstime)\n', (23865, 23887), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((24982, 25009), 'astropy.coordinates.Longitude', 'Longitude', (['(110.0161 * u.deg)'], {}), '(110.0161 * u.deg)\n', (24991, 25009), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((25765, 25793), 'astropy.coordinates.Longitude', 'Longitude', (['(109.74535 * u.deg)'], {}), '(109.74535 * u.deg)\n', (25774, 25793), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((26387, 26441), 'sunpy.coordinates.HeliocentricEarthEcliptic', 'HeliocentricEarthEcliptic', ([], {'obstime': '(obstime + 1 * u.day)'}), '(obstime=obstime + 1 * u.day)\n', (26412, 26441), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((28882, 28931), 'sunpy.coordinates.HeliocentricInertial', 
'HeliocentricInertial', ([], {'obstime': '(obstime + 1 * u.day)'}), '(obstime=obstime + 1 * u.day)\n', (28902, 28931), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((29759, 29787), 'astropy.coordinates.Longitude', 'Longitude', (['(95.230617 * u.deg)'], {}), '(95.230617 * u.deg)\n', (29768, 29787), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((30424, 30465), 'sunpy.coordinates.GeocentricEarthEquatorial', 'GeocentricEarthEquatorial', ([], {'equinox': '_J2000'}), '(equinox=_J2000)\n', (30449, 30465), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((30506, 30533), 'astropy.coordinates.Longitude', 'Longitude', (['(95.07903 * u.deg)'], {}), '(95.07903 * u.deg)\n', (30515, 30533), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((31385, 31442), 'sunpy.coordinates.Heliocentric', 'Heliocentric', (['(0 * u.km)', '(0 * u.km)', '(0 * u.km)'], {'observer': 'None'}), '(0 * u.km, 0 * u.km, 0 * u.km, observer=None)\n', (31397, 31442), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((31455, 31534), 'sunpy.coordinates.Heliocentric', 'Heliocentric', (['(0 * u.km)', '(0 * u.km)', '(0 * u.km)'], {'observer': 'None', 'obstime': '"""2001-01-01"""'}), "(0 * u.km, 0 * u.km, 0 * u.km, observer=None, obstime='2001-01-01')\n", (31467, 31534), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((31547, 31599), 'sunpy.coordinates.Helioprojective', 'Helioprojective', (['(0 * u.deg)', '(0 * u.deg)'], {'observer': 'None'}), '(0 * u.deg, 0 * u.deg, observer=None)\n', (31562, 31599), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((31614, 31688), 'sunpy.coordinates.Helioprojective', 'Helioprojective', (['(0 * u.deg)', '(0 * u.deg)'], {'observer': 'None', 'obstime': '"""2001-01-01"""'}), "(0 * u.deg, 0 * u.deg, observer=None, obstime='2001-01-01')\n", (31629, 31688), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((33179, 33232), 'sunpy.coordinates.Helioprojective', 'Helioprojective', ([], {'obstime': "['2019-01-03', '2019-01-04']"}), "(obstime=['2019-01-03', '2019-01-04'])\n", (33194, 33232), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, 
HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((34171, 34198), 'sunpy.coordinates.transformations.transform_with_sun_center', 'transform_with_sun_center', ([], {}), '()\n', (34196, 34198), False, 'from sunpy.coordinates.transformations import transform_with_sun_center\n'), ((34658, 34685), 'sunpy.coordinates.transformations.transform_with_sun_center', 'transform_with_sun_center', ([], {}), '()\n', (34683, 34685), False, 'from sunpy.coordinates.transformations import transform_with_sun_center\n'), ((35653, 35680), 'sunpy.coordinates.transformations.transform_with_sun_center', 'transform_with_sun_center', ([], {}), '()\n', (35678, 35680), False, 'from sunpy.coordinates.transformations import transform_with_sun_center\n'), ((4008, 4044), 'astropy.coordinates.get_body_barycentric', 'get_body_barycentric', (['"""earth"""', 'adate'], {}), "('earth', adate)\n", (4028, 4044), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((4789, 4825), 'astropy.coordinates.get_body_barycentric', 'get_body_barycentric', (['"""earth"""', 'times'], {}), "('earth', times)\n", (4809, 4825), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((6596, 6618), 'astropy.coordinates.Angle', 'Angle', (['"""308d13m30.51s"""'], {}), "('308d13m30.51s')\n", (6601, 6618), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((6694, 6709), 'astropy.coordinates.Angle', 'Angle', (['"""-0.27s"""'], {}), "('-0.27s')\n", (6699, 6709), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((7117, 7132), 'sunpy.coordinates.sun.L0', 'sun.L0', (['obstime'], {}), '(obstime)\n', (7123, 7132), False, 'from sunpy.coordinates import sun\n'), ((7695, 7734), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (7717, 7734), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((8643, 8682), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (8665, 8682), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((9570, 9609), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (9592, 9609), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((14203, 14242), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', 
([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (14225, 14242), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((14699, 14738), 'sunpy.coordinates.HeliographicCarrington', 'HeliographicCarrington', ([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (14721, 14738), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((18644, 18688), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '"""2019-06-01"""'}), "(obstime='2019-06-01')\n", (18666, 18688), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((20287, 20331), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '"""2019-06-01"""'}), "(obstime='2019-06-01')\n", (20309, 20331), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((21215, 21259), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '"""2019-06-01"""'}), "(obstime='2019-06-01')\n", (21237, 21259), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((21779, 21834), 'sunpy.coordinates.Helioprojective', 'Helioprojective', ([], {'obstime': '"""2019-06-01"""', 'observer': '"""earth"""'}), "(obstime='2019-06-01', observer='earth')\n", (21794, 21834), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((23711, 23741), 'astropy.coordinates.CartesianDifferential', 'CartesianDifferential', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (23732, 23741), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((24841, 24887), 'astropy.coordinates.HeliocentricTrueEcliptic', 'HeliocentricMeanEcliptic', ([], {'obstime': '"""2019-06-01"""'}), "(obstime='2019-06-01')\n", (24865, 24887), True, 'from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic\n'), ((25525, 25593), 'astropy.coordinates.HeliocentricTrueEcliptic', 'HeliocentricMeanEcliptic', ([], {'obstime': '"""2019-06-01"""', 'equinox': '"""2019-06-01"""'}), "(obstime='2019-06-01', equinox='2019-06-01')\n", (25549, 25593), True, 'from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic\n'), ((26108, 26150), 'sunpy.coordinates.HeliocentricEarthEcliptic', 'HeliocentricEarthEcliptic', ([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (26133, 26150), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, 
Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((27046, 27093), 'sunpy.coordinates.HeliocentricEarthEcliptic', 'HeliocentricEarthEcliptic', ([], {'obstime': '"""2019-06-01"""'}), "(obstime='2019-06-01')\n", (27071, 27093), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((27495, 27540), 'sunpy.coordinates.GeocentricSolarEcliptic', 'GeocentricSolarEcliptic', ([], {'obstime': '"""2001-01-01"""'}), "(obstime='2001-01-01')\n", (27518, 27540), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((28184, 28228), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '"""2019-06-01"""'}), "(obstime='2019-06-01')\n", (28206, 28228), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((28614, 28651), 'sunpy.coordinates.HeliocentricInertial', 'HeliocentricInertial', ([], {'obstime': 'obstime'}), '(obstime=obstime)\n', (28634, 28651), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((29618, 29664), 'astropy.coordinates.HeliocentricTrueEcliptic', 'HeliocentricMeanEcliptic', ([], {'obstime': '"""2019-06-01"""'}), "(obstime='2019-06-01')\n", (29642, 29664), True, 'from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic\n'), ((30327, 30395), 'astropy.coordinates.HeliocentricTrueEcliptic', 'HeliocentricMeanEcliptic', ([], {'obstime': '"""2019-06-01"""', 'equinox': '"""2019-06-01"""'}), "(obstime='2019-06-01', equinox='2019-06-01')\n", (30351, 30395), True, 'from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic\n'), ((31081, 31117), 'sunpy.coordinates.GeocentricEarthEquatorial', 'GeocentricEarthEquatorial', ([], {'obstime': 't'}), '(obstime=t)\n', (31106, 31117), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((31146, 31193), 'sunpy.coordinates.GeocentricEarthEquatorial', 'GeocentricEarthEquatorial', ([], {'equinox': 't', 'obstime': 't'}), '(equinox=t, obstime=t)\n', (31171, 31193), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((31725, 31783), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', (['(0 * u.deg)', '(0 * u.deg)'], {'obstime': 'None'}), '(0 * u.deg, 0 * u.deg, obstime=None)\n', (31747, 31783), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, 
get_earth\n'), ((31789, 31855), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', (['(0 * u.deg)', '(0 * u.deg)'], {'obstime': '"""2001-01-01"""'}), "(0 * u.deg, 0 * u.deg, obstime='2001-01-01')\n", (31811, 31855), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((31861, 31940), 'sunpy.coordinates.Heliocentric', 'Heliocentric', (['(0 * u.km)', '(0 * u.km)', '(0 * u.km)'], {'observer': 'None', 'obstime': '"""2012-12-12"""'}), "(0 * u.km, 0 * u.km, 0 * u.km, observer=None, obstime='2012-12-12')\n", (31873, 31940), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((31944, 32018), 'sunpy.coordinates.Heliocentric', 'Heliocentric', (['(0 * u.km)', '(0 * u.km)', '(0 * u.km)'], {'observer': '"""earth"""', 'obstime': 'None'}), "(0 * u.km, 0 * u.km, 0 * u.km, observer='earth', obstime=None)\n", (31956, 32018), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((32022, 32109), 'sunpy.coordinates.Heliocentric', 'Heliocentric', (['(0 * u.km)', '(0 * u.km)', '(0 * u.km)'], {'observer': '"""earth"""', 'obstime': '"""2001-01-01"""'}), "(0 * u.km, 0 * u.km, 0 * u.km, observer='earth', obstime=\n '2001-01-01')\n", (32034, 32109), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((32108, 32182), 'sunpy.coordinates.Helioprojective', 'Helioprojective', (['(0 * u.deg)', '(0 * u.deg)'], {'observer': 'None', 'obstime': '"""2012-12-12"""'}), "(0 * u.deg, 0 * u.deg, observer=None, obstime='2012-12-12')\n", (32123, 32182), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((32188, 32257), 'sunpy.coordinates.Helioprojective', 'Helioprojective', (['(0 * u.deg)', '(0 * u.deg)'], {'observer': '"""earth"""', 'obstime': 'None'}), "(0 * u.deg, 0 * u.deg, observer='earth', obstime=None)\n", (32203, 32257), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((32263, 32340), 'sunpy.coordinates.Helioprojective', 'Helioprojective', (['(0 * u.deg)', '(0 * u.deg)'], {'observer': '"""earth"""', 'obstime': '"""2001-01-01"""'}), "(0 * u.deg, 0 * u.deg, observer='earth', obstime='2001-01-01')\n", (32278, 32340), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((34115, 34159), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '"""2001-01-01"""'}), "(obstime='2001-01-01')\n", (34137, 34159), False, 'from 
sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((34242, 34286), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '"""2001-02-01"""'}), "(obstime='2001-02-01')\n", (34264, 34286), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((34602, 34646), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '"""2001-01-01"""'}), "(obstime='2001-01-01')\n", (34624, 34646), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((34724, 34768), 'sunpy.coordinates.HeliographicCarrington', 'HeliographicCarrington', ([], {'obstime': '"""2001-02-01"""'}), "(obstime='2001-02-01')\n", (34746, 34768), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((35201, 35245), 'sunpy.coordinates.HeliographicStonyhurst', 'HeliographicStonyhurst', ([], {'obstime': '"""2001-01-01"""'}), "(obstime='2001-01-01')\n", (35223, 35245), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((21970, 22023), 'sunpy.coordinates.Heliocentric', 'Heliocentric', ([], {'obstime': 'start.obstime', 'observer': '"""earth"""'}), "(obstime=start.obstime, observer='earth')\n", (21982, 22023), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((22090, 22143), 'sunpy.coordinates.Heliocentric', 'Heliocentric', ([], {'obstime': 'frame.obstime', 'observer': '"""earth"""'}), "(obstime=frame.obstime, observer='earth')\n", (22102, 22143), False, 'from sunpy.coordinates import Helioprojective, HeliographicStonyhurst, HeliographicCarrington, Heliocentric, HeliocentricEarthEcliptic, GeocentricSolarEcliptic, HeliocentricInertial, GeocentricEarthEquatorial, get_earth\n'), ((32596, 32623), 'pytest.raises', 'pytest.raises', (['ConvertError'], {}), '(ConvertError)\n', (32609, 32623), False, 'import pytest\n'), ((32678, 32705), 'pytest.raises', 'pytest.raises', (['ConvertError'], {}), '(ConvertError)\n', (32691, 32705), False, 'import pytest\n'), ((33696, 33728), 'astropy.coordinates.CartesianRepresentation', 'CartesianRepresentation', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (33719, 33728), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n'), ((33842, 33874), 'astropy.coordinates.CartesianRepresentation', 'CartesianRepresentation', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (33865, 33874), False, 'from astropy.coordinates import SkyCoord, get_body_barycentric, Angle, 
ConvertError, Longitude, CartesianRepresentation, get_body_barycentric_posvel, CartesianDifferential, SphericalDifferential\n')]
|
"""
tests for time conversions relevant to MSISE00
"""
from __future__ import annotations
import datetime
import typing
import numpy as np
from pytest import approx
import sciencedates as sd
T: list[typing.Any] = [datetime.datetime(2013, 7, 2, 12, 0, 0)]
T.append(T[0].date())
T.append(np.datetime64(T[0]))
T.append(str(T[0]))
def test_str():
t = T[3]
assert isinstance(t, str)
iyd, utsec, stl = sd.datetime2gtd(t, glon=42)
assert iyd == 183
assert utsec == 43200
assert stl == approx(14.8)
def test_dt64():
t = T[2]
assert isinstance(t, np.datetime64)
iyd, utsec, stl = sd.datetime2gtd(t, glon=42)
assert iyd == 183
assert utsec == 43200
assert stl == approx(14.8)
def test_date():
t = T[1]
assert isinstance(t, datetime.date)
iyd, utsec, stl = sd.datetime2gtd(t, glon=42)
assert iyd == 183
assert utsec == 0
assert stl == approx(2.8)
def test_datetime():
t = T[0]
assert isinstance(t, datetime.datetime)
iyd, utsec, stl = sd.datetime2gtd(t, glon=42)
assert iyd == 183
assert utsec == 43200
assert stl == approx(14.8)
def test_list():
iyd, utsec, stl = sd.datetime2gtd(T, glon=42)
assert (iyd == 183).all()
assert utsec == approx((43200, 0, 43200, 43200))
assert stl == approx((14.8, 2.8, 14.8, 14.8))
def test_glon():
glon = range(-180, 180 + 45, 45)
iyd, utsec, stl = sd.datetime2gtd(T, glon)
Estl = np.array(
[
np.arange(0, 24 + 3, 3),
np.arange(-12, 12 + 3, 3),
np.arange(0, 24 + 3, 3),
np.arange(0, 24 + 3, 3),
]
)
assert utsec == approx((43200, 0, 43200, 43200))
assert stl == approx(Estl)
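def test_expected_values_arithmetic():
    # Hedged cross-check of where the literal expectations above come from,
    # assuming the usual conventions: iyd is the day of the year and
    # stl = UT hours + glon / 15.  This is a sketch, not part of the original suite.
    t = T[0]
    assert t.timetuple().tm_yday == 183             # 2013-07-02 is day 183 of the year
    assert t.hour * 3600 + t.minute * 60 == 43200   # 12:00 UT expressed in seconds
    assert 43200 / 3600 + 42 / 15 == approx(14.8)   # 12 h UT + 2.8 h for glon = 42 deg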
|
[
"numpy.datetime64",
"sciencedates.datetime2gtd",
"datetime.datetime",
"numpy.arange",
"pytest.approx"
] |
[((218, 257), 'datetime.datetime', 'datetime.datetime', (['(2013)', '(7)', '(2)', '(12)', '(0)', '(0)'], {}), '(2013, 7, 2, 12, 0, 0)\n', (235, 257), False, 'import datetime\n'), ((290, 309), 'numpy.datetime64', 'np.datetime64', (['T[0]'], {}), '(T[0])\n', (303, 309), True, 'import numpy as np\n'), ((415, 442), 'sciencedates.datetime2gtd', 'sd.datetime2gtd', (['t'], {'glon': '(42)'}), '(t, glon=42)\n', (430, 442), True, 'import sciencedates as sd\n'), ((618, 645), 'sciencedates.datetime2gtd', 'sd.datetime2gtd', (['t'], {'glon': '(42)'}), '(t, glon=42)\n', (633, 645), True, 'import sciencedates as sd\n'), ((821, 848), 'sciencedates.datetime2gtd', 'sd.datetime2gtd', (['t'], {'glon': '(42)'}), '(t, glon=42)\n', (836, 848), True, 'import sciencedates as sd\n'), ((1027, 1054), 'sciencedates.datetime2gtd', 'sd.datetime2gtd', (['t'], {'glon': '(42)'}), '(t, glon=42)\n', (1042, 1054), True, 'import sciencedates as sd\n'), ((1177, 1204), 'sciencedates.datetime2gtd', 'sd.datetime2gtd', (['T'], {'glon': '(42)'}), '(T, glon=42)\n', (1192, 1204), True, 'import sciencedates as sd\n'), ((1418, 1442), 'sciencedates.datetime2gtd', 'sd.datetime2gtd', (['T', 'glon'], {}), '(T, glon)\n', (1433, 1442), True, 'import sciencedates as sd\n'), ((510, 522), 'pytest.approx', 'approx', (['(14.8)'], {}), '(14.8)\n', (516, 522), False, 'from pytest import approx\n'), ((713, 725), 'pytest.approx', 'approx', (['(14.8)'], {}), '(14.8)\n', (719, 725), False, 'from pytest import approx\n'), ((912, 923), 'pytest.approx', 'approx', (['(2.8)'], {}), '(2.8)\n', (918, 923), False, 'from pytest import approx\n'), ((1122, 1134), 'pytest.approx', 'approx', (['(14.8)'], {}), '(14.8)\n', (1128, 1134), False, 'from pytest import approx\n'), ((1256, 1288), 'pytest.approx', 'approx', (['(43200, 0, 43200, 43200)'], {}), '((43200, 0, 43200, 43200))\n', (1262, 1288), False, 'from pytest import approx\n'), ((1307, 1338), 'pytest.approx', 'approx', (['(14.8, 2.8, 14.8, 14.8)'], {}), '((14.8, 2.8, 14.8, 14.8))\n', (1313, 1338), False, 'from pytest import approx\n'), ((1662, 1694), 'pytest.approx', 'approx', (['(43200, 0, 43200, 43200)'], {}), '((43200, 0, 43200, 43200))\n', (1668, 1694), False, 'from pytest import approx\n'), ((1713, 1725), 'pytest.approx', 'approx', (['Estl'], {}), '(Estl)\n', (1719, 1725), False, 'from pytest import approx\n'), ((1487, 1510), 'numpy.arange', 'np.arange', (['(0)', '(24 + 3)', '(3)'], {}), '(0, 24 + 3, 3)\n', (1496, 1510), True, 'import numpy as np\n'), ((1524, 1549), 'numpy.arange', 'np.arange', (['(-12)', '(12 + 3)', '(3)'], {}), '(-12, 12 + 3, 3)\n', (1533, 1549), True, 'import numpy as np\n'), ((1563, 1586), 'numpy.arange', 'np.arange', (['(0)', '(24 + 3)', '(3)'], {}), '(0, 24 + 3, 3)\n', (1572, 1586), True, 'import numpy as np\n'), ((1600, 1623), 'numpy.arange', 'np.arange', (['(0)', '(24 + 3)', '(3)'], {}), '(0, 24 + 3, 3)\n', (1609, 1623), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import ovito as ov
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy import optimize
import pickle
from itertools import product
from multiprocessing import get_context
def func(x, phi_l):
a = (4/3)*np.pi*-2
b = 2*1.919
q = (a*(x**3)) + (b*x) - phi_l
return q
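# Hedged note (not part of the original script): func() is the residual of
# phi_l = (4/3)*pi*chi*x**3 + 2*sigma*x with chi = -2 and sigma = 1.919
# hard-coded above, and main() solves it with scipy.optimize.fsolve.
# A minimal standalone sketch, using a made-up volume fraction phi_l:
def _demo_solve_phi(phi_l=0.5):
    root = optimize.fsolve(func, x0=[0], args=(phi_l,))
    return root[0]  # dimensionless length that main() rescales by the lattice parameter a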
def main(file):
# hgs = np.zeros(0)
# time = np.zeros(0)
# vols = np.zeros(0)
try:
pipeline = ov.io.import_file(file)
#select W particles in the system.
pipeline.modifiers.append(ov.modifiers.SelectTypeModifier(property = 'Particle Type',
types = {'W'}))
solid_vols = np.zeros(0)
cell_vols = np.zeros(0)
sa = np.zeros(0)
calc_area = np.zeros(0)
mod1 = ov.modifiers.ConstructSurfaceModifier(only_selected = True,
radius = 1,
smoothing_level = 8,
identify_regions = True)
pipeline.modifiers.append(mod1)
loops = np.linspace(10,16,15)
for i in loops:
mod1.radius = np.round(i, decimals = 1)
data = pipeline.compute()
area = data.attributes['ConstructSurfaceMesh.surface_area']
solid_volume = data.attributes['ConstructSurfaceMesh.filled_volume']
cell_volume = data.attributes['ConstructSurfaceMesh.cell_volume']
fraction = solid_volume/cell_volume
tprop = data.particles['Particle Type']
#get the c5a id
c5a_id = tprop.type_by_name('C5A').id
try:
c2_id = tprop.type_by_name('C2').id
n_lip = np.count_nonzero(tprop == c5a_id) + np.count_nonzero(tprop == c2_id)
except KeyError:
n_lip = np.count_nonzero(tprop == c5a_id)
pass
#count the number of terminal carbons
phi_l = 1-fraction
#need a in nm not Angstroms
a = data.cell.matrix[0,0]
sigma = 1.919
chi = -2
root = optimize.fsolve(func, x0 = [0], args = phi_l)
l = root[0]*a
A_L = (sigma*(a**2))+((2*np.pi*chi)*(l**2))
a_0 = ((2*A_L)/(n_lip))
calc_area = np.append(calc_area, a_0)
solid_vols = np.append(solid_vols, solid_volume)
cell_vols = np.append(cell_vols, cell_volume)
sa = np.append(sa, area/n_lip)
# print('MAKING PLOT NOW')
# fig, (ax0,ax1) = plt.subplots(2,1,sharex = True)
# ax0.scatter(loops, v)
# ax1.scatter(loops, sa, label = 'measured')
# ax1.scatter(loops, calc_area, label = 'calculated')
# ax1.legend()
# ax0.set_ylabel('Surface Volume\nFraction in Unit Cell')
# ax1.set_ylabel('Surface Area\nper molecule (Å$^2$)')
# ax0.axhline(v.mean())
# ax1.axhline(sa.mean())
# ax1.axhline(calc_area.mean())
# ax0.text(loops[1],v.mean(),'Mean = '+str(v.mean())[:4])
# ax1.text(loops[1],sa.mean(),'Mean = '+str(sa.mean())[:4] +' Å')
# ax1.text(loops[1],calc_area.mean(),'Mean = '+str(calc_area.mean())[:4] +' Å')
# ax1.set_xlabel('Probe Sphere Radius')
# fig.subplots_adjust(hspace=0.1)
# name = files[f].split('.pdb')[0] + ' headgroup analysis.png'
# fig.savefig(name, dpi =200)
d = {'Solid Volume': solid_vols,
'Cell Volume': cell_vols,
'Surface Area per Lipid': sa,
'Calculated Area per Lipid': calc_area,
'Number of Lipids': n_lip}
dname = file.split('.pdb')[0] + '_headgroup_analysis.p'
pickle.dump(d, open(dname, 'wb'))
# t = file.split('md')[1].split('-')[0]
# hgs = np.append(hgs, sa.mean())
# vols = np.append(vols, v.mean())
# time = np.append(time, int(t))
except RuntimeError:
print('error!', file)
pass
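# Hedged helper (not in the original script): reload the per-file results
# dictionary that main() pickles, e.g. to reproduce the commented-out plots
# below.  The path is whatever '<file>_headgroup_analysis.p' main() wrote
# for a given input .pdb.
def load_headgroup_analysis(dname):
    with open(dname, 'rb') as fh:
        return pickle.load(fh)  # keys: 'Solid Volume', 'Cell Volume', 'Surface Area per Lipid', ...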
# fig1, ax2 = plt.subplots(1,1)
# ax2.scatter(time/100, hgs)
# ax2.set_xlabel('Simulation Time ($\mu$s)')
# ax2.set_ylabel('Mean Head Group Area (Å$^{2}$)')
# ax2.axhline(hgs.mean())
# ax2.set_xlim(0,time.max()/100+0.1)
# ax2.text(0,hgs.mean(), 'mean = %.3f, std = %.3f' %(hgs.mean(), hgs.std()))
# fig1.savefig(folder+'/head group areas.png', dpi =200)
# fig2, ax3 = plt.subplots(1,1)
# ax3.scatter(time/100, vols)
# ax3.set_xlabel('Simulation Time ($\mu$s)')
# ax3.set_ylabel('Fractional Volume of Surface')
# ax3.axhline(vols.mean())
# ax3.set_xlim(0,time.max()/100+0.1)
# ax3.text(0,vols.mean(), 'mean = %.3f, std = %.3f' %(vols.mean(), vols.std()))
# fig2.savefig(folder+'/volumes.png', dpi =200)
if __name__ == '__main__':
folder = os.getcwd()
files = glob.glob(folder+'/*.pdb')
paramlist = list(product(files))
k = len(paramlist)/14
if k < 1:
csize = 1
else:
csize = int(k)
print(paramlist, csize)
with get_context("spawn").Pool(processes = 14) as pool:
pool.starmap(main, paramlist, chunksize = csize)
|
[
"ovito.io.import_file",
"numpy.count_nonzero",
"os.getcwd",
"ovito.modifiers.ConstructSurfaceModifier",
"numpy.zeros",
"scipy.optimize.fsolve",
"multiprocessing.get_context",
"numpy.append",
"numpy.linspace",
"glob.glob",
"itertools.product",
"numpy.round",
"ovito.modifiers.SelectTypeModifier"
] |
[((5286, 5297), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5295, 5297), False, 'import os\n'), ((5315, 5343), 'glob.glob', 'glob.glob', (["(folder + '/*.pdb')"], {}), "(folder + '/*.pdb')\n", (5324, 5343), False, 'import glob\n'), ((507, 530), 'ovito.io.import_file', 'ov.io.import_file', (['file'], {}), '(file)\n', (524, 530), True, 'import ovito as ov\n'), ((802, 813), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (810, 813), True, 'import numpy as np\n'), ((838, 849), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (846, 849), True, 'import numpy as np\n'), ((867, 878), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (875, 878), True, 'import numpy as np\n'), ((899, 910), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (907, 910), True, 'import numpy as np\n'), ((935, 1048), 'ovito.modifiers.ConstructSurfaceModifier', 'ov.modifiers.ConstructSurfaceModifier', ([], {'only_selected': '(True)', 'radius': '(1)', 'smoothing_level': '(8)', 'identify_regions': '(True)'}), '(only_selected=True, radius=1,\n smoothing_level=8, identify_regions=True)\n', (972, 1048), True, 'import ovito as ov\n'), ((1277, 1300), 'numpy.linspace', 'np.linspace', (['(10)', '(16)', '(15)'], {}), '(10, 16, 15)\n', (1288, 1300), True, 'import numpy as np\n'), ((5364, 5378), 'itertools.product', 'product', (['files'], {}), '(files)\n', (5371, 5378), False, 'from itertools import product\n'), ((621, 691), 'ovito.modifiers.SelectTypeModifier', 'ov.modifiers.SelectTypeModifier', ([], {'property': '"""Particle Type"""', 'types': "{'W'}"}), "(property='Particle Type', types={'W'})\n", (652, 691), True, 'import ovito as ov\n'), ((1350, 1373), 'numpy.round', 'np.round', (['i'], {'decimals': '(1)'}), '(i, decimals=1)\n', (1358, 1373), True, 'import numpy as np\n'), ((2450, 2491), 'scipy.optimize.fsolve', 'optimize.fsolve', (['func'], {'x0': '[0]', 'args': 'phi_l'}), '(func, x0=[0], args=phi_l)\n', (2465, 2491), False, 'from scipy import optimize\n'), ((2673, 2698), 'numpy.append', 'np.append', (['calc_area', 'a_0'], {}), '(calc_area, a_0)\n', (2682, 2698), True, 'import numpy as np\n'), ((2724, 2759), 'numpy.append', 'np.append', (['solid_vols', 'solid_volume'], {}), '(solid_vols, solid_volume)\n', (2733, 2759), True, 'import numpy as np\n'), ((2784, 2817), 'numpy.append', 'np.append', (['cell_vols', 'cell_volume'], {}), '(cell_vols, cell_volume)\n', (2793, 2817), True, 'import numpy as np\n'), ((2835, 2862), 'numpy.append', 'np.append', (['sa', '(area / n_lip)'], {}), '(sa, area / n_lip)\n', (2844, 2862), True, 'import numpy as np\n'), ((5534, 5554), 'multiprocessing.get_context', 'get_context', (['"""spawn"""'], {}), "('spawn')\n", (5545, 5554), False, 'from multiprocessing import get_context\n'), ((1982, 2015), 'numpy.count_nonzero', 'np.count_nonzero', (['(tprop == c5a_id)'], {}), '(tprop == c5a_id)\n', (1998, 2015), True, 'import numpy as np\n'), ((2018, 2050), 'numpy.count_nonzero', 'np.count_nonzero', (['(tprop == c2_id)'], {}), '(tprop == c2_id)\n', (2034, 2050), True, 'import numpy as np\n'), ((2105, 2138), 'numpy.count_nonzero', 'np.count_nonzero', (['(tprop == c5a_id)'], {}), '(tprop == c5a_id)\n', (2121, 2138), True, 'import numpy as np\n')]
|
import cv2
import torch
import numpy as np
from torch import nn
from collections import OrderedDict
from torch.nn.functional import one_hot
from utils.box.bbox import bbox_switch, angle_switch, bbox_iou, encode, decode
from utils.box.ext.rotate_overlap_diff.oriented_iou_loss import cal_iou, cal_diou, cal_giou
from utils.box.rbbox import rbbox_batched_nms as nms
from utils.utils import soft_weight
def iou_obb_diff(gts, preds, type='diou'):
gt_bboxes = angle_switch(gts)
pred_bboxes = angle_switch(preds)
if type == 'riou':
iou, *_ = cal_iou(gt_bboxes.unsqueeze(0), pred_bboxes.unsqueeze(0))
linear = False
if linear:
iou_loss = 1 - iou
else:
iou_loss = - iou.clamp(min=1e-6).log()
elif type in ['giou', 'diou']:
riou_func = cal_giou if type == 'giou' else cal_diou
iou_loss, iou = riou_func(gt_bboxes.unsqueeze(0), pred_bboxes.unsqueeze(0))
else:
raise NotImplementedError
return iou, iou_loss
def match(bboxes_xyxy, anchors_xyxy, bboxes, anchors, iou_thresh, process=None, batch=32):
# Reduce GPU memory usage
ious = torch.cat([bbox_iou(bboxes_xyxy[i: i + batch], anchors_xyxy) for i in range(0, bboxes_xyxy.size(0), batch)])
max_ious, bbox_indexes = torch.max(ious, dim=0)
mask_neg = max_ious < iou_thresh[0]
mask_pos = max_ious > iou_thresh[1]
max_gt, argmax_gt = torch.max(ious, dim=1)
if (max_gt <= iou_thresh[1]).any():
mask_pos[argmax_gt[max_gt <= iou_thresh[1]]] = True
mask_neg[argmax_gt[max_gt <= iou_thresh[1]]] = False
pnms_thres = soft_weight(process)
r_anchors = torch.cat([anchors, torch.zeros_like(anchors[:,0]).unsqueeze(1)], -1)
scores = iou_obb_diff(bboxes[bbox_indexes[mask_pos]], r_anchors[mask_pos], type='riou')[0].squeeze(0)
labels = torch.zeros_like(scores)
keeps = nms(r_anchors[mask_pos], scores, labels, pnms_thres)[:500]
mask_keep = mask_pos.nonzero()[keeps]
mask_pos = torch.zeros_like(mask_pos)
mask_pos[mask_keep] = True
iou_balance = True
num_pos = mask_pos.sum().item()
if not iou_balance:
ratio = 1 # neg2pos
num_neg = ratio * num_pos
neg_indices = mask_neg.nonzero().squeeze()
sampled_neg_indices = np.random.choice(neg_indices.cpu(), size=num_neg)
mask_neg.fill_(False)[sampled_neg_indices] = True
else:
ratio_hard = 2 # hard2pos
ratio_bg = 100 # bg2pos
num_hard = ratio_hard * num_pos
num_bg = ratio_bg * num_pos
hard_indices = ((max_ious > 0.1) & (max_ious < iou_thresh[0])).nonzero().squeeze()
bg_indices = (max_ious < 1e-2).nonzero().squeeze()
sampled_hard_indices = np.random.choice(hard_indices.cpu(), size=num_hard)
sampled_bg_indices = np.random.choice(bg_indices.cpu(), size=num_bg)
sampled_neg_indices = np.concatenate([sampled_bg_indices, sampled_hard_indices])
mask_neg.fill_(False)[sampled_neg_indices] = True
return mask_pos, mask_neg, bbox_indexes
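# Hedged illustration (not called by the training code): the first step of
# match() is the standard max-IoU assignment -- each anchor is given the
# ground-truth box it overlaps most, and the two entries of iou_thresh split
# anchors into negatives (below) and positives (above).  A minimal
# axis-aligned sketch with made-up thresholds; match() then adds the per-GT
# forcing, NMS-based pruning, and negative sampling on top of this.
def _demo_iou_assignment(bboxes_xyxy, anchors_xyxy, iou_thresh=(0.4, 0.5)):
    ious = bbox_iou(bboxes_xyxy, anchors_xyxy)      # [num_gt, num_anchors]
    max_ious, bbox_indexes = torch.max(ious, dim=0)  # best gt per anchor
    mask_neg = max_ious < iou_thresh[0]
    mask_pos = max_ious > iou_thresh[1]
    return mask_pos, mask_neg, bbox_indexes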
def calc_loss(pred_cls, pred_loc, targets, anchors, iou_thresh, variance, balance, process=None):
device = pred_cls.device
num_classes = pred_cls.size(-1)
weight_pos, weight_neg = 2 * balance, 2 * (1 - balance)
anchors_xyxy = bbox_switch(anchors, 'xywh', 'xyxy')
criterion_cls = nn.BCEWithLogitsLoss(reduction='none')
criterion_loc = nn.SmoothL1Loss(reduction='sum')
loss_cls, loss_loc = torch.zeros([2], dtype=torch.float, device=device, requires_grad=True)
num_pos = 0
for i, target in enumerate(targets):
if target:
bboxes = target['bboxes'].to(device)
labels = target['labels'].to(device)
bboxes_xyxy = bbox_switch(bboxes[:, :4], 'xywh', 'xyxy')
pred_box = decode(pred_loc[i], anchors, variance)
mask_pos, mask_neg, bbox_indexes = match(bboxes_xyxy, anchors_xyxy, bboxes, anchors, iou_thresh, process=process)
labels = labels[bbox_indexes]
indexes_pos = bbox_indexes[mask_pos]
bboxes_matched = bboxes[indexes_pos]
anchors_matched = anchors[mask_pos]
bboxes_pred = pred_loc[i][mask_pos] # offsets
gt_bboxes, det_bboxes = encode(bboxes_matched, bboxes_pred, anchors_matched, variance)
labels = one_hot(labels, num_classes=num_classes).float()
labels[mask_neg] = 0
loss_cls_ = criterion_cls(pred_cls[i], labels)
loss_cls = loss_cls + loss_cls_[mask_pos].sum() * weight_pos + loss_cls_[mask_neg].sum() * weight_neg
use_iou = False
if use_iou:
rious, riou_loss = iou_obb_diff(bboxes_matched, pred_box[mask_pos])
loss_loc = loss_loc + riou_loss.sum()
else:
loss_loc = loss_loc + criterion_loc(gt_bboxes, det_bboxes)
num_pos += mask_pos.sum().item()
else:
loss_cls = loss_cls + criterion_cls(pred_cls[i], torch.zeros_like(pred_cls[i])).sum()
num_pos = max(num_pos, 1)
return OrderedDict([('loss_cls', loss_cls / num_pos), ('loss_loc', loss_loc / num_pos)])
|
[
"torch.nn.BCEWithLogitsLoss",
"torch.zeros_like",
"utils.box.bbox.encode",
"utils.box.bbox.angle_switch",
"torch.nn.functional.one_hot",
"utils.utils.soft_weight",
"utils.box.rbbox.rbbox_batched_nms",
"utils.box.bbox.decode",
"torch.max",
"torch.zeros",
"utils.box.bbox.bbox_switch",
"torch.nn.SmoothL1Loss",
"collections.OrderedDict",
"numpy.concatenate",
"utils.box.bbox.bbox_iou"
] |
[((461, 478), 'utils.box.bbox.angle_switch', 'angle_switch', (['gts'], {}), '(gts)\n', (473, 478), False, 'from utils.box.bbox import bbox_switch, angle_switch, bbox_iou, encode, decode\n'), ((497, 516), 'utils.box.bbox.angle_switch', 'angle_switch', (['preds'], {}), '(preds)\n', (509, 516), False, 'from utils.box.bbox import bbox_switch, angle_switch, bbox_iou, encode, decode\n'), ((1276, 1298), 'torch.max', 'torch.max', (['ious'], {'dim': '(0)'}), '(ious, dim=0)\n', (1285, 1298), False, 'import torch\n'), ((1403, 1425), 'torch.max', 'torch.max', (['ious'], {'dim': '(1)'}), '(ious, dim=1)\n', (1412, 1425), False, 'import torch\n'), ((1610, 1630), 'utils.utils.soft_weight', 'soft_weight', (['process'], {}), '(process)\n', (1621, 1630), False, 'from utils.utils import soft_weight\n'), ((1836, 1860), 'torch.zeros_like', 'torch.zeros_like', (['scores'], {}), '(scores)\n', (1852, 1860), False, 'import torch\n'), ((1989, 2015), 'torch.zeros_like', 'torch.zeros_like', (['mask_pos'], {}), '(mask_pos)\n', (2005, 2015), False, 'import torch\n'), ((3288, 3324), 'utils.box.bbox.bbox_switch', 'bbox_switch', (['anchors', '"""xywh"""', '"""xyxy"""'], {}), "(anchors, 'xywh', 'xyxy')\n", (3299, 3324), False, 'from utils.box.bbox import bbox_switch, angle_switch, bbox_iou, encode, decode\n'), ((3346, 3384), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (3366, 3384), False, 'from torch import nn\n'), ((3405, 3437), 'torch.nn.SmoothL1Loss', 'nn.SmoothL1Loss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (3420, 3437), False, 'from torch import nn\n'), ((3463, 3533), 'torch.zeros', 'torch.zeros', (['[2]'], {'dtype': 'torch.float', 'device': 'device', 'requires_grad': '(True)'}), '([2], dtype=torch.float, device=device, requires_grad=True)\n', (3474, 3533), False, 'import torch\n'), ((5070, 5155), 'collections.OrderedDict', 'OrderedDict', (["[('loss_cls', loss_cls / num_pos), ('loss_loc', loss_loc / num_pos)]"], {}), "([('loss_cls', loss_cls / num_pos), ('loss_loc', loss_loc /\n num_pos)])\n", (5081, 5155), False, 'from collections import OrderedDict\n'), ((1873, 1925), 'utils.box.rbbox.rbbox_batched_nms', 'nms', (['r_anchors[mask_pos]', 'scores', 'labels', 'pnms_thres'], {}), '(r_anchors[mask_pos], scores, labels, pnms_thres)\n', (1876, 1925), True, 'from utils.box.rbbox import rbbox_batched_nms as nms\n'), ((2879, 2937), 'numpy.concatenate', 'np.concatenate', (['[sampled_bg_indices, sampled_hard_indices]'], {}), '([sampled_bg_indices, sampled_hard_indices])\n', (2893, 2937), True, 'import numpy as np\n'), ((1149, 1197), 'utils.box.bbox.bbox_iou', 'bbox_iou', (['bboxes_xyxy[i:i + batch]', 'anchors_xyxy'], {}), '(bboxes_xyxy[i:i + batch], anchors_xyxy)\n', (1157, 1197), False, 'from utils.box.bbox import bbox_switch, angle_switch, bbox_iou, encode, decode\n'), ((3734, 3776), 'utils.box.bbox.bbox_switch', 'bbox_switch', (['bboxes[:, :4]', '"""xywh"""', '"""xyxy"""'], {}), "(bboxes[:, :4], 'xywh', 'xyxy')\n", (3745, 3776), False, 'from utils.box.bbox import bbox_switch, angle_switch, bbox_iou, encode, decode\n'), ((3800, 3838), 'utils.box.bbox.decode', 'decode', (['pred_loc[i]', 'anchors', 'variance'], {}), '(pred_loc[i], anchors, variance)\n', (3806, 3838), False, 'from utils.box.bbox import bbox_switch, angle_switch, bbox_iou, encode, decode\n'), ((4249, 4311), 'utils.box.bbox.encode', 'encode', (['bboxes_matched', 'bboxes_pred', 'anchors_matched', 'variance'], {}), '(bboxes_matched, bboxes_pred, anchors_matched, variance)\n', (4255, 
4311), False, 'from utils.box.bbox import bbox_switch, angle_switch, bbox_iou, encode, decode\n'), ((1667, 1698), 'torch.zeros_like', 'torch.zeros_like', (['anchors[:, 0]'], {}), '(anchors[:, 0])\n', (1683, 1698), False, 'import torch\n'), ((4334, 4374), 'torch.nn.functional.one_hot', 'one_hot', (['labels'], {'num_classes': 'num_classes'}), '(labels, num_classes=num_classes)\n', (4341, 4374), False, 'from torch.nn.functional import one_hot\n'), ((4992, 5021), 'torch.zeros_like', 'torch.zeros_like', (['pred_cls[i]'], {}), '(pred_cls[i])\n', (5008, 5021), False, 'import torch\n')]
|
import os
import numpy as np
import string
import re
dataDir = '/u/cs401/A3/data/'
# dataDir = './subdata/'
def Levenshtein(r, h):
"""
Calculation of WER with Levenshtein distance.
    Works for any pair of word lists (the DP matrices here are float arrays).
    O(nm) time and space complexity.
Parameters
----------
r : list of strings
h : list of strings
Returns
-------
(WER, nS, nI, nD): (float, int, int, int) WER, number of substitutions, insertions, and deletions respectively
Examples
--------
>>> wer("who is there".split(), "is there".split())
0.333 0 0 1
>>> wer("who is there".split(), "".split())
1.0 0 0 3
>>> wer("".split(), "who is there".split())
Inf 0 3 0
"""
n = len(r)
m = len(h)
R = np.zeros((n + 1, m + 1)) # matrix of distances
B = np.zeros((n + 1, m + 1)) # backtracing matrix
# initialize R
R[:, 0] = np.arange(n + 1)
R[0, :] = np.arange(m + 1)
# initialize backtrace, first row can only go left, first column can only go up
B[1:, 0] = 1
B[0, 1:] = 2
    # start the main DP loop
for i in range(1, n + 1):
for j in range(1, m + 1):
dele = R[i - 1, j] + 1
sub = R[i - 1, j - 1] if r[i - 1] == h[j - 1] else R[i - 1, j - 1] + 1
ins = R[i, j - 1] + 1
R[i, j] = min(dele, sub, ins)
if(R[i, j] == dele):
B[i, j] = 1 # up
elif(R[i, j] == ins):
B[i, j] = 2 # left
else:
B[i, j] = 3 # up-left
# get wer
wer = R[n, m] / n
# backtrace to get nS, nI, nD
nS, nI, nD = 0, 0, 0
i, j = n, m
while i != 0 or j != 0:
if(B[i, j] == 1): # up, delete
nD += 1
i -= 1
elif(B[i, j] == 2): # left, insert
nI += 1
j -= 1
else:
# up-left substitute
if(R[i, j] == R[i - 1, j - 1] + 1):
nS += 1
i -= 1
j -= 1
return wer, nS, nI, nD
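# Hedged sanity check mirroring the docstring examples (not part of the
# assignment code): deleting a single word from a three-word reference gives
# WER = 1/3 with one deletion and no substitutions or insertions.
def _demo_levenshtein():
    wer, nS, nI, nD = Levenshtein("who is there".split(), "is there".split())
    assert (nS, nI, nD) == (0, 0, 1)
    return wer  # ~0.333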
def preprocess(sent):
puncs = list(string.punctuation)
puncs.remove('[')
puncs.remove(']')
# lowercase and ignore [i] [label]
sent = sent.strip().lower().split()
trans = sent[2:]
trans = ' '.join(trans)
# remove <> and [] contents in transcripts
pattern = re.compile(r"<\w+>")
trans = re.sub(pattern, '', trans)
pattern = re.compile(r"\[\w+\]")
trans = re.sub(pattern, '', trans)
    # remove punctuation
for punc in puncs:
trans = trans.replace(punc, '')
return trans.split()
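# Hedged illustration (not part of the assignment code): preprocess() drops
# the first two tokens of a transcript line, lowercases, strips <...> and
# [...] tags, and removes punctuation.  On a made-up line:
def _demo_preprocess():
    assert preprocess("A 1 Hello, <um> [noise] world!") == ['hello', 'world']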
if __name__ == "__main__":
google_wer = []
kaldi_wer = []
# discussion file
with open("asrDiscussion.txt", "w+") as f:
for subdir, dirs, files in os.walk(dataDir):
for speaker in dirs:
                # read in transcript files for this speaker
trans_path = os.path.join(dataDir, speaker, 'transcripts.txt')
google_path = os.path.join(dataDir, speaker, 'transcripts.Google.txt')
kaldi_path = os.path.join(dataDir, speaker, 'transcripts.Kaldi.txt')
trans = open(trans_path, 'r').readlines()
google = open(google_path, 'r').readlines()
kaldi = open(kaldi_path, 'r').readlines()
                # only process when the transcript is nonempty and a reference exists
valid = len(trans) != 0 and (len(google) != 0 or len(kaldi) != 0)
if(valid):
lines = min(len(trans), len(google), len(kaldi))
                    # for each paired line, compute its WER
for i in range(lines):
curr_trans = preprocess(trans[i])
# calculate result for google
if(len(google) != 0):
curr_google = preprocess(google[i])
g_wer, g_sub, g_ins, g_del = Levenshtein(curr_trans, curr_google)
google_wer.append(g_wer)
g_res = speaker + " Google " + str(i) + " " + str(g_wer) + " S: " + str(g_sub) + " I: " + str(g_ins) + " D: " + str(g_del)
f.write(g_res)
f.write('\n')
print(g_res)
# calculate result for kaldi
if(len(kaldi) != 0):
curr_kaldi = preprocess(kaldi[i])
k_wer, k_sub, k_ins, k_del = Levenshtein(curr_trans, curr_kaldi)
kaldi_wer.append(k_wer)
k_res = speaker + " Kaldi " + str(i) + " " + str(k_wer) + " S: " + str(k_sub) + " I: " + str(k_ins) + " D: " + str(k_del)
f.write(k_res)
f.write('\n')
print(k_res)
f.write('\n')
f.write('\n')
# report summary of result
g_mean, g_std = np.mean(google_wer), np.std(google_wer)
k_mean, k_std = np.mean(kaldi_wer), np.std(kaldi_wer)
g_sum = "Google: mean is " + str(g_mean) + ", std is " + str(g_std)
k_sum = "Kaldi: mean is " + str(k_mean) + ", std is " + str(k_std)
f.write(g_sum)
f.write('\n')
f.write(k_sum)
print(g_sum)
print(k_sum)
f.close()
|
[
"os.path.join",
"numpy.std",
"os.walk",
"numpy.zeros",
"numpy.mean",
"numpy.arange",
"re.sub",
"re.compile"
] |
[((2123, 2147), 'numpy.zeros', 'np.zeros', (['(n + 1, m + 1)'], {}), '((n + 1, m + 1))\n', (2131, 2147), True, 'import numpy as np\n'), ((2178, 2202), 'numpy.zeros', 'np.zeros', (['(n + 1, m + 1)'], {}), '((n + 1, m + 1))\n', (2186, 2202), True, 'import numpy as np\n'), ((2258, 2274), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (2267, 2274), True, 'import numpy as np\n'), ((2289, 2305), 'numpy.arange', 'np.arange', (['(m + 1)'], {}), '(m + 1)\n', (2298, 2305), True, 'import numpy as np\n'), ((3691, 3711), 're.compile', 're.compile', (['"""<\\\\w+>"""'], {}), "('<\\\\w+>')\n", (3701, 3711), False, 'import re\n'), ((3724, 3750), 're.sub', 're.sub', (['pattern', '""""""', 'trans'], {}), "(pattern, '', trans)\n", (3730, 3750), False, 'import re\n'), ((3765, 3789), 're.compile', 're.compile', (['"""\\\\[\\\\w+\\\\]"""'], {}), "('\\\\[\\\\w+\\\\]')\n", (3775, 3789), False, 'import re\n'), ((3800, 3826), 're.sub', 're.sub', (['pattern', '""""""', 'trans'], {}), "(pattern, '', trans)\n", (3806, 3826), False, 'import re\n'), ((4121, 4137), 'os.walk', 'os.walk', (['dataDir'], {}), '(dataDir)\n', (4128, 4137), False, 'import os\n'), ((6386, 6405), 'numpy.mean', 'np.mean', (['google_wer'], {}), '(google_wer)\n', (6393, 6405), True, 'import numpy as np\n'), ((6407, 6425), 'numpy.std', 'np.std', (['google_wer'], {}), '(google_wer)\n', (6413, 6425), True, 'import numpy as np\n'), ((6450, 6468), 'numpy.mean', 'np.mean', (['kaldi_wer'], {}), '(kaldi_wer)\n', (6457, 6468), True, 'import numpy as np\n'), ((6470, 6487), 'numpy.std', 'np.std', (['kaldi_wer'], {}), '(kaldi_wer)\n', (6476, 6487), True, 'import numpy as np\n'), ((4261, 4310), 'os.path.join', 'os.path.join', (['dataDir', 'speaker', '"""transcripts.txt"""'], {}), "(dataDir, speaker, 'transcripts.txt')\n", (4273, 4310), False, 'import os\n'), ((4341, 4397), 'os.path.join', 'os.path.join', (['dataDir', 'speaker', '"""transcripts.Google.txt"""'], {}), "(dataDir, speaker, 'transcripts.Google.txt')\n", (4353, 4397), False, 'import os\n'), ((4427, 4482), 'os.path.join', 'os.path.join', (['dataDir', 'speaker', '"""transcripts.Kaldi.txt"""'], {}), "(dataDir, speaker, 'transcripts.Kaldi.txt')\n", (4439, 4482), False, 'import os\n')]
|
import os
import sys
import time
from glob import glob
import numpy as np
import pandas as pd
import pytest
from PartSegCore.algorithm_describe_base import SegmentationProfile
from PartSegCore.analysis.batch_processing import batch_backend
from PartSegCore.analysis.batch_processing.batch_backend import CalculationManager, CalculationProcess
from PartSegCore.analysis.calculation_plan import (
Calculation,
CalculationPlan,
CalculationTree,
FileCalculation,
MaskCreate,
MaskSuffix,
MeasurementCalculate,
RootType,
)
from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent
from PartSegCore.analysis.measurement_calculation import MeasurementProfile
from PartSegCore.image_operations import RadiusType
from PartSegCore.mask_create import MaskProperty
from PartSegCore.segmentation.noise_filtering import DimensionType
from PartSegCore.universal_const import UNIT_SCALE, Units
from PartSegImage import Image, ImageWriter, TiffImageReader
class MocksCalculation:
def __init__(self, file_path):
self.file_path = file_path
@pytest.fixture
def create_test_data(tmpdir):
# for future use
spacing = tuple([x / UNIT_SCALE[Units.nm.value] for x in (210, 70, 70)])
res = []
for i in range(8):
mask_data = np.zeros((10, 20, 20 + i), dtype=np.uint8)
mask_data[1:-1, 2:-2, 2:-2] = 1
data = np.zeros(mask_data.shape + (2,), dtype=np.uint16)
data[1:-1, 2:-2, 2:-2] = 15000
data[2:-2, 3:-3, 3:7] = 33000
data[2:-2, 3:-3, -7:-3] = 33000
image = Image(data, spacing, "", mask=mask_data, axes_order="ZYXC")
ImageWriter.save(image, os.path.join(str(tmpdir), f"file_{i}.tif"))
res.append(os.path.join(str(tmpdir), f"file_{i}.tif"))
ImageWriter.save_mask(image, os.path.join(str(tmpdir), f"file_{i}_mask.tif"))
return res
# TODO add check of per component measurements
# noinspection DuplicatedCode
class TestCalculationProcess:
@staticmethod
def create_calculation_plan():
parameters = {
"channel": 1,
"minimum_size": 200,
"threshold": {
"name": "Base/Core",
"values": {
"core_threshold": {"name": "Manual", "values": {"threshold": 30000}},
"base_threshold": {"name": "Manual", "values": {"threshold": 13000}},
},
},
"noise_filtering": {"name": "Gauss", "values": {"dimension_type": DimensionType.Layer, "radius": 1.0}},
"side_connection": False,
"sprawl_type": {"name": "Euclidean", "values": {}},
}
segmentation = SegmentationProfile(name="test", algorithm="Lower threshold with watershed", values=parameters)
mask_suffix = MaskSuffix(name="", suffix="_mask")
chosen_fields = [
MeasurementEntry(
name="Segmentation Volume",
calculation_tree=Leaf(name="Volume", area=AreaType.ROI, per_component=PerComponent.No),
),
MeasurementEntry(
name="Segmentation Volume/Mask Volume",
calculation_tree=Node(
left=Leaf(name="Volume", area=AreaType.ROI, per_component=PerComponent.No),
op="/",
right=Leaf(name="Volume", area=AreaType.Mask, per_component=PerComponent.No),
),
),
MeasurementEntry(
"Segmentation Components Number",
calculation_tree=Leaf("Components number", area=AreaType.ROI, per_component=PerComponent.No),
),
]
statistic = MeasurementProfile(name="base_measure", chosen_fields=chosen_fields, name_prefix="")
statistic_calculate = MeasurementCalculate(
channel=0, units=Units.µm, statistic_profile=statistic, name_prefix=""
)
tree = CalculationTree(
RootType.Image,
[CalculationTree(mask_suffix, [CalculationTree(segmentation, [CalculationTree(statistic_calculate, [])])])],
)
return CalculationPlan(tree=tree, name="test")
@staticmethod
def create_calculation_plan2():
parameters = {
"channel": 0,
"minimum_size": 200,
"threshold": {
"name": "Base/Core",
"values": {
"core_threshold": {"name": "Manual", "values": {"threshold": 30000}},
"base_threshold": {"name": "Manual", "values": {"threshold": 13000}},
},
},
"noise_filtering": {"name": "Gauss", "values": {"dimension_type": DimensionType.Layer, "radius": 1.0}},
"side_connection": False,
"sprawl_type": {"name": "Euclidean", "values": {}},
}
segmentation = SegmentationProfile(name="test", algorithm="Lower threshold with watershed", values=parameters)
chosen_fields = [
MeasurementEntry(
name="Segmentation Volume",
calculation_tree=Leaf(name="Volume", area=AreaType.ROI, per_component=PerComponent.No),
),
MeasurementEntry(
name="Segmentation Volume/Mask Volume",
calculation_tree=Node(
left=Leaf(name="Volume", area=AreaType.ROI, per_component=PerComponent.No),
op="/",
right=Leaf(name="Volume", area=AreaType.Mask, per_component=PerComponent.No),
),
),
MeasurementEntry(
"Segmentation Components Number",
calculation_tree=Leaf("Components number", area=AreaType.ROI, per_component=PerComponent.No),
),
]
statistic = MeasurementProfile(name="base_measure", chosen_fields=chosen_fields, name_prefix="")
statistic_calculate = MeasurementCalculate(
channel=0, units=Units.µm, statistic_profile=statistic, name_prefix=""
)
tree = CalculationTree(
RootType.Mask_project, [CalculationTree(segmentation, [CalculationTree(statistic_calculate, [])])]
)
return CalculationPlan(tree=tree, name="test2")
@staticmethod
def create_calculation_plan3():
parameters = {
"channel": 1,
"minimum_size": 200,
"threshold": {
"name": "Base/Core",
"values": {
"core_threshold": {"name": "Manual", "values": {"threshold": 30000}},
"base_threshold": {"name": "Manual", "values": {"threshold": 13000}},
},
},
"noise_filtering": {"name": "Gauss", "values": {"dimension_type": DimensionType.Layer, "radius": 1.0}},
"side_connection": False,
"sprawl_type": {"name": "Euclidean", "values": {}},
}
segmentation = SegmentationProfile(name="test", algorithm="Lower threshold with watershed", values=parameters)
mask_suffix = MaskSuffix(name="", suffix="_mask")
chosen_fields = [
MeasurementEntry(
name="Segmentation Volume",
calculation_tree=Leaf(name="Volume", area=AreaType.ROI, per_component=PerComponent.No),
),
MeasurementEntry(
name="Segmentation Volume/Mask Volume",
calculation_tree=Node(
left=Leaf(name="Volume", area=AreaType.ROI, per_component=PerComponent.No),
op="/",
right=Leaf(name="Volume", area=AreaType.Mask, per_component=PerComponent.No),
),
),
MeasurementEntry(
"Segmentation Components Number",
calculation_tree=Leaf("Components number", area=AreaType.ROI, per_component=PerComponent.No),
),
MeasurementEntry(
"Segmentation Volume per component",
calculation_tree=Leaf("Volume", area=AreaType.ROI, per_component=PerComponent.Yes),
),
]
statistic = MeasurementProfile(name="base_measure", chosen_fields=chosen_fields, name_prefix="")
statistic_calculate = MeasurementCalculate(
channel=0, units=Units.µm, statistic_profile=statistic, name_prefix=""
)
mask_create = MaskCreate("", MaskProperty(RadiusType.NO, 0, RadiusType.NO, 0, True, False, False))
parameters2 = {
"channel": 1,
"minimum_size": 200,
"threshold": {"name": "Manual", "values": {"threshold": 30000}},
"noise_filtering": {"name": "Gauss", "values": {"dimension_type": DimensionType.Layer, "radius": 1.0}},
"side_connection": False,
}
segmentation2 = SegmentationProfile(name="test", algorithm="Lower threshold", values=parameters2)
chosen_fields = [
MeasurementEntry(
name="Segmentation Volume",
calculation_tree=Leaf(name="Volume", area=AreaType.ROI, per_component=PerComponent.No),
),
MeasurementEntry(
name="Segmentation Volume/Mask Volume",
calculation_tree=Node(
left=Leaf(name="Volume", area=AreaType.ROI, per_component=PerComponent.No),
op="/",
right=Leaf(name="Volume", area=AreaType.Mask, per_component=PerComponent.No),
),
),
MeasurementEntry(
"Segmentation Components Number",
calculation_tree=Leaf("Components number", area=AreaType.ROI, per_component=PerComponent.No),
),
MeasurementEntry(
"Mask Volume per component",
calculation_tree=Leaf("Volume", area=AreaType.Mask, per_component=PerComponent.Yes),
),
]
statistic = MeasurementProfile(name="base_measure2", chosen_fields=chosen_fields[:], name_prefix="aa_")
statistic_calculate2 = MeasurementCalculate(
channel=0, units=Units.µm, statistic_profile=statistic, name_prefix=""
)
chosen_fields.append(
MeasurementEntry(
"Segmentation Volume per component",
calculation_tree=Leaf("Volume", area=AreaType.ROI, per_component=PerComponent.Yes),
)
)
statistic = MeasurementProfile(name="base_measure3", chosen_fields=chosen_fields[:], name_prefix="bb_")
statistic_calculate3 = MeasurementCalculate(
channel=0, units=Units.µm, statistic_profile=statistic, name_prefix=""
)
tree = CalculationTree(
RootType.Image,
[
CalculationTree(
mask_suffix,
[
CalculationTree(
segmentation,
[
CalculationTree(statistic_calculate, []),
CalculationTree(
mask_create,
[
CalculationTree(
segmentation2,
[
CalculationTree(statistic_calculate2, []),
CalculationTree(statistic_calculate3, []),
],
),
],
),
],
)
],
)
],
)
return CalculationPlan(tree=tree, name="test")
def test_one_file(self, data_test_dir):
plan = self.create_calculation_plan()
process = CalculationProcess()
file_path = os.path.join(data_test_dir, "stack1_components", "stack1_component5.tif")
calc = MocksCalculation(file_path)
process.calculation = calc
process.image = TiffImageReader.read_image(file_path)
process.iterate_over(plan.execution_tree)
assert len(process.measurement[0]) == 3
@pytest.mark.filterwarnings("ignore:This method will be removed")
def test_full_pipeline(self, tmpdir, data_test_dir, monkeypatch):
monkeypatch.setattr(batch_backend, "CalculationProcess", MockCalculationProcess)
plan = self.create_calculation_plan()
file_pattern = os.path.join(data_test_dir, "stack1_components", "stack1_component*[0-9].tif")
file_paths = sorted(glob(file_pattern))
assert os.path.basename(file_paths[0]) == "stack1_component1.tif"
calc = Calculation(
file_paths,
base_prefix=data_test_dir,
result_prefix=data_test_dir,
measurement_file_path=os.path.join(tmpdir, "test.xlsx"),
sheet_name="Sheet1",
calculation_plan=plan,
voxel_size=(1, 1, 1),
)
manager = CalculationManager()
manager.set_number_of_workers(3)
manager.add_calculation(calc)
while manager.has_work:
time.sleep(0.1)
manager.get_results()
manager.writer.finish()
if sys.platform == "darwin":
time.sleep(2)
else:
time.sleep(0.4)
assert os.path.exists(os.path.join(tmpdir, "test.xlsx"))
df = pd.read_excel(os.path.join(tmpdir, "test.xlsx"), index_col=0, header=[0, 1])
assert df.shape == (8, 4)
for i in range(8):
assert os.path.basename(df.name.units[i]) == f"stack1_component{i+1}.tif"
@pytest.mark.filterwarnings("ignore:This method will be removed")
def test_full_pipeline_mask_project(self, tmpdir, data_test_dir):
plan = self.create_calculation_plan2()
file_pattern = os.path.join(data_test_dir, "*nucleus.seg")
file_paths = glob(file_pattern)
calc = Calculation(
file_paths,
base_prefix=data_test_dir,
result_prefix=data_test_dir,
measurement_file_path=os.path.join(tmpdir, "test2.xlsx"),
sheet_name="Sheet1",
calculation_plan=plan,
voxel_size=(1, 1, 1),
)
manager = CalculationManager()
manager.set_number_of_workers(2)
manager.add_calculation(calc)
while manager.has_work:
time.sleep(0.1)
manager.get_results()
if sys.platform == "darwin":
time.sleep(2)
else:
time.sleep(0.4)
manager.writer.finish()
assert os.path.exists(os.path.join(tmpdir, "test2.xlsx"))
df = pd.read_excel(os.path.join(tmpdir, "test2.xlsx"), index_col=0, header=[0, 1])
assert df.shape == (2, 4)
@pytest.mark.filterwarnings("ignore:This method will be removed")
def test_full_pipeline_component_split(self, tmpdir, data_test_dir):
plan = self.create_calculation_plan3()
file_pattern = os.path.join(data_test_dir, "stack1_components", "stack1_component*[0-9].tif")
file_paths = glob(file_pattern)
calc = Calculation(
file_paths,
base_prefix=data_test_dir,
result_prefix=data_test_dir,
measurement_file_path=os.path.join(tmpdir, "test3.xlsx"),
sheet_name="Sheet1",
calculation_plan=plan,
voxel_size=(1, 1, 1),
)
manager = CalculationManager()
manager.set_number_of_workers(2)
manager.add_calculation(calc)
while manager.has_work:
time.sleep(0.1)
res = manager.get_results()
if res.errors:
print(res.errors, file=sys.stderr)
if sys.platform == "darwin":
time.sleep(2)
else:
time.sleep(0.4)
manager.writer.finish()
assert os.path.exists(os.path.join(tmpdir, "test3.xlsx"))
df = pd.read_excel(os.path.join(tmpdir, "test3.xlsx"), index_col=0, header=[0, 1])
assert df.shape == (8, 10)
df2 = pd.read_excel(os.path.join(tmpdir, "test3.xlsx"), sheet_name=1, index_col=0, header=[0, 1])
assert df2.shape[0] > 8
assert df2.shape == (df["Segmentation Components Number"]["count"].sum(), 6)
df3 = pd.read_excel(os.path.join(tmpdir, "test3.xlsx"), sheet_name=2, index_col=0, header=[0, 1])
assert df3.shape == (df["Segmentation Components Number"]["count"].sum(), 6)
df4 = pd.read_excel(os.path.join(tmpdir, "test3.xlsx"), sheet_name=3, index_col=0, header=[0, 1])
assert df4.shape == (df["Segmentation Components Number"]["count"].sum(), 8)
class MockCalculationProcess(CalculationProcess):
def do_calculation(self, calculation: FileCalculation):
if os.path.basename(calculation.file_path) == "stack1_component1.tif":
time.sleep(0.5)
return super().do_calculation(calculation)
|
[
"PartSegCore.analysis.calculation_plan.MaskSuffix",
"PartSegCore.analysis.calculation_plan.CalculationPlan",
"os.path.basename",
"PartSegCore.analysis.calculation_plan.CalculationTree",
"PartSegImage.TiffImageReader.read_image",
"PartSegImage.Image",
"numpy.zeros",
"PartSegCore.analysis.measurement_calculation.MeasurementProfile",
"PartSegCore.analysis.batch_processing.batch_backend.CalculationManager",
"PartSegCore.mask_create.MaskProperty",
"time.sleep",
"PartSegCore.analysis.measurement_base.Leaf",
"PartSegCore.analysis.calculation_plan.MeasurementCalculate",
"glob.glob",
"pytest.mark.filterwarnings",
"PartSegCore.analysis.batch_processing.batch_backend.CalculationProcess",
"os.path.join",
"PartSegCore.algorithm_describe_base.SegmentationProfile"
] |
[((12277, 12341), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:This method will be removed"""'], {}), "('ignore:This method will be removed')\n", (12303, 12341), False, 'import pytest\n'), ((13743, 13807), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:This method will be removed"""'], {}), "('ignore:This method will be removed')\n", (13769, 13807), False, 'import pytest\n'), ((14894, 14958), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:This method will be removed"""'], {}), "('ignore:This method will be removed')\n", (14920, 14958), False, 'import pytest\n'), ((1315, 1357), 'numpy.zeros', 'np.zeros', (['(10, 20, 20 + i)'], {'dtype': 'np.uint8'}), '((10, 20, 20 + i), dtype=np.uint8)\n', (1323, 1357), True, 'import numpy as np\n'), ((1413, 1462), 'numpy.zeros', 'np.zeros', (['(mask_data.shape + (2,))'], {'dtype': 'np.uint16'}), '(mask_data.shape + (2,), dtype=np.uint16)\n', (1421, 1462), True, 'import numpy as np\n'), ((1596, 1655), 'PartSegImage.Image', 'Image', (['data', 'spacing', '""""""'], {'mask': 'mask_data', 'axes_order': '"""ZYXC"""'}), "(data, spacing, '', mask=mask_data, axes_order='ZYXC')\n", (1601, 1655), False, 'from PartSegImage import Image, ImageWriter, TiffImageReader\n'), ((2699, 2798), 'PartSegCore.algorithm_describe_base.SegmentationProfile', 'SegmentationProfile', ([], {'name': '"""test"""', 'algorithm': '"""Lower threshold with watershed"""', 'values': 'parameters'}), "(name='test', algorithm='Lower threshold with watershed',\n values=parameters)\n", (2718, 2798), False, 'from PartSegCore.algorithm_describe_base import SegmentationProfile\n'), ((2817, 2852), 'PartSegCore.analysis.calculation_plan.MaskSuffix', 'MaskSuffix', ([], {'name': '""""""', 'suffix': '"""_mask"""'}), "(name='', suffix='_mask')\n", (2827, 2852), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((3688, 3776), 'PartSegCore.analysis.measurement_calculation.MeasurementProfile', 'MeasurementProfile', ([], {'name': '"""base_measure"""', 'chosen_fields': 'chosen_fields', 'name_prefix': '""""""'}), "(name='base_measure', chosen_fields=chosen_fields,\n name_prefix='')\n", (3706, 3776), False, 'from PartSegCore.analysis.measurement_calculation import MeasurementProfile\n'), ((3803, 3899), 'PartSegCore.analysis.calculation_plan.MeasurementCalculate', 'MeasurementCalculate', ([], {'channel': '(0)', 'units': 'Units.μm', 'statistic_profile': 'statistic', 'name_prefix': '""""""'}), "(channel=0, units=Units.μm, statistic_profile=statistic,\n name_prefix='')\n", (3823, 3899), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((4124, 4163), 'PartSegCore.analysis.calculation_plan.CalculationPlan', 'CalculationPlan', ([], {'tree': 'tree', 'name': '"""test"""'}), "(tree=tree, name='test')\n", (4139, 4163), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((4859, 4958), 'PartSegCore.algorithm_describe_base.SegmentationProfile', 'SegmentationProfile', ([], {'name': '"""test"""', 'algorithm': '"""Lower threshold with watershed"""', 'values': 'parameters'}), "(name='test', algorithm='Lower threshold with watershed',\n values=parameters)\n", (4878, 
4958), False, 'from PartSegCore.algorithm_describe_base import SegmentationProfile\n'), ((5790, 5878), 'PartSegCore.analysis.measurement_calculation.MeasurementProfile', 'MeasurementProfile', ([], {'name': '"""base_measure"""', 'chosen_fields': 'chosen_fields', 'name_prefix': '""""""'}), "(name='base_measure', chosen_fields=chosen_fields,\n name_prefix='')\n", (5808, 5878), False, 'from PartSegCore.analysis.measurement_calculation import MeasurementProfile\n'), ((5905, 6001), 'PartSegCore.analysis.calculation_plan.MeasurementCalculate', 'MeasurementCalculate', ([], {'channel': '(0)', 'units': 'Units.μm', 'statistic_profile': 'statistic', 'name_prefix': '""""""'}), "(channel=0, units=Units.μm, statistic_profile=statistic,\n name_prefix='')\n", (5925, 6001), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((6188, 6228), 'PartSegCore.analysis.calculation_plan.CalculationPlan', 'CalculationPlan', ([], {'tree': 'tree', 'name': '"""test2"""'}), "(tree=tree, name='test2')\n", (6203, 6228), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((6924, 7023), 'PartSegCore.algorithm_describe_base.SegmentationProfile', 'SegmentationProfile', ([], {'name': '"""test"""', 'algorithm': '"""Lower threshold with watershed"""', 'values': 'parameters'}), "(name='test', algorithm='Lower threshold with watershed',\n values=parameters)\n", (6943, 7023), False, 'from PartSegCore.algorithm_describe_base import SegmentationProfile\n'), ((7042, 7077), 'PartSegCore.analysis.calculation_plan.MaskSuffix', 'MaskSuffix', ([], {'name': '""""""', 'suffix': '"""_mask"""'}), "(name='', suffix='_mask')\n", (7052, 7077), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((8111, 8199), 'PartSegCore.analysis.measurement_calculation.MeasurementProfile', 'MeasurementProfile', ([], {'name': '"""base_measure"""', 'chosen_fields': 'chosen_fields', 'name_prefix': '""""""'}), "(name='base_measure', chosen_fields=chosen_fields,\n name_prefix='')\n", (8129, 8199), False, 'from PartSegCore.analysis.measurement_calculation import MeasurementProfile\n'), ((8226, 8322), 'PartSegCore.analysis.calculation_plan.MeasurementCalculate', 'MeasurementCalculate', ([], {'channel': '(0)', 'units': 'Units.μm', 'statistic_profile': 'statistic', 'name_prefix': '""""""'}), "(channel=0, units=Units.μm, statistic_profile=statistic,\n name_prefix='')\n", (8246, 8322), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((8797, 8883), 'PartSegCore.algorithm_describe_base.SegmentationProfile', 'SegmentationProfile', ([], {'name': '"""test"""', 'algorithm': '"""Lower threshold"""', 'values': 'parameters2'}), "(name='test', algorithm='Lower threshold', values=\n parameters2)\n", (8816, 8883), False, 'from PartSegCore.algorithm_describe_base import SegmentationProfile\n'), ((9905, 10000), 'PartSegCore.analysis.measurement_calculation.MeasurementProfile', 'MeasurementProfile', ([], {'name': '"""base_measure2"""', 'chosen_fields': 'chosen_fields[:]', 'name_prefix': '"""aa_"""'}), "(name='base_measure2', chosen_fields=chosen_fields[:],\n 
name_prefix='aa_')\n", (9923, 10000), False, 'from PartSegCore.analysis.measurement_calculation import MeasurementProfile\n'), ((10028, 10124), 'PartSegCore.analysis.calculation_plan.MeasurementCalculate', 'MeasurementCalculate', ([], {'channel': '(0)', 'units': 'Units.μm', 'statistic_profile': 'statistic', 'name_prefix': '""""""'}), "(channel=0, units=Units.μm, statistic_profile=statistic,\n name_prefix='')\n", (10048, 10124), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((10400, 10495), 'PartSegCore.analysis.measurement_calculation.MeasurementProfile', 'MeasurementProfile', ([], {'name': '"""base_measure3"""', 'chosen_fields': 'chosen_fields[:]', 'name_prefix': '"""bb_"""'}), "(name='base_measure3', chosen_fields=chosen_fields[:],\n name_prefix='bb_')\n", (10418, 10495), False, 'from PartSegCore.analysis.measurement_calculation import MeasurementProfile\n'), ((10523, 10619), 'PartSegCore.analysis.calculation_plan.MeasurementCalculate', 'MeasurementCalculate', ([], {'channel': '(0)', 'units': 'Units.μm', 'statistic_profile': 'statistic', 'name_prefix': '""""""'}), "(channel=0, units=Units.μm, statistic_profile=statistic,\n name_prefix='')\n", (10543, 10619), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((11769, 11808), 'PartSegCore.analysis.calculation_plan.CalculationPlan', 'CalculationPlan', ([], {'tree': 'tree', 'name': '"""test"""'}), "(tree=tree, name='test')\n", (11784, 11808), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((11918, 11938), 'PartSegCore.analysis.batch_processing.batch_backend.CalculationProcess', 'CalculationProcess', ([], {}), '()\n', (11936, 11938), False, 'from PartSegCore.analysis.batch_processing.batch_backend import CalculationManager, CalculationProcess\n'), ((11959, 12032), 'os.path.join', 'os.path.join', (['data_test_dir', '"""stack1_components"""', '"""stack1_component5.tif"""'], {}), "(data_test_dir, 'stack1_components', 'stack1_component5.tif')\n", (11971, 12032), False, 'import os\n'), ((12135, 12172), 'PartSegImage.TiffImageReader.read_image', 'TiffImageReader.read_image', (['file_path'], {}), '(file_path)\n', (12161, 12172), False, 'from PartSegImage import Image, ImageWriter, TiffImageReader\n'), ((12570, 12648), 'os.path.join', 'os.path.join', (['data_test_dir', '"""stack1_components"""', '"""stack1_component*[0-9].tif"""'], {}), "(data_test_dir, 'stack1_components', 'stack1_component*[0-9].tif')\n", (12582, 12648), False, 'import os\n'), ((13103, 13123), 'PartSegCore.analysis.batch_processing.batch_backend.CalculationManager', 'CalculationManager', ([], {}), '()\n', (13121, 13123), False, 'from PartSegCore.analysis.batch_processing.batch_backend import CalculationManager, CalculationProcess\n'), ((13948, 13991), 'os.path.join', 'os.path.join', (['data_test_dir', '"""*nucleus.seg"""'], {}), "(data_test_dir, '*nucleus.seg')\n", (13960, 13991), False, 'import os\n'), ((14013, 14031), 'glob.glob', 'glob', (['file_pattern'], {}), '(file_pattern)\n', (14017, 14031), False, 'from glob import glob\n'), ((14365, 14385), 'PartSegCore.analysis.batch_processing.batch_backend.CalculationManager', 'CalculationManager', ([], {}), '()\n', (14383, 14385), False, 
'from PartSegCore.analysis.batch_processing.batch_backend import CalculationManager, CalculationProcess\n'), ((15102, 15180), 'os.path.join', 'os.path.join', (['data_test_dir', '"""stack1_components"""', '"""stack1_component*[0-9].tif"""'], {}), "(data_test_dir, 'stack1_components', 'stack1_component*[0-9].tif')\n", (15114, 15180), False, 'import os\n'), ((15202, 15220), 'glob.glob', 'glob', (['file_pattern'], {}), '(file_pattern)\n', (15206, 15220), False, 'from glob import glob\n'), ((15554, 15574), 'PartSegCore.analysis.batch_processing.batch_backend.CalculationManager', 'CalculationManager', ([], {}), '()\n', (15572, 15574), False, 'from PartSegCore.analysis.batch_processing.batch_backend import CalculationManager, CalculationProcess\n'), ((8378, 8446), 'PartSegCore.mask_create.MaskProperty', 'MaskProperty', (['RadiusType.NO', '(0)', 'RadiusType.NO', '(0)', '(True)', '(False)', '(False)'], {}), '(RadiusType.NO, 0, RadiusType.NO, 0, True, False, False)\n', (8390, 8446), False, 'from PartSegCore.mask_create import MaskProperty\n'), ((12677, 12695), 'glob.glob', 'glob', (['file_pattern'], {}), '(file_pattern)\n', (12681, 12695), False, 'from glob import glob\n'), ((12712, 12743), 'os.path.basename', 'os.path.basename', (['file_paths[0]'], {}), '(file_paths[0])\n', (12728, 12743), False, 'import os\n'), ((13248, 13263), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (13258, 13263), False, 'import time\n'), ((13379, 13392), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (13389, 13392), False, 'import time\n'), ((13419, 13434), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (13429, 13434), False, 'import time\n'), ((13465, 13498), 'os.path.join', 'os.path.join', (['tmpdir', '"""test.xlsx"""'], {}), "(tmpdir, 'test.xlsx')\n", (13477, 13498), False, 'import os\n'), ((13527, 13560), 'os.path.join', 'os.path.join', (['tmpdir', '"""test.xlsx"""'], {}), "(tmpdir, 'test.xlsx')\n", (13539, 13560), False, 'import os\n'), ((14510, 14525), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (14520, 14525), False, 'import time\n'), ((14609, 14622), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14619, 14622), False, 'import time\n'), ((14649, 14664), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (14659, 14664), False, 'import time\n'), ((14727, 14761), 'os.path.join', 'os.path.join', (['tmpdir', '"""test2.xlsx"""'], {}), "(tmpdir, 'test2.xlsx')\n", (14739, 14761), False, 'import os\n'), ((14790, 14824), 'os.path.join', 'os.path.join', (['tmpdir', '"""test2.xlsx"""'], {}), "(tmpdir, 'test2.xlsx')\n", (14802, 14824), False, 'import os\n'), ((15699, 15714), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (15709, 15714), False, 'import time\n'), ((15882, 15895), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (15892, 15895), False, 'import time\n'), ((15922, 15937), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (15932, 15937), False, 'import time\n'), ((16000, 16034), 'os.path.join', 'os.path.join', (['tmpdir', '"""test3.xlsx"""'], {}), "(tmpdir, 'test3.xlsx')\n", (16012, 16034), False, 'import os\n'), ((16063, 16097), 'os.path.join', 'os.path.join', (['tmpdir', '"""test3.xlsx"""'], {}), "(tmpdir, 'test3.xlsx')\n", (16075, 16097), False, 'import os\n'), ((16190, 16224), 'os.path.join', 'os.path.join', (['tmpdir', '"""test3.xlsx"""'], {}), "(tmpdir, 'test3.xlsx')\n", (16202, 16224), False, 'import os\n'), ((16413, 16447), 'os.path.join', 'os.path.join', (['tmpdir', '"""test3.xlsx"""'], {}), "(tmpdir, 'test3.xlsx')\n", (16425, 16447), False, 
'import os\n'), ((16604, 16638), 'os.path.join', 'os.path.join', (['tmpdir', '"""test3.xlsx"""'], {}), "(tmpdir, 'test3.xlsx')\n", (16616, 16638), False, 'import os\n'), ((16890, 16929), 'os.path.basename', 'os.path.basename', (['calculation.file_path'], {}), '(calculation.file_path)\n', (16906, 16929), False, 'import os\n'), ((16970, 16985), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (16980, 16985), False, 'import time\n'), ((12937, 12970), 'os.path.join', 'os.path.join', (['tmpdir', '"""test.xlsx"""'], {}), "(tmpdir, 'test.xlsx')\n", (12949, 12970), False, 'import os\n'), ((13670, 13704), 'os.path.basename', 'os.path.basename', (['df.name.units[i]'], {}), '(df.name.units[i])\n', (13686, 13704), False, 'import os\n'), ((14198, 14232), 'os.path.join', 'os.path.join', (['tmpdir', '"""test2.xlsx"""'], {}), "(tmpdir, 'test2.xlsx')\n", (14210, 14232), False, 'import os\n'), ((15387, 15421), 'os.path.join', 'os.path.join', (['tmpdir', '"""test3.xlsx"""'], {}), "(tmpdir, 'test3.xlsx')\n", (15399, 15421), False, 'import os\n'), ((2986, 3055), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.ROI, per_component=PerComponent.No)\n", (2990, 3055), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((3566, 3641), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', (['"""Components number"""'], {'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "('Components number', area=AreaType.ROI, per_component=PerComponent.No)\n", (3570, 3641), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((5088, 5157), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.ROI, per_component=PerComponent.No)\n", (5092, 5157), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((5668, 5743), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', (['"""Components number"""'], {'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "('Components number', area=AreaType.ROI, per_component=PerComponent.No)\n", (5672, 5743), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((7211, 7280), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.ROI, per_component=PerComponent.No)\n", (7215, 7280), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((7791, 7866), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', (['"""Components number"""'], {'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "('Components number', area=AreaType.ROI, per_component=PerComponent.No)\n", (7795, 7866), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((7999, 8064), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', (['"""Volume"""'], {'area': 'AreaType.ROI', 'per_component': 'PerComponent.Yes'}), "('Volume', area=AreaType.ROI, per_component=PerComponent.Yes)\n", (8003, 8064), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, 
MeasurementEntry, Node, PerComponent\n'), ((9012, 9081), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.ROI, per_component=PerComponent.No)\n", (9016, 9081), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((9592, 9667), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', (['"""Components number"""'], {'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "('Components number', area=AreaType.ROI, per_component=PerComponent.No)\n", (9596, 9667), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((9792, 9858), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', (['"""Volume"""'], {'area': 'AreaType.Mask', 'per_component': 'PerComponent.Yes'}), "('Volume', area=AreaType.Mask, per_component=PerComponent.Yes)\n", (9796, 9858), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((10289, 10354), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', (['"""Volume"""'], {'area': 'AreaType.ROI', 'per_component': 'PerComponent.Yes'}), "('Volume', area=AreaType.ROI, per_component=PerComponent.Yes)\n", (10293, 10354), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((6119, 6159), 'PartSegCore.analysis.calculation_plan.CalculationTree', 'CalculationTree', (['statistic_calculate', '[]'], {}), '(statistic_calculate, [])\n', (6134, 6159), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((3222, 3291), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.ROI, per_component=PerComponent.No)\n", (3226, 3291), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((3347, 3417), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.Mask', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.Mask, per_component=PerComponent.No)\n", (3351, 3417), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((5324, 5393), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.ROI, per_component=PerComponent.No)\n", (5328, 5393), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((5449, 5519), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.Mask', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.Mask, per_component=PerComponent.No)\n", (5453, 5519), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((7447, 7516), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.ROI, per_component=PerComponent.No)\n", (7451, 7516), False, 'from 
PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((7572, 7642), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.Mask', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.Mask, per_component=PerComponent.No)\n", (7576, 7642), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((9248, 9317), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.ROI', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.ROI, per_component=PerComponent.No)\n", (9252, 9317), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((9373, 9443), 'PartSegCore.analysis.measurement_base.Leaf', 'Leaf', ([], {'name': '"""Volume"""', 'area': 'AreaType.Mask', 'per_component': 'PerComponent.No'}), "(name='Volume', area=AreaType.Mask, per_component=PerComponent.No)\n", (9377, 9443), False, 'from PartSegCore.analysis.measurement_base import AreaType, Leaf, MeasurementEntry, Node, PerComponent\n'), ((4052, 4092), 'PartSegCore.analysis.calculation_plan.CalculationTree', 'CalculationTree', (['statistic_calculate', '[]'], {}), '(statistic_calculate, [])\n', (4067, 4092), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((10945, 10985), 'PartSegCore.analysis.calculation_plan.CalculationTree', 'CalculationTree', (['statistic_calculate', '[]'], {}), '(statistic_calculate, [])\n', (10960, 10985), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((11333, 11374), 'PartSegCore.analysis.calculation_plan.CalculationTree', 'CalculationTree', (['statistic_calculate2', '[]'], {}), '(statistic_calculate2, [])\n', (11348, 11374), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n'), ((11424, 11465), 'PartSegCore.analysis.calculation_plan.CalculationTree', 'CalculationTree', (['statistic_calculate3', '[]'], {}), '(statistic_calculate3, [])\n', (11439, 11465), False, 'from PartSegCore.analysis.calculation_plan import Calculation, CalculationPlan, CalculationTree, FileCalculation, MaskCreate, MaskSuffix, MeasurementCalculate, RootType\n')]
|
'''
total number of delimiters
total number of hyphens
the length of the hostname
the length of the entire URL
the number of dots
a binary feature for each token in the hostname
a binary feature for each token in the path
'''
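# Worked example (editorial illustration; the URL below is hypothetical and not taken
# from the training data). For "http://login-secure.example.tk/verify?id=1" the script
# computes, in the order used when the feature matrix is assembled:
#   delimiters ('-', '_', '?', '=', '&')          -> 3
#   hyphens                                       -> 1
#   URL length                                    -> 42
#   dots                                          -> 2
#   hostname/TLD flag ('tk' is not a known TLD)   -> 1
#   path flag ('verify' is a suspicious word)     -> 1
# giving the feature row [3, 1, 42, 2, 1, 1].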
from urllib.parse import urlparse
import whois
import tldextract
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 10000)
pd.set_option('display.max_rows', 10000)
pd.set_option('display.max_colwidth', 10000)
pd.set_option('display.width',1000)
np.set_printoptions(threshold=np.inf)
All_Known_TLD = ['com', 'at', 'uk', 'pl', 'be', 'biz', 'co', 'jp', 'co_jp', 'cz', 'de', 'eu', 'fr', 'info', 'it', 'ru', 'lv', 'me', 'name', 'net', 'nz', 'org', 'us']
# List of Suspicious Words Present in URL
Suspicious_Words=['secure','account','update','banking','login','click','confirm','password','verify','signin','ebayisapi','lucky','bonus']
# List of Suspicious Top Level Domains in URLs
Suspicious_TLD=['zip','cricket','link','work','party','gq','kim','country','science','tk']
dataset_path = '../Data/train_dataset.csv'
Lexical_Feature_path = 'Lexical_FeatureSet.npy'
# Calculate the total number of delimiters in a URL
def Total_delims(url):
    delim = ['-', '_', '?', '=', '&']
    count = 0
    for i in url:
for j in delim:
if i == j:
count += 1
return count
# Calculate the total number of hyphens in a URL
def Total_hyphens(link):
hyph = '-'
count = 0
for i in link:
if i == hyph:
count += 1
return count
# Calculate the length of hostname in a URL
def Hostname_len(url):
hostname = urlparse(url).netloc
return len(hostname)
# Calculate the length of a URL
def URL_len(url):
return len(url)
# Calculate the number of dots in a URL
def get_dot_num(url):
dot = '.'
count = 0
for i in url:
if i == dot:
count += 1
return count
# Binary hostname feature: 0 if the TLD is in the known-TLD list, 1 otherwise
def is_known_tld(url):
tld = tldextract.extract(url).suffix
if tld in All_Known_TLD:
return 0
else:
return 1
# Binary path feature: 1 if the path contains any suspicious word, 0 otherwise
def is_known_path(url):
    path = urlparse(url).path
    for word in Suspicious_Words:
        if word in path:
            return 1
    return 0
if __name__ == '__main__':
## 1. Read the training Dataset file
df = pd.read_csv(dataset_path, header=0)
# print(df.head())
    ## 2. Extract the lexical features
url = df['URL']
total_delims = []
total_hyphens = []
url_len = []
dot_num = []
host_token = []
path_token = []
for i in url:
total_delims.append(Total_delims(i))
total_hyphens.append(Total_hyphens(i))
url_len.append(URL_len(i))
dot_num.append(get_dot_num(i))
host_token.append(is_known_tld(i))
path_token.append(is_known_path(i))
## 3. Form the Lexical Feature Set
Lexical_Feature = np.array((total_delims,total_hyphens,url_len,dot_num,host_token,path_token)).T
print(Lexical_Feature.shape)
# print(Lexical_Feature[:10,:])
    ## 4. Save the Lexical Feature set
np.save(Lexical_Feature_path, Lexical_Feature)
    ## 5. Load the Lexical Feature set
lexical = np.load(Lexical_Feature_path)
print('lexical.shape=',lexical.shape)
    # print(lexical)
|
[
"numpy.load",
"numpy.save",
"numpy.set_printoptions",
"tldextract.extract",
"pandas.read_csv",
"numpy.array",
"pandas.set_option",
"urllib.parse.urlparse"
] |
[((332, 375), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(10000)'], {}), "('display.max_columns', 10000)\n", (345, 375), True, 'import pandas as pd\n'), ((376, 416), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(10000)'], {}), "('display.max_rows', 10000)\n", (389, 416), True, 'import pandas as pd\n'), ((417, 461), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(10000)'], {}), "('display.max_colwidth', 10000)\n", (430, 461), True, 'import pandas as pd\n'), ((462, 498), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(1000)'], {}), "('display.width', 1000)\n", (475, 498), True, 'import pandas as pd\n'), ((498, 535), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (517, 535), True, 'import numpy as np\n'), ((2380, 2415), 'pandas.read_csv', 'pd.read_csv', (['dataset_path'], {'header': '(0)'}), '(dataset_path, header=0)\n', (2391, 2415), True, 'import pandas as pd\n'), ((3141, 3187), 'numpy.save', 'np.save', (['Lexical_Feature_path', 'Lexical_Feature'], {}), '(Lexical_Feature_path, Lexical_Feature)\n', (3148, 3187), True, 'import numpy as np\n'), ((3241, 3270), 'numpy.load', 'np.load', (['Lexical_Feature_path'], {}), '(Lexical_Feature_path)\n', (3248, 3270), True, 'import numpy as np\n'), ((1624, 1637), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (1632, 1637), False, 'from urllib.parse import urlparse\n'), ((1985, 2008), 'tldextract.extract', 'tldextract.extract', (['url'], {}), '(url)\n', (2003, 2008), False, 'import tldextract\n'), ((2159, 2172), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (2167, 2172), False, 'from urllib.parse import urlparse\n'), ((2950, 3035), 'numpy.array', 'np.array', (['(total_delims, total_hyphens, url_len, dot_num, host_token, path_token)'], {}), '((total_delims, total_hyphens, url_len, dot_num, host_token,\n path_token))\n', (2958, 3035), True, 'import numpy as np\n')]
|
import numpy as np
from scipy import stats
from pm4py.algo.filtering.log.start_activities import start_activities_filter
def start_activities(log):
log_start = start_activities_filter.get_start_activities(log)
n_unique_start_activities = len(log_start)
start_activities_occurrences = list(log_start.values())
start_activities_min = np.min(start_activities_occurrences)
start_activities_max = np.max(start_activities_occurrences)
start_activities_mean = np.mean(start_activities_occurrences)
start_activities_median = np.median(start_activities_occurrences)
start_activities_std = np.std(start_activities_occurrences)
start_activities_variance = np.var(start_activities_occurrences)
start_activities_q1 = np.percentile(start_activities_occurrences, 25)
start_activities_q3 = np.percentile(start_activities_occurrences, 75)
start_activities_iqr = stats.iqr(start_activities_occurrences)
start_activities_skewness = stats.skew(start_activities_occurrences)
start_activities_kurtosis = stats.kurtosis(start_activities_occurrences)
return [
n_unique_start_activities,
start_activities_min,
start_activities_max,
start_activities_mean,
start_activities_median,
start_activities_std,
start_activities_variance,
start_activities_q1,
start_activities_q3,
start_activities_iqr,
start_activities_skewness,
start_activities_kurtosis,
]
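# A minimal usage sketch (editorial; assumes pm4py is installed and a local XES event
# log named "log.xes" exists -- the file name is an assumption, not part of this module):
#
#   from pm4py.objects.log.importer.xes import importer as xes_importer
#   log = xes_importer.apply("log.xes")
#   features = start_activities(log)   # 12 summary statistics of start-activity counts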
|
[
"scipy.stats.iqr",
"pm4py.algo.filtering.log.start_activities.start_activities_filter.get_start_activities",
"numpy.median",
"numpy.std",
"numpy.percentile",
"scipy.stats.skew",
"numpy.min",
"numpy.mean",
"numpy.max",
"scipy.stats.kurtosis",
"numpy.var"
] |
[((166, 215), 'pm4py.algo.filtering.log.start_activities.start_activities_filter.get_start_activities', 'start_activities_filter.get_start_activities', (['log'], {}), '(log)\n', (210, 215), False, 'from pm4py.algo.filtering.log.start_activities import start_activities_filter\n'), ((352, 388), 'numpy.min', 'np.min', (['start_activities_occurrences'], {}), '(start_activities_occurrences)\n', (358, 388), True, 'import numpy as np\n'), ((416, 452), 'numpy.max', 'np.max', (['start_activities_occurrences'], {}), '(start_activities_occurrences)\n', (422, 452), True, 'import numpy as np\n'), ((481, 518), 'numpy.mean', 'np.mean', (['start_activities_occurrences'], {}), '(start_activities_occurrences)\n', (488, 518), True, 'import numpy as np\n'), ((549, 588), 'numpy.median', 'np.median', (['start_activities_occurrences'], {}), '(start_activities_occurrences)\n', (558, 588), True, 'import numpy as np\n'), ((616, 652), 'numpy.std', 'np.std', (['start_activities_occurrences'], {}), '(start_activities_occurrences)\n', (622, 652), True, 'import numpy as np\n'), ((685, 721), 'numpy.var', 'np.var', (['start_activities_occurrences'], {}), '(start_activities_occurrences)\n', (691, 721), True, 'import numpy as np\n'), ((748, 795), 'numpy.percentile', 'np.percentile', (['start_activities_occurrences', '(25)'], {}), '(start_activities_occurrences, 25)\n', (761, 795), True, 'import numpy as np\n'), ((822, 869), 'numpy.percentile', 'np.percentile', (['start_activities_occurrences', '(75)'], {}), '(start_activities_occurrences, 75)\n', (835, 869), True, 'import numpy as np\n'), ((897, 936), 'scipy.stats.iqr', 'stats.iqr', (['start_activities_occurrences'], {}), '(start_activities_occurrences)\n', (906, 936), False, 'from scipy import stats\n'), ((969, 1009), 'scipy.stats.skew', 'stats.skew', (['start_activities_occurrences'], {}), '(start_activities_occurrences)\n', (979, 1009), False, 'from scipy import stats\n'), ((1042, 1086), 'scipy.stats.kurtosis', 'stats.kurtosis', (['start_activities_occurrences'], {}), '(start_activities_occurrences)\n', (1056, 1086), False, 'from scipy import stats\n')]
|
import os
import numpy as np
from demo_utils import plot_image
import svmbir
"""
This file demonstrates the generation of a 3D microscopy phantom followed by sinogram projection and reconstruction using MBIR.
The phantom, sinogram, and reconstruction are then displayed.
"""
# Simulated image parameters
num_rows = 256
num_cols = 64
num_slices = 33
display_slice = 16 # Display slice at z=-0.0
# Simulated sinogram parameters
num_views = 64
tilt_angle = np.pi/3 # Tilt range of +-60deg
# Reconstruction parameters
sharpness = 2.0
T = 0.25
snr_db = 30.0
p = 1.2
# Multi-resolution works much better for limited and sparse view reconstruction
max_resolutions=2 # Use 2 additional resolutions to do reconstruction
# Display parameters
vmin = 0.0
vmax = 1.1
# Generate phantom
phantom = svmbir.phantom.gen_microscopy_sample_3d(num_rows,num_cols,num_slices)
# Generate the array of view angles
angles = np.linspace(-tilt_angle, tilt_angle, num_views)
# Generate sinogram by projecting phantom
sino = svmbir.project(phantom, angles, max(num_rows, num_cols))
# Determine resulting number of views, slices, and channels
(num_views, num_slices, num_channels) = sino.shape
# Perform MBIR reconstruction
recon = svmbir.recon(sino, angles, num_rows=num_rows, num_cols=num_cols, max_resolutions=max_resolutions, T=T, p=p, sharpness=sharpness, snr_db=snr_db )
# Compute Normalized Root Mean Squared Error
nrmse = svmbir.phantom.nrmse(recon, phantom)
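# (Editorial note) svmbir.phantom.nrmse reports a normalized root mean squared error,
# commonly defined as sqrt(mean((recon - phantom)**2)) / sqrt(mean(phantom**2)),
# i.e. ||recon - phantom||_2 / ||phantom||_2; check the svmbir docs for the exact
# normalization used.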
# create output folder
os.makedirs('output', exist_ok=True)
# display phantom
plot_image(phantom[display_slice], title='Microscopy Phantom', filename='output/3D_microscopy_phantom.png', vmin=vmin, vmax=vmax)
# display reconstruction
title = f'Slice {display_slice:d} of Reconstruction with NRMSE={nrmse:.3f}.'
plot_image(recon[display_slice], title=title, filename='output/3D_microscopy_recon.png', vmin=vmin, vmax=vmax)
input("press Enter")
|
[
"os.makedirs",
"svmbir.phantom.nrmse",
"svmbir.recon",
"numpy.linspace",
"demo_utils.plot_image",
"svmbir.phantom.gen_microscopy_sample_3d"
] |
[((792, 863), 'svmbir.phantom.gen_microscopy_sample_3d', 'svmbir.phantom.gen_microscopy_sample_3d', (['num_rows', 'num_cols', 'num_slices'], {}), '(num_rows, num_cols, num_slices)\n', (831, 863), False, 'import svmbir\n'), ((908, 955), 'numpy.linspace', 'np.linspace', (['(-tilt_angle)', 'tilt_angle', 'num_views'], {}), '(-tilt_angle, tilt_angle, num_views)\n', (919, 955), True, 'import numpy as np\n'), ((1214, 1366), 'svmbir.recon', 'svmbir.recon', (['sino', 'angles'], {'num_rows': 'num_rows', 'num_cols': 'num_cols', 'max_resolutions': 'max_resolutions', 'T': 'T', 'p': 'p', 'sharpness': 'sharpness', 'snr_db': 'snr_db'}), '(sino, angles, num_rows=num_rows, num_cols=num_cols,\n max_resolutions=max_resolutions, T=T, p=p, sharpness=sharpness, snr_db=\n snr_db)\n', (1226, 1366), False, 'import svmbir\n'), ((1413, 1449), 'svmbir.phantom.nrmse', 'svmbir.phantom.nrmse', (['recon', 'phantom'], {}), '(recon, phantom)\n', (1433, 1449), False, 'import svmbir\n'), ((1474, 1510), 'os.makedirs', 'os.makedirs', (['"""output"""'], {'exist_ok': '(True)'}), "('output', exist_ok=True)\n", (1485, 1510), False, 'import os\n'), ((1530, 1665), 'demo_utils.plot_image', 'plot_image', (['phantom[display_slice]'], {'title': '"""Shepp Logan Phantom"""', 'filename': '"""output/3D_microscopy_phantom.png"""', 'vmin': 'vmin', 'vmax': 'vmax'}), "(phantom[display_slice], title='Shepp Logan Phantom', filename=\n 'output/3D_microscopy_phantom.png', vmin=vmin, vmax=vmax)\n", (1540, 1665), False, 'from demo_utils import plot_image\n'), ((1764, 1879), 'demo_utils.plot_image', 'plot_image', (['recon[display_slice]'], {'title': 'title', 'filename': '"""output/3D_microscopy_recon.png"""', 'vmin': 'vmin', 'vmax': 'vmax'}), "(recon[display_slice], title=title, filename=\n 'output/3D_microscopy_recon.png', vmin=vmin, vmax=vmax)\n", (1774, 1879), False, 'from demo_utils import plot_image\n')]
|
"""
Copyright 2013 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.constraints.constraint import Constraint
import numpy as np
class Zero(Constraint):
"""A constraint of the form :math:`x = 0`.
The preferred way of creating a ``Zero`` constraint is through
operator overloading. To constrain an expression ``x`` to be zero,
    simply write ``x == 0``; this creates a ``Zero`` constraint with
    ``x`` as its argument.
"""
def __init__(self, expr, constr_id=None):
super(Zero, self).__init__([expr], constr_id)
def __str__(self):
"""Returns a string showing the mathematical constraint.
"""
return self.name()
def __repr__(self):
"""Returns a string with information about the constraint.
"""
return "%s(%s)" % (self.__class__.__name__,
repr(self.args[0]))
@property
def shape(self):
"""int : The shape of the constrained expression."""
return self.args[0].shape
@property
def size(self):
"""int : The size of the constrained expression."""
return self.args[0].size
def name(self):
return "%s == 0" % self.args[0]
def is_dcp(self):
"""A zero constraint is DCP if its argument is affine."""
return self.args[0].is_affine()
def is_dgp(self):
return False
def is_dqcp(self):
return self.is_dcp()
@property
def residual(self):
"""The residual of the constraint.
Returns
-------
Expression
"""
if self.expr.value is None:
return None
return np.abs(self.expr.value)
# The value of the dual variable.
@property
def dual_value(self):
"""NumPy.ndarray : The value of the dual variable.
"""
return self.dual_variables[0].value
# TODO(akshayka): Rename to save_dual_value to avoid collision with
# value as defined above.
def save_value(self, value):
"""Save the value of the dual variable for the constraint's parent.
Args:
value: The value of the dual variable.
"""
self.dual_variables[0].save_value(value)
class Equality(Constraint):
"""A constraint of the form :math:`x = y`.
"""
def __init__(self, lhs, rhs, constr_id=None):
self._expr = lhs - rhs
super(Equality, self).__init__([lhs, rhs], constr_id)
def __str__(self):
"""Returns a string showing the mathematical constraint.
"""
return self.name()
def __repr__(self):
"""Returns a string with information about the constraint.
"""
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self.args[0]), repr(self.args[1]))
def _construct_dual_variables(self, args):
super(Equality, self)._construct_dual_variables([self._expr])
@property
def expr(self):
return self._expr
@property
def shape(self):
"""int : The shape of the constrained expression."""
return self.expr.shape
@property
def size(self):
"""int : The size of the constrained expression."""
return self.expr.size
def name(self):
return "%s == %s" % (self.args[0], self.args[1])
def is_dcp(self):
"""An equality constraint is DCP if its argument is affine."""
return self.expr.is_affine()
def is_dpp(self):
return self.is_dcp() and self.expr.is_dpp()
def is_dgp(self):
return (self.args[0].is_log_log_affine() and
self.args[1].is_log_log_affine())
def is_dqcp(self):
return self.is_dcp()
@property
def residual(self):
"""The residual of the constraint.
Returns
-------
Expression
"""
if self.expr.value is None:
return None
return np.abs(self.expr.value)
@property
def dual_value(self):
"""NumPy.ndarray : The value of the dual variable.
"""
return self.dual_variables[0].value
def save_value(self, value):
"""Save the value of the dual variable for the constraint's parent.
Args:
value: The value of the dual variable.
"""
self.dual_variables[0].save_value(value)
|
[
"numpy.abs"
] |
[((2147, 2170), 'numpy.abs', 'np.abs', (['self.expr.value'], {}), '(self.expr.value)\n', (2153, 2170), True, 'import numpy as np\n'), ((4407, 4430), 'numpy.abs', 'np.abs', (['self.expr.value'], {}), '(self.expr.value)\n', (4413, 4430), True, 'import numpy as np\n')]
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact be computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with a
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC(gamma="scale")
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])


def test_cross_val_predict():
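    # cross_val_predict should reproduce a naive per-fold fit/predict loop and
    # accept default, LeaveOneOut, sparse and unsupervised (no y) inputs.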
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
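    # sparse input should be handled as well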
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
    assert_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
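    # a cv whose test folds do not partition the samples must raise ValueError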
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())


def test_cross_val_predict_input_types():
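    # cross_val_predict should accept sparse matrices, plain Python lists and
    # 3d arrays as inputs.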
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))


def test_cross_val_predict_pandas():
    # check that cross_val_predict doesn't destroy pandas dataframes
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)


def test_sparse_fit_params():
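    # a sparse matrix passed through fit_params should reach the estimator's
    # fit without error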
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))


def test_check_is_partition():
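    # _check_is_partition accepts a permutation of range(n) and rejects
    # sequences that drop or duplicate an index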
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))


def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
[
"sklearn.datasets.load_digits",
"sklearn.datasets.load_iris",
"numpy.sum",
"numpy.abs",
"sklearn.utils.testing.assert_raise_message",
"sklearn.datasets.make_multilabel_classification",
"sklearn.utils.testing.assert_equal",
"sklearn.utils.mocking.CheckingClassifier",
"numpy.ones",
"sklearn.utils.testing.assert_true",
"sklearn.datasets.load_boston",
"numpy.arange",
"sklearn.cross_validation.StratifiedShuffleSplit",
"sklearn.cross_validation.LeavePOut",
"sklearn.svm.SVC",
"sklearn.cross_validation.LeaveOneOut",
"numpy.unique",
"sklearn.utils.testing.assert_raises",
"sklearn.cross_validation.permutation_test_score",
"numpy.lib.arraysetops.intersect1d",
"numpy.zeros_like",
"warnings.simplefilter",
"sklearn.utils.testing.ignore_warnings",
"sklearn.cluster.KMeans",
"sklearn.datasets.make_regression",
"sklearn.cross_validation._safe_split",
"numpy.random.RandomState",
"sklearn.cross_validation.check_cv",
"sklearn.cross_validation._check_is_partition",
"sklearn.utils.testing.assert_warns_message",
"scipy.sparse.coo_matrix",
"sklearn.utils.testing.assert_array_equal",
"warnings.catch_warnings",
"sklearn.metrics.make_scorer",
"numpy.int32",
"numpy.max",
"numpy.intersect1d",
"sklearn.linear_model.Ridge",
"numpy.repeat",
"sklearn.cross_validation.LeavePLabelOut",
"sklearn.cross_validation.cross_val_predict",
"sklearn.externals.six.moves.zip",
"numpy.median",
"sklearn.preprocessing.Imputer",
"numpy.asarray",
"scipy.stats.binom",
"numpy.min",
"sklearn.utils.mocking.MockDataFrame",
"scipy.sparse.csr_matrix",
"sklearn.utils.testing.assert_almost_equal",
"sklearn.cross_validation.KFold",
"numpy.dot",
"sklearn.utils.testing.assert_array_almost_equal",
"numpy.concatenate",
"numpy.delete",
"sklearn.cross_validation.train_test_split",
"sklearn.cross_validation.cross_val_score",
"sklearn.cross_validation.LeaveOneLabelOut",
"sklearn.utils.testing.assert_greater",
"numpy.float32",
"sklearn.utils.testing.assert_less",
"sklearn.cross_validation.LabelKFold",
"sklearn.cross_validation.ShuffleSplit",
"sklearn.cross_validation.LabelShuffleSplit",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.array",
"sklearn.cross_validation.PredefinedSplit",
"numpy.eye",
"sklearn.cross_validation.StratifiedKFold",
"numpy.in1d"
]
'(n_samples // 2 * [0, 1])\n', (20630, 20655), True, 'import numpy as np\n'), ((20675, 20770), 'sklearn.cross_validation.StratifiedShuffleSplit', 'cval.StratifiedShuffleSplit', (['labels'], {'n_iter': 'n_iter', 'test_size': '(1.0 / n_folds)', 'random_state': '(0)'}), '(labels, n_iter=n_iter, test_size=1.0 / n_folds,\n random_state=0)\n', (20702, 20770), True, 'from sklearn import cross_validation as cval\n'), ((21170, 21200), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_splits', 'n_iter'], {}), '(n_splits, n_iter)\n', (21182, 21200), False, 'from sklearn.utils.testing import assert_equal\n'), ((21382, 21399), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (21391, 21399), True, 'import numpy as np\n'), ((21408, 21453), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['splits.test_size', '(1.0 / n_folds)'], {}), '(splits.test_size, 1.0 / n_folds)\n', (21420, 21453), False, 'from sklearn.utils.testing import assert_equal\n'), ((22179, 22206), 'numpy.intersect1d', 'np.intersect1d', (['train', 'test'], {}), '(train, test)\n', (22193, 22206), True, 'import numpy as np\n'), ((22351, 22362), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (22358, 22362), True, 'import numpy as np\n'), ((22444, 22475), 'sklearn.cross_validation.KFold', 'cval.KFold', (['(10)', '(5)'], {'shuffle': '(True)'}), '(10, 5, shuffle=True)\n', (22454, 22475), True, 'from sklearn import cross_validation as cval\n'), ((22877, 22923), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]'], {}), '([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3])\n', (22885, 22923), True, 'import numpy as np\n'), ((22935, 22981), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])\n', (22943, 22981), True, 'import numpy as np\n'), ((22993, 23048), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]'], {}), '([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])\n', (23001, 23048), True, 'import numpy as np\n'), ((23060, 23118), 'numpy.array', 'np.array', (['[1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]'], {}), '([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4])\n', (23068, 23118), True, 'import numpy as np\n'), ((23210, 23280), 'sklearn.cross_validation.LabelShuffleSplit', 'cval.LabelShuffleSplit', (['y', 'n_iter'], {'test_size': 'test_size', 'random_state': '(0)'}), '(y, n_iter, test_size=test_size, random_state=0)\n', (23232, 23280), True, 'from sklearn import cross_validation as cval\n'), ((23474, 23486), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (23483, 23486), True, 'import numpy as np\n'), ((25078, 25100), 'sklearn.externals.six.moves.zip', 'zip', (['llo', 'llo_changing'], {}), '(llo, llo_changing)\n', (25081, 25100), False, 'from sklearn.externals.six.moves import zip\n'), ((25342, 25373), 'sklearn.cross_validation.cross_val_score', 'cval.cross_val_score', (['clf', 'X', 'y'], {}), '(clf, X, y)\n', (25362, 25373), True, 'from sklearn import cross_validation as cval\n'), ((25478, 25516), 'sklearn.cross_validation.cross_val_score', 'cval.cross_val_score', (['clf', 'X_sparse', 'X'], {}), '(clf, X_sparse, X)\n', (25498, 25516), True, 'from sklearn import cross_validation as cval\n'), ((25594, 25632), 'sklearn.cross_validation.cross_val_score', 'cval.cross_val_score', (['clf', 'X_sparse', 'y'], {}), '(clf, X_sparse, y)\n', (25614, 25632), True, 'from sklearn import cross_validation as cval\n'), ((25744, 25782), 'sklearn.cross_validation.cross_val_score', 'cval.cross_val_score', (['clf', 'X_sparse', 'X'], {}), 
'(clf, X_sparse, X)\n', (25764, 25782), True, 'from sklearn import cross_validation as cval\n'), ((27038, 27096), 'sklearn.utils.mocking.CheckingClassifier', 'CheckingClassifier', ([], {'check_X': 'check_df', 'check_y': 'check_series'}), '(check_X=check_df, check_y=check_series)\n', (27056, 27096), False, 'from sklearn.utils.mocking import CheckingClassifier, MockDataFrame\n'), ((27105, 27143), 'sklearn.cross_validation.cross_val_score', 'cval.cross_val_score', (['clf', 'X_df', 'y_ser'], {}), '(clf, X_df, y_ser)\n', (27125, 27143), True, 'from sklearn import cross_validation as cval\n'), ((28704, 28716), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (28713, 28716), True, 'import numpy as np\n'), ((28947, 28985), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['clf.dummy_int', 'DUMMY_INT'], {}), '(clf.dummy_int, DUMMY_INT)\n', (28959, 28985), False, 'from sklearn.utils.testing import assert_equal\n'), ((28994, 29032), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['clf.dummy_str', 'DUMMY_STR'], {}), '(clf.dummy_str, DUMMY_STR)\n', (29006, 29032), False, 'from sklearn.utils.testing import assert_equal\n'), ((29041, 29079), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['clf.dummy_obj', 'DUMMY_OBJ'], {}), '(clf.dummy_obj, DUMMY_OBJ)\n', (29053, 29079), False, 'from sklearn.utils.testing import assert_equal\n'), ((29116, 29134), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (29123, 29134), True, 'import numpy as np\n'), ((29747, 29783), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (29770, 29783), False, 'import warnings\n'), ((29803, 29826), 'sklearn.metrics.make_scorer', 'make_scorer', (['score_func'], {}), '(score_func)\n', (29814, 29826), False, 'from sklearn.metrics import make_scorer\n'), ((29843, 29891), 'sklearn.cross_validation.cross_val_score', 'cval.cross_val_score', (['clf', 'X', 'y'], {'scoring': 'scoring'}), '(clf, X, y, scoring=scoring)\n', (29863, 29891), True, 'from sklearn import cross_validation as cval\n'), ((31428, 31464), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (31451, 31464), False, 'import warnings\n'), ((32540, 32613), 'sklearn.cross_validation.train_test_split', 'cval.train_test_split', (['y'], {'test_size': 'test_size', 'stratify': 'y', 'random_state': '(0)'}), '(y, test_size=test_size, stratify=y, random_state=0)\n', (32561, 32613), True, 'from sklearn import cross_validation as cval\n'), ((33334, 33361), 'sklearn.cross_validation.train_test_split', 'cval.train_test_split', (['X_df'], {}), '(X_df)\n', (33355, 33361), True, 'from sklearn import cross_validation as cval\n'), ((40479, 40499), 'numpy.dot', 'np.dot', (['X_tr', 'X_tr.T'], {}), '(X_tr, X_tr.T)\n', (40485, 40499), True, 'import numpy as np\n'), ((40646, 40666), 'numpy.dot', 'np.dot', (['X_te', 'X_tr.T'], {}), '(X_te, X_tr.T)\n', (40652, 40666), True, 'import numpy as np\n'), ((44364, 44378), 'numpy.median', 'np.median', (['Xsp'], {}), '(Xsp)\n', (44373, 44378), True, 'import numpy as np\n'), ((44540, 44548), 'sklearn.cluster.KMeans', 'KMeans', ([], {}), '()\n', (44546, 44548), False, 'from sklearn.cluster import KMeans\n'), ((44997, 45041), 'sklearn.utils.testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'ConvergenceWarning'}), '(category=ConvergenceWarning)\n', (45012, 45041), False, 'from sklearn.utils.testing import ignore_warnings\n'), ((45065, 45105), 'sklearn.cross_validation.cross_val_predict', 'cval.cross_val_predict', 
(['clf', 'X_sparse', 'X'], {}), '(clf, X_sparse, X)\n', (45087, 45105), True, 'from sklearn import cross_validation as cval\n'), ((45300, 45344), 'sklearn.utils.testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'ConvergenceWarning'}), '(category=ConvergenceWarning)\n', (45315, 45344), False, 'from sklearn.utils.testing import ignore_warnings\n'), ((45368, 45408), 'sklearn.cross_validation.cross_val_predict', 'cval.cross_val_predict', (['clf', 'X_sparse', 'X'], {}), '(clf, X_sparse, X)\n', (45390, 45408), True, 'from sklearn import cross_validation as cval\n'), ((46563, 46621), 'sklearn.utils.mocking.CheckingClassifier', 'CheckingClassifier', ([], {'check_X': 'check_df', 'check_y': 'check_series'}), '(check_X=check_df, check_y=check_series)\n', (46581, 46621), False, 'from sklearn.utils.mocking import CheckingClassifier, MockDataFrame\n'), ((46630, 46670), 'sklearn.cross_validation.cross_val_predict', 'cval.cross_val_predict', (['clf', 'X_df', 'y_ser'], {}), '(clf, X_df, y_ser)\n', (46652, 46670), True, 'from sklearn import cross_validation as cval\n'), ((46950, 46960), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (46957, 46960), True, 'import numpy as np\n'), ((47034, 47066), 'sklearn.cross_validation._check_is_partition', 'cval._check_is_partition', (['p', '(100)'], {}), '(p, 100)\n', (47058, 47066), True, 'from sklearn import cross_validation as cval\n'), ((47166, 47198), 'sklearn.cross_validation._check_is_partition', 'cval._check_is_partition', (['p', '(100)'], {}), '(p, 100)\n', (47190, 47198), True, 'from sklearn import cross_validation as cval\n'), ((47677, 47697), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (47680, 47697), False, 'from sklearn.svm import SVC\n'), ((4653, 4666), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (4661, 4666), True, 'import numpy as np\n'), ((4668, 4681), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4676, 4681), True, 'import numpy as np\n'), ((10665, 10678), 'numpy.sum', 'np.sum', (['sizes'], {}), '(sizes)\n', (10671, 10678), True, 'import numpy as np\n'), ((10958, 11010), 'sklearn.cross_validation.StratifiedKFold', 'cval.StratifiedKFold', (['labels[:i]', '(3)'], {'shuffle': 'shuffle'}), '(labels[:i], 3, shuffle=shuffle)\n', (10978, 11010), True, 'from sklearn import cross_validation as cval\n'), ((11827, 11865), 'numpy.concatenate', 'np.concatenate', (['(all_folds, ind[test])'], {}), '((all_folds, ind[test]))\n', (11841, 11865), True, 'import numpy as np\n'), ((13152, 13192), 'sklearn.cross_validation.cross_val_score', 'cval.cross_val_score', (['model', 'X', 'y'], {'cv': 'cv'}), '(model, X, y, cv=cv)\n', (13172, 13192), True, 'from sklearn import cross_validation as cval\n'), ((13554, 13594), 'sklearn.cross_validation.cross_val_score', 'cval.cross_val_score', (['model', 'X', 'y'], {'cv': 'cv'}), '(model, X, y, cv=cv)\n', (13574, 13594), True, 'from sklearn import cross_validation as cval\n'), ((13713, 13753), 'sklearn.cross_validation.cross_val_score', 'cval.cross_val_score', (['model', 'X', 'y'], {'cv': 'cv'}), '(model, X, y, cv=cv)\n', (13733, 13753), True, 'from sklearn import cross_validation as cval\n'), ((14232, 14272), 'sklearn.cross_validation.cross_val_score', 'cval.cross_val_score', (['model', 'X', 'y'], {'cv': 'cv'}), '(model, X, y, cv=cv)\n', (14252, 14272), True, 'from sklearn import cross_validation as cval\n'), ((17124, 17135), 'numpy.int32', 'np.int32', (['(2)'], {}), '(2)\n', (17132, 17135), True, 'import numpy as np\n'), ((19687, 19732), 
'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['p_train', 'p_test', '(1)'], {}), '(p_train, p_test, 1)\n', (19712, 19732), False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((20431, 20526), 'sklearn.utils.testing.assert_true', 'assert_true', (['(p > threshold)', '"""An index is not drawn with chance corresponding to even draws"""'], {}), "(p > threshold,\n 'An index is not drawn with chance corresponding to even draws')\n", (20442, 20526), False, 'from sklearn.utils.testing import assert_true\n'), ((23624, 23643), 'numpy.unique', 'np.unique', (['y[train]'], {}), '(y[train])\n', (23633, 23643), True, 'import numpy as np\n'), ((23672, 23690), 'numpy.unique', 'np.unique', (['y[test]'], {}), '(y[test])\n', (23681, 23690), True, 'import numpy as np\n'), ((23903, 23953), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['(y[train].size + y[test].size)', 'y.size'], {}), '(y[train].size + y[test].size, y.size)\n', (23915, 23953), False, 'from sklearn.utils.testing import assert_equal\n'), ((25114, 25151), 'sklearn.utils.testing.assert_array_equal', 'assert_array_equal', (['train', 'train_chan'], {}), '(train, train_chan)\n', (25132, 25151), False, 'from sklearn.utils.testing import assert_array_equal\n'), ((25164, 25199), 'sklearn.utils.testing.assert_array_equal', 'assert_array_equal', (['test', 'test_chan'], {}), '(test, test_chan)\n', (25182, 25199), False, 'from sklearn.utils.testing import assert_array_equal\n'), ((29169, 29187), 'numpy.ones', 'np.ones', (['n_classes'], {}), '(n_classes)\n', (29176, 29187), True, 'import numpy as np\n'), ((30501, 30516), 'numpy.float32', 'np.float32', (['(0.6)'], {}), '(0.6)\n', (30511, 30516), True, 'import numpy as np\n'), ((30529, 30544), 'numpy.float32', 'np.float32', (['(0.6)'], {}), '(0.6)\n', (30539, 30544), True, 'import numpy as np\n'), ((30971, 30985), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (30980, 30985), True, 'import numpy as np\n'), ((31988, 32013), 'numpy.arange', 'np.arange', (['(10 * 5 * 3 * 2)'], {}), '(10 * 5 * 3 * 2)\n', (31997, 32013), True, 'import numpy as np\n'), ((32046, 32068), 'numpy.arange', 'np.arange', (['(10 * 7 * 11)'], {}), '(10 * 7 * 11)\n', (32055, 32068), True, 'import numpy as np\n'), ((32939, 32957), 'numpy.sum', 'np.sum', (['(train == 1)'], {}), '(train == 1)\n', (32945, 32957), True, 'import numpy as np\n'), ((32959, 32977), 'numpy.sum', 'np.sum', (['(train == 2)'], {}), '(train == 2)\n', (32965, 32977), True, 'import numpy as np\n'), ((36369, 36384), 'numpy.ones', 'np.ones', (['y.size'], {}), '(y.size)\n', (36376, 36384), True, 'import numpy as np\n'), ((36800, 36815), 'numpy.ones', 'np.ones', (['y.size'], {}), '(y.size)\n', (36807, 36815), True, 'import numpy as np\n'), ((40778, 40810), 'numpy.arange', 'np.arange', (['(200)'], {'dtype': 'np.float64'}), '(200, dtype=np.float64)\n', (40787, 40810), True, 'import numpy as np\n'), ((41179, 41211), 'numpy.arange', 'np.arange', (['(200)'], {'dtype': 'np.float64'}), '(200, dtype=np.float64)\n', (41188, 41211), True, 'import numpy as np\n'), ((41479, 41511), 'numpy.arange', 'np.arange', (['(200)'], {'dtype': 'np.float64'}), '(200, dtype=np.float64)\n', (41488, 41511), True, 'import numpy as np\n'), ((46840, 46858), 'numpy.eye', 'np.eye', (['X.shape[0]'], {}), '(X.shape[0])\n', (46846, 46858), True, 'import numpy as np\n'), ((47110, 47126), 'numpy.delete', 'np.delete', (['p', '(23)'], {}), '(p, 23)\n', (47119, 47126), True, 'import numpy as np\n'), ((4456, 4470), 'numpy.abs', 'np.abs', 
(['self.a'], {}), '(self.a)\n', (4462, 4470), True, 'import numpy as np\n'), ((11239, 11252), 'numpy.sum', 'np.sum', (['sizes'], {}), '(sizes)\n', (11245, 11252), True, 'import numpy as np\n'), ((15109, 15142), 'numpy.unique', 'np.unique', (['folds[labels == label]'], {}), '(folds[labels == label])\n', (15118, 15142), True, 'import numpy as np\n'), ((15341, 15384), 'numpy.intersect1d', 'np.intersect1d', (['labels[train]', 'labels[test]'], {}), '(labels[train], labels[test])\n', (15355, 15384), True, 'import numpy as np\n'), ((16535, 16568), 'numpy.unique', 'np.unique', (['folds[labels == label]'], {}), '(folds[labels == label])\n', (16544, 16568), True, 'import numpy as np\n'), ((16721, 16764), 'numpy.intersect1d', 'np.intersect1d', (['labels[train]', 'labels[test]'], {}), '(labels[train], labels[test])\n', (16735, 16764), True, 'import numpy as np\n'), ((19260, 19279), 'numpy.unique', 'np.unique', (['y[train]'], {}), '(y[train])\n', (19269, 19279), True, 'import numpy as np\n'), ((19281, 19299), 'numpy.unique', 'np.unique', (['y[test]'], {}), '(y[test])\n', (19290, 19299), True, 'import numpy as np\n'), ((19917, 19960), 'numpy.lib.arraysetops.intersect1d', 'np.lib.arraysetops.intersect1d', (['train', 'test'], {}), '(train, test)\n', (19947, 19960), True, 'import numpy as np\n'), ((24040, 24067), 'numpy.intersect1d', 'np.intersect1d', (['train', 'test'], {}), '(train, test)\n', (24054, 24067), True, 'import numpy as np\n'), ((40929, 40975), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'strategy': '"""mean"""', 'missing_values': '"""NaN"""'}), "(strategy='mean', missing_values='NaN')\n", (40936, 40975), False, 'from sklearn.preprocessing import Imputer\n'), ((41630, 41676), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'strategy': '"""mean"""', 'missing_values': '"""NaN"""'}), "(strategy='mean', missing_values='NaN')\n", (41637, 41676), False, 'from sklearn.preprocessing import Imputer\n'), ((9811, 9837), 'numpy.sum', 'np.sum', (['(labels[train] == 4)'], {}), '(labels[train] == 4)\n', (9817, 9837), True, 'import numpy as np\n'), ((9925, 9951), 'numpy.sum', 'np.sum', (['(labels[train] == 0)'], {}), '(labels[train] == 0)\n', (9931, 9951), True, 'import numpy as np\n'), ((10039, 10065), 'numpy.sum', 'np.sum', (['(labels[train] == 1)'], {}), '(labels[train] == 1)\n', (10045, 10065), True, 'import numpy as np\n'), ((10153, 10178), 'numpy.sum', 'np.sum', (['(labels[test] == 4)'], {}), '(labels[test] == 4)\n', (10159, 10178), True, 'import numpy as np\n'), ((10233, 10258), 'numpy.sum', 'np.sum', (['(labels[test] == 0)'], {}), '(labels[test] == 0)\n', (10239, 10258), True, 'import numpy as np\n'), ((10313, 10338), 'numpy.sum', 'np.sum', (['(labels[test] == 1)'], {}), '(labels[test] == 1)\n', (10319, 10338), True, 'import numpy as np\n'), ((10607, 10620), 'numpy.max', 'np.max', (['sizes'], {}), '(sizes)\n', (10613, 10620), True, 'import numpy as np\n'), ((10623, 10636), 'numpy.min', 'np.min', (['sizes'], {}), '(sizes)\n', (10629, 10636), True, 'import numpy as np\n'), ((11563, 11577), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (11572, 11577), True, 'import numpy as np\n'), ((11620, 11639), 'numpy.arange', 'np.arange', (['(100)', '(200)'], {}), '(100, 200)\n', (11629, 11639), True, 'import numpy as np\n'), ((11682, 11701), 'numpy.arange', 'np.arange', (['(200)', '(300)'], {}), '(200, 300)\n', (11691, 11701), True, 'import numpy as np\n'), ((23723, 23755), 'numpy.in1d', 'np.in1d', (['y[train]', 'y_test_unique'], {}), '(y[train], y_test_unique)\n', (23730, 23755), True, 'import 
numpy as np\n'), ((23790, 23822), 'numpy.in1d', 'np.in1d', (['y[test]', 'y_train_unique'], {}), '(y[test], y_train_unique)\n', (23797, 23822), True, 'import numpy as np\n'), ((44654, 44676), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (44662, 44676), True, 'import numpy as np\n'), ((44678, 44703), 'numpy.array', 'np.array', (['[4, 5, 6, 7, 8]'], {}), '([4, 5, 6, 7, 8])\n', (44686, 44703), True, 'import numpy as np\n'), ((3273, 3285), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3282, 3285), True, 'import numpy as np\n'), ((3504, 3516), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3513, 3516), True, 'import numpy as np\n'), ((11177, 11190), 'numpy.max', 'np.max', (['sizes'], {}), '(sizes)\n', (11183, 11190), True, 'import numpy as np\n'), ((11193, 11206), 'numpy.min', 'np.min', (['sizes'], {}), '(sizes)\n', (11199, 11206), True, 'import numpy as np\n'), ((19391, 19431), 'numpy.unique', 'np.unique', (['y[train]'], {'return_inverse': '(True)'}), '(y[train], return_inverse=True)\n', (19400, 19431), True, 'import numpy as np\n'), ((19552, 19591), 'numpy.unique', 'np.unique', (['y[test]'], {'return_inverse': '(True)'}), '(y[test], return_inverse=True)\n', (19561, 19591), True, 'import numpy as np\n'), ((38199, 38216), 'numpy.asarray', 'np.asarray', (['train'], {}), '(train)\n', (38209, 38216), True, 'import numpy as np\n'), ((38263, 38280), 'numpy.asarray', 'np.asarray', (['train'], {}), '(train)\n', (38273, 38280), True, 'import numpy as np\n'), ((38944, 38961), 'numpy.asarray', 'np.asarray', (['train'], {}), '(train)\n', (38954, 38961), True, 'import numpy as np\n'), ((39008, 39025), 'numpy.asarray', 'np.asarray', (['train'], {}), '(train)\n', (39018, 39025), True, 'import numpy as np\n')]
|
import numpy as np
from example import algs
def test_pointless_sort():
# generate random vector of length 10
x = np.random.rand(10)
# check that pointless_sort always returns [1,2,3]
assert np.array_equal(algs.pointless_sort(x), np.array([1,2,3]))
# generate a new random vector of length 10
x = np.random.rand(10)
# check that pointless_sort still returns [1,2,3]
assert np.array_equal(algs.pointless_sort(x), np.array([1,2,3]))
def test_bubblesort():
# Actually test bubblesort here. It might be useful to think about
# some edge cases for your code, where it might fail. Some things to
# think about: (1) does your code handle 0-element arrays without
# failing, (2) does your code handle characters?
w, x, y, z = np.array([1,2,4,0,1]), np.array([]), np.array([0]), np.array([2,1,0,-1,-2])
assert np.array_equal(algs.bubblesort(w)[0], sorted(w))
assert np.array_equal(algs.bubblesort(x)[0], sorted(x))
assert np.array_equal(algs.bubblesort(y)[0], sorted(y))
assert np.array_equal(algs.bubblesort(z)[0], sorted(z))
def test_quicksort():
w, x, y, z = np.array([1,2,4,0,1]), np.array([]), np.array([0]), np.array([2,1,0,-1,-2])
assert np.array_equal(algs.quicksort(w, 0, len(w)-1, 0, 0)[0], sorted(w))
assert np.array_equal(algs.quicksort(x, 0, len(x)-1, 0, 0)[0], sorted(x))
assert np.array_equal(algs.quicksort(y, 0, len(y)-1, 0, 0)[0], sorted(y))
assert np.array_equal(algs.quicksort(z, 0, len(z)-1, 0, 0)[0], sorted(z))
|
[
"numpy.random.rand",
"example.algs.bubblesort",
"numpy.array",
"example.algs.pointless_sort"
] |
[((122, 140), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (136, 140), True, 'import numpy as np\n'), ((323, 341), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (337, 341), True, 'import numpy as np\n'), ((223, 245), 'example.algs.pointless_sort', 'algs.pointless_sort', (['x'], {}), '(x)\n', (242, 245), False, 'from example import algs\n'), ((247, 266), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (255, 266), True, 'import numpy as np\n'), ((423, 445), 'example.algs.pointless_sort', 'algs.pointless_sort', (['x'], {}), '(x)\n', (442, 445), False, 'from example import algs\n'), ((447, 466), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (455, 466), True, 'import numpy as np\n'), ((775, 800), 'numpy.array', 'np.array', (['[1, 2, 4, 0, 1]'], {}), '([1, 2, 4, 0, 1])\n', (783, 800), True, 'import numpy as np\n'), ((798, 810), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (806, 810), True, 'import numpy as np\n'), ((812, 825), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (820, 825), True, 'import numpy as np\n'), ((827, 854), 'numpy.array', 'np.array', (['[2, 1, 0, -1, -2]'], {}), '([2, 1, 0, -1, -2])\n', (835, 854), True, 'import numpy as np\n'), ((1136, 1161), 'numpy.array', 'np.array', (['[1, 2, 4, 0, 1]'], {}), '([1, 2, 4, 0, 1])\n', (1144, 1161), True, 'import numpy as np\n'), ((1159, 1171), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1167, 1171), True, 'import numpy as np\n'), ((1173, 1186), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1181, 1186), True, 'import numpy as np\n'), ((1188, 1215), 'numpy.array', 'np.array', (['[2, 1, 0, -1, -2]'], {}), '([2, 1, 0, -1, -2])\n', (1196, 1215), True, 'import numpy as np\n'), ((882, 900), 'example.algs.bubblesort', 'algs.bubblesort', (['w'], {}), '(w)\n', (897, 900), False, 'from example import algs\n'), ((942, 960), 'example.algs.bubblesort', 'algs.bubblesort', (['x'], {}), '(x)\n', (957, 960), False, 'from example import algs\n'), ((1002, 1020), 'example.algs.bubblesort', 'algs.bubblesort', (['y'], {}), '(y)\n', (1017, 1020), False, 'from example import algs\n'), ((1062, 1080), 'example.algs.bubblesort', 'algs.bubblesort', (['z'], {}), '(z)\n', (1077, 1080), False, 'from example import algs\n')]
|
# -*- coding: utf-8 -*-
"""
Generating image window by weighted sampling map from input image
This can also be considered as a `weighted random cropping` layer of the
input image
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from niftynet.engine.image_window import N_SPATIAL
from niftynet.engine.sampler_uniform import UniformSampler
class WeightedSampler(UniformSampler):
"""
This class generators samples from a user provided
frequency map for each input volume
The sampling likelihood of each voxel (and window around)
is proportional to its frequency
    This is implemented in a closed form using cumulative histograms
    over the first three (spatial) dims of the image, for efficiency.
This layer can be considered as a `weighted random cropping` layer of the
input image.
"""
def __init__(self,
reader,
data_param,
batch_size,
windows_per_image,
queue_length=10):
UniformSampler.__init__(self,
reader=reader,
data_param=data_param,
batch_size=batch_size,
windows_per_image=windows_per_image,
queue_length=queue_length)
tf.logging.info('Initialised weighted sampler window instance')
self.spatial_coordinates_generator = weighted_spatial_coordinates
def weighted_spatial_coordinates(subject_id,
data,
img_sizes,
win_sizes,
n_samples=1):
"""
This is the function that actually does the cumulative histogram
and sampling.
also, note that win_sizes could be different,
for example in segmentation network
input image window size is 32x32x10,
training label window is 16x16x10, the network reduces x-y plane
spatial resolution.
This function handles this situation by first find the largest
window across these window definitions, and generate the coordinates.
These coordinates are then adjusted for each of the
smaller window sizes (the output windows are concentric).
"""
# requiring a data['sampler'] as the frequency map.
# the shape should be [x, y, z, 1, 1]
if data is None or data.get('sampler', None) is None:
tf.logging.fatal("input weight map not found. please check "
"the configuration file")
raise RuntimeError
n_samples = max(n_samples, 1)
uniq_spatial_size = set([img_size[:N_SPATIAL]
for img_size in list(img_sizes.values())])
if len(uniq_spatial_size) > 1:
tf.logging.fatal("Don't know how to generate sampling "
"locations: Spatial dimensions of the "
"grouped input sources are not "
"consistent. %s", uniq_spatial_size)
raise NotImplementedError
uniq_spatial_size = uniq_spatial_size.pop()
# find spatial window location based on the largest spatial window
spatial_win_sizes = [win_size[:N_SPATIAL]
for win_size in win_sizes.values()]
spatial_win_sizes = np.asarray(spatial_win_sizes, dtype=np.int32)
max_spatial_win = np.max(spatial_win_sizes, axis=0)
# testing window size
for i in range(0, N_SPATIAL):
assert uniq_spatial_size[i] >= max_spatial_win[i], \
"window size {} is larger than image size {}".format(
max_spatial_win[i], uniq_spatial_size[i])
# get cropped version of the input weight map where the centre of
# the window might be. If the centre of the window was outside of
# this crop area, the patch would be outside of the field of view
half_win = np.floor(max_spatial_win / 2).astype(int)
try:
cropped_map = data['sampler'][
half_win[0]:-half_win[0] if max_spatial_win[0] > 1 else 1,
half_win[1]:-half_win[1] if max_spatial_win[1] > 1 else 1,
half_win[2]:-half_win[2] if max_spatial_win[2] > 1 else 1,
0, 0]
assert np.all(cropped_map.shape) > 0
except (IndexError, KeyError):
tf.logging.fatal("incompatible map: %s", data['sampler'].shape)
raise
except AssertionError:
tf.logging.fatal(
"incompatible window size for weighted sampler. "
"Please use smaller (fully-specified) spatial window sizes")
raise
# Get the cumulative sum of the normalised sorted intensities
# i.e. first sort the sampling frequencies, normalise them
# to sum to one, and then accumulate them in order
flatten_map = cropped_map.flatten()
sorted_data = np.cumsum(np.divide(np.sort(flatten_map), flatten_map.sum()))
# get the sorting indexes to that we can invert the sorting later on.
sorted_indexes = np.argsort(flatten_map)
middle_coords = np.zeros((n_samples, N_SPATIAL), dtype=np.int32)
for sample in range(0, n_samples):
# get n_sample from the cumulative histogram, spaced by 1/n_samples,
# plus a random perturbation to give us a stochastic sampler
sample_ratio = 1 - (np.random.random() + sample) / (n_samples + 1)
        # find the index where the cumulative histogram first exceeds the sample threshold
try:
sample_index = np.argmax(sorted_data >= sample_ratio)
except ValueError:
tf.logging.fatal("unable to choose sampling window based on "
"the current frequency map.")
raise
# invert the sample index to the pre-sorted index
inverted_sample_index = sorted_indexes[sample_index]
# get the x,y,z coordinates on the cropped_map
# (note: we need to re-shift it later due to the crop)
middle_coords[sample, :N_SPATIAL] = np.unravel_index(
inverted_sample_index, cropped_map.shape)[:N_SPATIAL]
# adjust max spatial coordinates based on each mod spatial window size
all_coordinates = {}
for mod in list(win_sizes):
win_size = win_sizes[mod][:N_SPATIAL]
half_win_diff = np.floor((max_spatial_win - win_size) / 2.0)
# shift starting coordinates of the window
# Note that we did not shift the centre coordinates
# above to the corner of the window
# because the shift is the same as the cropping amount
# Also, we need to add half_win_diff/2 so that smaller windows
# are centred within the large windows
spatial_coords = np.zeros((n_samples, N_SPATIAL * 2), dtype=np.int32)
spatial_coords[:, :N_SPATIAL] = \
middle_coords[:, :N_SPATIAL] + half_win_diff[:N_SPATIAL]
# the opposite corner of the window is
# just adding the mod specific window size
spatial_coords[:, N_SPATIAL:] = \
spatial_coords[:, :N_SPATIAL] + win_size[:N_SPATIAL]
# include the subject id
subject_id = np.ones((n_samples,), dtype=np.int32) * subject_id
spatial_coords = np.append(subject_id[:, None], spatial_coords, axis=1)
all_coordinates[mod] = spatial_coords
return all_coordinates
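# --- Illustrative sketch (not part of the sampler above): the same
# cumulative-histogram / inverse-CDF trick reduced to a flat 1-D weight map.
# The function name and arguments below are hypothetical.
def _demo_weighted_pick(weights, n_samples=5, seed=0):
    """Pick indices with likelihood proportional to `weights`, mirroring the
    sort / cumsum / argmax steps of weighted_spatial_coordinates."""
    rng = np.random.RandomState(seed)
    flat = np.asarray(weights, dtype=np.float64).flatten()
    sorted_indexes = np.argsort(flat)                    # to invert the sort later
    sorted_cdf = np.cumsum(np.sort(flat) / flat.sum())     # normalised cumulative histogram
    picks = []
    for sample in range(n_samples):
        # evenly spaced thresholds plus a random perturbation, as above
        ratio = 1 - (rng.random_sample() + sample) / (n_samples + 1)
        picks.append(int(sorted_indexes[np.argmax(sorted_cdf >= ratio)]))
    return picks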
|
[
"tensorflow.logging.info",
"tensorflow.logging.fatal",
"numpy.argmax",
"numpy.asarray",
"numpy.floor",
"numpy.zeros",
"numpy.unravel_index",
"numpy.ones",
"numpy.argsort",
"niftynet.engine.sampler_uniform.UniformSampler.__init__",
"numpy.max",
"numpy.append",
"numpy.sort",
"numpy.random.random",
"numpy.all"
] |
[((3368, 3413), 'numpy.asarray', 'np.asarray', (['spatial_win_sizes'], {'dtype': 'np.int32'}), '(spatial_win_sizes, dtype=np.int32)\n', (3378, 3413), True, 'import numpy as np\n'), ((3436, 3469), 'numpy.max', 'np.max', (['spatial_win_sizes'], {'axis': '(0)'}), '(spatial_win_sizes, axis=0)\n', (3442, 3469), True, 'import numpy as np\n'), ((5030, 5053), 'numpy.argsort', 'np.argsort', (['flatten_map'], {}), '(flatten_map)\n', (5040, 5053), True, 'import numpy as np\n'), ((5075, 5123), 'numpy.zeros', 'np.zeros', (['(n_samples, N_SPATIAL)'], {'dtype': 'np.int32'}), '((n_samples, N_SPATIAL), dtype=np.int32)\n', (5083, 5123), True, 'import numpy as np\n'), ((1071, 1233), 'niftynet.engine.sampler_uniform.UniformSampler.__init__', 'UniformSampler.__init__', (['self'], {'reader': 'reader', 'data_param': 'data_param', 'batch_size': 'batch_size', 'windows_per_image': 'windows_per_image', 'queue_length': 'queue_length'}), '(self, reader=reader, data_param=data_param,\n batch_size=batch_size, windows_per_image=windows_per_image,\n queue_length=queue_length)\n', (1094, 1233), False, 'from niftynet.engine.sampler_uniform import UniformSampler\n'), ((1394, 1457), 'tensorflow.logging.info', 'tf.logging.info', (['"""Initialised weighted sampler window instance"""'], {}), "('Initialised weighted sampler window instance')\n", (1409, 1457), True, 'import tensorflow as tf\n'), ((2504, 2592), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""input weight map not found. please check the configuration file"""'], {}), "(\n 'input weight map not found. please check the configuration file')\n", (2520, 2592), True, 'import tensorflow as tf\n'), ((2842, 3008), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""Don\'t know how to generate sampling locations: Spatial dimensions of the grouped input sources are not consistent. %s"""', 'uniq_spatial_size'], {}), '(\n "Don\'t know how to generate sampling locations: Spatial dimensions of the grouped input sources are not consistent. %s"\n , uniq_spatial_size)\n', (2858, 3008), True, 'import tensorflow as tf\n'), ((6327, 6371), 'numpy.floor', 'np.floor', (['((max_spatial_win - win_size) / 2.0)'], {}), '((max_spatial_win - win_size) / 2.0)\n', (6335, 6371), True, 'import numpy as np\n'), ((6734, 6786), 'numpy.zeros', 'np.zeros', (['(n_samples, N_SPATIAL * 2)'], {'dtype': 'np.int32'}), '((n_samples, N_SPATIAL * 2), dtype=np.int32)\n', (6742, 6786), True, 'import numpy as np\n'), ((7234, 7288), 'numpy.append', 'np.append', (['subject_id[:, None]', 'spatial_coords'], {'axis': '(1)'}), '(subject_id[:, None], spatial_coords, axis=1)\n', (7243, 7288), True, 'import numpy as np\n'), ((3942, 3971), 'numpy.floor', 'np.floor', (['(max_spatial_win / 2)'], {}), '(max_spatial_win / 2)\n', (3950, 3971), True, 'import numpy as np\n'), ((4278, 4303), 'numpy.all', 'np.all', (['cropped_map.shape'], {}), '(cropped_map.shape)\n', (4284, 4303), True, 'import numpy as np\n'), ((4351, 4414), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""incompatible map: %s"""', "data['sampler'].shape"], {}), "('incompatible map: %s', data['sampler'].shape)\n", (4367, 4414), True, 'import tensorflow as tf\n'), ((4464, 4598), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""incompatible window size for weighted sampler. Please use smaller (fully-specified) spatial window sizes"""'], {}), "(\n 'incompatible window size for weighted sampler. 
Please use smaller (fully-specified) spatial window sizes'\n )\n", (4480, 4598), True, 'import tensorflow as tf\n'), ((4893, 4913), 'numpy.sort', 'np.sort', (['flatten_map'], {}), '(flatten_map)\n', (4900, 4913), True, 'import numpy as np\n'), ((5542, 5580), 'numpy.argmax', 'np.argmax', (['(sorted_data >= sample_ratio)'], {}), '(sorted_data >= sample_ratio)\n', (5551, 5580), True, 'import numpy as np\n'), ((6040, 6098), 'numpy.unravel_index', 'np.unravel_index', (['inverted_sample_index', 'cropped_map.shape'], {}), '(inverted_sample_index, cropped_map.shape)\n', (6056, 6098), True, 'import numpy as np\n'), ((7158, 7195), 'numpy.ones', 'np.ones', (['(n_samples,)'], {'dtype': 'np.int32'}), '((n_samples,), dtype=np.int32)\n', (7165, 7195), True, 'import numpy as np\n'), ((5620, 5713), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""unable to choose sampling window based on the current frequency map."""'], {}), "(\n 'unable to choose sampling window based on the current frequency map.')\n", (5636, 5713), True, 'import tensorflow as tf\n'), ((5337, 5355), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5353, 5355), True, 'import numpy as np\n')]
|
import argparse
import cv2 as cv
import numpy as np
from trainer.config import img_rows, img_cols
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-x0")
ap.add_argument("-y0")
ap.add_argument("-x1")
ap.add_argument("-y1")
args = vars(ap.parse_args())
x0 = int(args["x0"])
x1 = int(args["x1"])
y0 = int(args["y0"])
y1 = int(args["y1"])
trimap = np.zeros((img_rows, img_cols, 1), dtype=np.uint8)
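    # 128 marks the user-given rectangle as the "unknown" region in the usual
    # trimap convention (0 = definite background, 255 = definite foreground)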
    trimap[x0:x1, y0:y1, 0] = 128
    cv.imshow('trimap', trimap)
    cv.imwrite('made-trimap.png', trimap)
    cv.waitKey(0)  # without this the preview window closes immediately
|
[
"cv2.imwrite",
"cv2.imshow",
"numpy.zeros",
"argparse.ArgumentParser"
] |
[((136, 161), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (159, 161), False, 'import argparse\n'), ((417, 466), 'numpy.zeros', 'np.zeros', (['(img_rows, img_cols, 1)'], {'dtype': 'np.uint8'}), '((img_rows, img_cols, 1), dtype=np.uint8)\n', (425, 466), True, 'import numpy as np\n'), ((507, 534), 'cv2.imshow', 'cv.imshow', (['"""trimap"""', 'trimap'], {}), "('trimap', trimap)\n", (516, 534), True, 'import cv2 as cv\n'), ((539, 576), 'cv2.imwrite', 'cv.imwrite', (['"""made-trimap.png"""', 'trimap'], {}), "('made-trimap.png', trimap)\n", (549, 576), True, 'import cv2 as cv\n')]
|
import os
import sys
import numpy as np
from math import floor
def splitset(dataset, parts):
"""Partition data into "parts" partitions"""
n = dataset.shape[0]
local_n = floor(n/parts)
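    # note: when n is not divisible by `parts`, the trailing n % parts samples are dropped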
result = []
for i in range(parts):
result.append(dataset[i*local_n: (i+1)*local_n])
return np.array(result)
if __name__ == '__main__':
if len(sys.argv) < 2:
nr_of_datasets = 10
else:
nr_of_datasets = int(sys.argv[1])
package = np.load("data/mnist.npz")
data = {}
for key, val in package.items():
data[key] = splitset(val, nr_of_datasets)
print("CREATING {} PARTITIONS INSIDE {}/data/clients".format(nr_of_datasets, os.getcwd()))
if not os.path.exists('data/clients'):
os.mkdir('data/clients')
for i in range(nr_of_datasets):
if not os.path.exists('data/clients/{}'.format(str(i))):
os.mkdir('data/clients/{}'.format(str(i)))
np.savez('data/clients/{}'.format(str(i)) + '/mnist.npz',
x_train=data['x_train'][i],
y_train=data['y_train'][i],
x_test=data['x_test'][i],
y_test=data['y_test'][i])
print("DONE")
|
[
"os.mkdir",
"numpy.load",
"os.getcwd",
"math.floor",
"os.path.exists",
"numpy.array"
] |
[((182, 198), 'math.floor', 'floor', (['(n / parts)'], {}), '(n / parts)\n', (187, 198), False, 'from math import floor\n'), ((308, 324), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (316, 324), True, 'import numpy as np\n'), ((476, 501), 'numpy.load', 'np.load', (['"""data/mnist.npz"""'], {}), "('data/mnist.npz')\n", (483, 501), True, 'import numpy as np\n'), ((710, 740), 'os.path.exists', 'os.path.exists', (['"""data/clients"""'], {}), "('data/clients')\n", (724, 740), False, 'import os\n'), ((750, 774), 'os.mkdir', 'os.mkdir', (['"""data/clients"""'], {}), "('data/clients')\n", (758, 774), False, 'import os\n'), ((685, 696), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (694, 696), False, 'import os\n')]
|
import numpy as np
def approximate_error(motif):
"""Calculate approximate error"""
pwm = motif.pwm
bases = list(pwm.keys())
n = sum(motif.counts[bases[0]])
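    # approximate small-sample correction: e_n ~ (s - 1) / (2 * ln(2) * n),
    # with s the alphabet size and n the number of observed sequences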
approx_error = (len(bases)-1)/(2 * np.log(2) * n)
return approx_error
def exact_error(motif):
"""Calculate exact error, using multinomial(na,nc,ng,nt)"""
## Super Slow. O(n^3)
pwm = motif.pwm
bases = pwm.keys()
na = sum(motif.counts['A'])
n = na
nc = 0
ng = 0
nt = 0
done = False
exact_error = 0
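    # walk through every composition (na, nc, ng, nt) with na+nc+ng+nt == n,
    # accumulating the Shannon entropy of each composition's frequencies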
while not done:
print (na,nc,ng,nt)
        exact_error += sum([-p * np.log2(p) if p > 0 else 0.0
                            for p in [na/n, nc/n, ng/n, nt/n]])
if nt<=0:
## iterate inner loop
if ng > 0:
## g => t
ng = ng - 1
nt = nt + 1
elif nc > 0:
## c -> g
                nc = nc - 1
                ng = ng + 1
else:
## a->c
na = na - 1
nc = nc + 1
else:
if ng > 0:
## g => t
ng = ng - 1
nt = nt + 1
elif nc>0:
## c => g; all t -> g
nc = nc - 1
ng = nt + 1
nt = 0
elif na>0:
## a => c; all g,t -> c
nc = nt + 1
na = na - 1
nt = 0
else:
done = True
return exact_error
def calc_info_matrix(motif, correction_type='approx'):
"""Calculate information matrix with small sample correction"""
pwm = motif.pwm
bases = pwm.keys()
if correction_type=='approx':
error = approximate_error(motif)
else:
error = exact_error(motif)
    info_matrix = [2 - error + sum([pwm[b][l] * np.nan_to_num(np.log2(pwm[b][l]))
                                    for b in bases])
                   for l in range(0, len(motif))]
return info_matrix
def calc_relative_information(motif, correction_type='approx'):
"""Calculate relative information matrix"""
pwm = motif.pwm
bases = pwm.keys()
if correction_type=='approx':
info_matrix = calc_info_matrix(motif)
else:
info_matrix = calc_info_matrix(motif, 'exact')
relative_info = {base: [prob*info for prob,info in zip(pwm[base], info_matrix)] for base in bases}
return relative_info
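# --- Illustrative sketch (hypothetical toy data, independent of any motif object):
# the same information-content calculation applied to a two-column PWM supplied
# as plain per-base probability lists.
if __name__ == '__main__':
    toy_pwm = {'A': [0.97, 0.25], 'C': [0.01, 0.25],
               'G': [0.01, 0.25], 'T': [0.01, 0.25]}
    n_seqs = 100  # assumed number of aligned sequences behind the counts
    err = (len(toy_pwm) - 1) / (2 * np.log(2) * n_seqs)               # cf. approximate_error
    info = [2 - err + sum(toy_pwm[b][l] * np.nan_to_num(np.log2(toy_pwm[b][l]))
                          for b in toy_pwm)
            for l in range(2)]                                        # cf. calc_info_matrix
    rel = {b: [p * i for p, i in zip(toy_pwm[b], info)] for b in toy_pwm}
    print(info)  # column 0 carries ~1.7 bits, column 1 ~0 bits
    print(rel)   # per-base letter heights as drawn in a sequence logo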
|
[
"numpy.log2",
"numpy.log"
] |
[((213, 222), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (219, 222), True, 'import numpy as np\n'), ((603, 613), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (610, 613), True, 'import numpy as np\n'), ((1828, 1846), 'numpy.log2', 'np.log2', (['pwm[b][l]'], {}), '(pwm[b][l])\n', (1835, 1846), True, 'import numpy as np\n')]
|
"""
:py:class:`Utils` - a set of generic utilities
==============================================
Usage::
# assuming that $PYTHONPATH=.../lcls2/psana
# Run test: python lcls2/psana/psana/pyalgos/generic/Utils.py 1
# Import
from psana.pyalgos.generic.Utils import input_single_char
import psana.pyalgos.generic.Utils as gu
# Methods
#resp = gu.<method(pars)>
ts = gu.str_tstamp(fmt='%Y-%m-%dT%H:%M:%S%z', time_sec=None)
tsec, ts = gu.time_and_stamp(fmt='%Y-%m-%dT%H:%M:%S%z', time_sec=None)
tsec = gu.time_sec_from_stamp(fmt='%Y-%m-%dT%H:%M:%S%z', time_stamp='1970-01-01T00:00:00-0800')
usr = gu.get_enviroment(env='USER')
usr = gu.get_login()
host = gu.get_hostname()
cwd = gu.get_cwd()
pid = gu.get_pid()
stat = gu.shell_command_is_available(cmd='mongorestore', verb=True)
rec = gu.log_rec_on_start()
fmode = gu.file_mode(fname)
gu.create_directory(dir, mode=0o777)
exists = gu.create_path(path, depth=6, mode=0o777)
flist = gu.get_list_of_files_in_dir(dirname)
flist = gu.get_list_of_files_in_dir_for_ext(dir, ext='.xtc')
    flist = gu.get_list_of_files_in_dir_for_part_fname(dir, pattern='-r0022')
owner = gu.get_path_owner(path)
mode = gu.get_path_mode(path)
tmpf = gu.get_tempfile(mode='r+b',suffix='.txt')
gu.print_parsed_path(path)
arr = gu.load_textfile(path)
gu.save_textfile(text, path, mode='w') # mode: 'w'-write, 'a'-append
gu.set_file_access_mode(fname, mode=0o777)
jo = gu.load_json(fname)
gu.save_json(jo, fname)
o = gu.load_pickle(fname)
gu.save_pickle(o, fname)
# Save image in file
# ==================
gu.save_image_tiff(image, fname='image.tiff', verb=True) # 16-bit tiff
gu.save_image_file(image, fname='image.png', verb=True) # gif, pdf, eps, png, jpg, jpeg, tiff (8-bit only)
list_int = gu.list_of_int_from_list_of_str(list_str)
list_str = gu.list_of_str_from_list_of_int(list_int, fmt='%04d')
resp = gu.has_kerberos_ticket()
resp = gu.check_token(do_print=False)
resp = gu.get_afs_token(do_print=False)
    hlst = gu.list_of_hosts(filter='psana')
    resp = gu.text_sataus_of_lsf_hosts(farm='psnehfarm')
    resp = gu.text_status_of_queues(lst_of_queues=['psanaq', 'psnehq', 'psfehq', 'psnehprioq', 'psfehprioq'])
    gu.str_kwargs(kwargs, title='Input parameters:', fmt='\n%20s : %s')
gu.print_kwargs(kwargs)
gu.print_parser(parser) # from optparse import OptionParser
s = gu.do_print(nev) # returns true for sparcified event numbers.
ch = gu.input_single_char('Next event? [y/n]')
os_system(cmd)
os_command(cmd)
See:
- :py:class:`Utils`
- :py:class:`PSUtils`
- :py:class:`NDArrUtils`
- :py:class:`Graphics`
This software was developed for the LCLS2 project.
If you use all or part of it, please give an appropriate acknowledgment.
Created: 2018-01-25 by <NAME>
Adopted for LCLS2 on 2018-02-02
"""
import os
import sys
import getpass
import socket
from time import localtime, strftime, time, strptime, mktime
import numpy as np
import tty, termios
#import subprocess
from subprocess import call
if sys.version_info.major == 2:
from commands import getoutput
else:
from subprocess import getoutput
# init_logger etc is moved to logger.py
# from psana.pyalgos.generic.logger import init_logger, STR_LEVEL_NAMES, DICT_NAME_TO_LEVEL, TSFORMAT
import logging
logger = logging.getLogger('__name__')
def str_tstamp(fmt='%Y-%m-%dT%H:%M:%S%z', time_sec=None):
"""Returns string timestamp for specified format and time in sec or current time by default
"""
ts = strftime(fmt, localtime(time_sec))
#logger.debug('str_tstamp: %s' % ts)
return ts
def str_tstamp_v1(fmt='%Y-%m-%dT%H:%M:%S.%f%z', time_sec=None):
"""Returns string timestamp for specified format and time in sec or current time by default
"""
from datetime import datetime
dt = datetime.fromtimestamp(time() if time_sec is None else time_sec)
return dt.strftime(fmt)
def time_and_stamp(fmt='%Y-%m-%dT%H:%M:%S%z', time_sec=None):
tsec = time() if time_sec is None else time_sec
return tsec, str_tstamp(fmt, tsec)
def time_sec_from_stamp(fmt='%Y-%m-%dT%H:%M:%S%z', time_stamp='1970-01-01T00:00:00-0800'):
try: struc = strptime(time_stamp, fmt)
except ValueError as err:
logger.exception(err)
sys.exit()
return int(mktime(struc))
def get_enviroment(env='USER'):
"""Returns the value of specified by string name environment variable
"""
return os.environ.get(env, None)
def get_hostname():
"""Returns login name
"""
#return os.uname()[1]
return socket.gethostname()
def get_cwd():
"""Returns current working directory
"""
return os.getcwd()
def get_pid():
"""Returns pid - process id
"""
return os.getpid()
def get_login():
"""Returns login name
"""
#return os.getlogin()
return getpass.getuser()
def shell_command_is_available(cmd='mongorestore', verb=True):
import shutil
    if shutil.which(cmd) is None:
        if verb: logger.warning('shell command "%s" is unavailable.' % cmd)
        return False
    return True
def file_mode(fname):
"""Returns file mode, e.g. 0o40377
"""
from stat import ST_MODE
return os.stat(fname)[ST_MODE]
def log_rec_on_start(tsfmt='%Y-%m-%dT%H:%M:%S%z'):
"""Returns (str) record containing timestamp, login, host, cwd, and command line
"""
return '\n%s user:%s@%s cwd:%s command:%s'%\
(str_tstamp(fmt=tsfmt), get_login(), get_hostname(), get_cwd(), ' '.join(sys.argv))
def create_directory(dir, mode=0o777):
"""Creates directory and sets its mode
"""
if os.path.exists(dir):
logger.debug('Directory exists: %s' % dir)
else:
os.makedirs(dir)
os.chmod(dir, mode)
logger.debug('Directory created: %s, mode(oct)=%s' % (dir, oct(mode)))
def create_path(path, depth=6, mode=0o777):
"""Creates missing path of specified depth from the beginning
e.g. for '/reg/g/psdm/logs/calibman/2016/07/log-file-name.txt'
or '/reg/d/psdm/cxi/cxi11216/calib/Jungfrau::CalibV1/CxiEndstation.0:Jungfrau.0/pedestals/9-end.data'
Returns True if path to file exists, False othervise
"""
logger.debug('create_path: %s' % path)
#subdirs = path.strip('/').split('/')
subdirs = path.split('/')
cpath = subdirs[0]
for i,sd in enumerate(subdirs[:-1]):
if i>0: cpath += '/%s'% sd
if i<depth: continue
if cpath=='': continue
create_directory(cpath, mode)
return os.path.exists(cpath)
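# Illustrative note on create_path(): components with index < depth in the
# '/'-split path are assumed to exist already; e.g. with depth=6 and
# '/reg/g/psdm/logs/calibman/2016/07/log.txt' only .../calibman/2016 and
# .../2016/07 are created when missing.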
def get_list_of_files_in_dir(dirname):
return os.listdir(dirname)
def get_list_of_files_in_dir_for_ext(dir, ext='.xtc'):
"""Returns the list of files in the directory for specified extension or None if directory is None."""
if dir is None: return []
if not os.path.exists(dir): return []
list_of_files_in_dir = os.listdir(dir)
list_of_files = []
for fname in list_of_files_in_dir:
if os.path.splitext(fname)[1] == ext:
list_of_files.append(fname)
return sorted(list_of_files)
def get_list_of_files_in_dir_for_part_fname(dir, pattern='-r0022'):
"""Returns the list of files in the directory for specified file name pattern or [] - empty list."""
if dir is None: return []
if not os.path.exists(dir): return []
list_of_files_in_dir = os.listdir(dir)
list_of_files = []
for fname in list_of_files_in_dir:
if pattern in fname:
fpath = os.path.join(dir,fname)
list_of_files.append(fpath)
return sorted(list_of_files)
def get_path_owner(path):
import pwd
stat = os.stat(path)
#print(' stat =', stat)
pwuid = pwd.getpwuid(stat.st_uid)
#print(' pwuid =', pwuid)
user_name = pwuid.pw_name
#print(' uid = %s user_name = %s' % (uid, user_name))
return user_name
def get_path_mode(path):
return os.stat(path).st_mode
def get_tempfile(mode='r+b',suffix='.txt'):
import tempfile
tf = tempfile.NamedTemporaryFile(mode=mode,suffix=suffix)
return tf # .name
def print_parsed_path(path): # Output for path:
print('print_parsed_path(path): path:',) # path/reg/d/psdm/XCS/xcsi0112/xtc/e167-r0015-s00-c00.xtc
print('exists(path) =', os.path.exists(path)) # True
print('splitext(path)=', os.path.splitext(path))# ('/reg/d/psdm/XCS/xcsi0112/xtc/e167-r0015-s00-c00', '.xtc')
print('basename(path)=', os.path.basename(path))# e167-r0015-s00-c00.xtc
print('dirname(path) =', os.path.dirname(path)) # /reg/d/psdm/XCS/xcsi0112/xtc
print('lexists(path) =', os.path.lexists(path)) # True
print('isfile(path) =', os.path.isfile(path)) # True
print('isdir(path) =', os.path.isdir(path)) # False
print('split(path) =', os.path.split(path)) # ('/reg/d/psdm/XCS/xcsi0112/xtc', 'e167-r0015-s00-c00.xtc')
def set_file_access_mode(fname, mode=0o777):
os.chmod(fname, mode)
def save_textfile(text, path, mode='w', verb=False):
"""Saves text in file specified by path. mode: 'w'-write, 'a'-append
"""
msg = 'save_textfile %s' % path
if verb: print(msg)
logger.debug(msg)
f=open(path, mode)
f.write(text)
f.close()
def load_textfile(path, verb=False):
"""Returns text file as a str object
"""
msg = 'load_textfile %s' % path
if verb: print(msg)
logger.debug(msg)
f=open(path, 'r')
recs = f.read() # f.readlines()
f.close()
return recs
def load_json(fname):
"""Load json object from file.
"""
logger.debug('load_json %s' % fname)
import json
return json.load(open(fname,'rb'))
# or
#with open(fname) as f: jo = json.load(f)
#return jo
def save_json(jo, fname, mode='w'):
"""Saves json object in file.
"""
logger.debug('save_json %s' % fname)
import json
with open(fname, mode) as f: json.dump(jo, f)
def load_pickle(fname, mode='rb'):
"""Returns object from packed in file.
"""
logger.debug('load_pickle %s' % fname)
import pickle
return pickle.load(open(fname, mode))
def save_pickle(o, fname, mode='wb'):
"""Saves object in the pickle file.
"""
logger.debug('save_pickle %s' % fname)
import pickle
with open(fname, mode) as f: pickle.dump(o, f)
def save_image_tiff(image, fname='image.tiff', verb=False):
"""Saves image in 16-bit tiff file
"""
    from PIL import Image
msg = 'save_image_tiff %s' % fname
if verb: print(msg)
logger.debug(msg)
img = Image.fromarray(image.astype(np.int16))
img.save(fname)
def save_image_file(image, fname='image.png', verb=False):
"""Saves files with type by extension gif, pdf, eps, png, jpg, jpeg, tiff (8-bit only),
or txt for any other type
"""
import scipy.misc as scim
msg = 'save_image_file %s' % fname
fields = os.path.splitext(fname)
if len(fields)>1 and fields[1] in ['.gif', '.pdf', '.eps', '.png', '.jpg', '.jpeg', '.tiff']:
scim.imsave(fname, image)
else:
fnametxt = '%s.txt' % fname
msg = 'save_image_file: non-supported file extension. Save image in text file %s' % fnametxt
np.savetxt(fnametxt, image, fmt='%8.1f', delimiter=' ', newline='\n')
#raise IOError('Unknown file type in extension %s' % fname)
if verb: print(msg)
logger.debug(msg)
def replace(template, pattern, subst):
"""If pattern in the template replaces it with subst.
Returns str object template with replaced patterns.
"""
fields = template.split(pattern, 1)
if len(fields) > 1:
return '%s%s%s' % (fields[0], subst, fields[1])
else:
return template
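# Illustrative examples (assumption, not from the original module): only the
# first occurrence is substituted because split() is called with maxsplit=1.
#   replace('a-b-c', '-', '_')  ->  'a_b-c'
#   replace('abc',   '-', '_')  ->  'abc'  (pattern absent, template returned unchanged)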
def print_command_line_parameters(parser):
"""Prints input arguments and optional parameters"""
(popts, pargs) = parser.parse_args()
args = pargs # list of positional arguments
opts = vars(popts) # dict of options
defs = vars(parser.get_default_values()) # dict of default options
print('Command:\n ', ' '.join(sys.argv)+\
'\nArgument list: %s\nOptional parameters:\n' % str(args)+\
' <key> <value> <default>')
for k,v in opts.items():
print(' %s %s %s' % (k.ljust(10), str(v).ljust(20), str(defs[k]).ljust(20)))
def list_of_int_from_list_of_str(list_str):
"""Converts ['0001', '0202', '0203', '0204',...] to [1, 202, 203, 204,...]
"""
return [int(s) for s in list_str]
def list_of_str_from_list_of_int(list_int, fmt='%04d'):
"""Converts [1, 202, 203, 204,...] to ['0001', '0202', '0203', '0204',...]
"""
return [fmt % i for i in list_int]
def has_kerberos_ticket():
"""Checks to see if the user has a valid Kerberos ticket"""
#stream = os.popen('klist -s')
#output = getoutput('klist -4')
#resp = call(["klist", "-s"])
    return call(["klist", "-s"]) == 0
def _parse_token(token):
""" from string like: User's (AFS ID 5269) tokens for <EMAIL> [Expires Feb 28 19:16] 54 75 Expires Feb 28 19:16
returns date/time: Feb 28 19:16
"""
timestamp = ''
for line in token.split('\n'):
pos_beg = line.find('[Expire')
if pos_beg == -1: continue
pos_end = line.find(']', pos_beg)
#print(line)
timestamp = line[pos_beg+9:pos_end]
#date_object = datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')
#date_object = datetime.strptime(timestamp, '%b %d %H:%M')
#print('date_object', str(date_object))
return timestamp
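# Hedged example of the expected behavior (sample token text is made up):
#   line = "User's (AFS ID 5269) tokens for ... [Expires Feb 28 19:16]"
#   _parse_token(line)  ->  'Feb 28 19:16'
# The slice pos_beg+9 skips the literal '[Expires ' prefix up to the timestamp.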
def check_token(do_print=False):
token = getoutput('tokens')
    #if do_print: print(token)
    status = 'Expire' in token
timestamp = _parse_token(token) if status else ''
msg = 'Your AFS token %s %s' % ({True:'IS valid until', False:'IS NOT valid'}[status], timestamp)
if do_print: print(msg)
return status, msg
def get_afs_token(do_print=False):
output = getoutput('aklog')
if do_print: print(str(output))
return output
def list_of_hosts(filter='psana'):
"""Returns list of hosts for lshosts"""
cmd = 'lshosts | grep %s' % filter
lines = getoutput(cmd).split('\n')
hosts = [line.split()[0] for line in lines]
return hosts
def text_sataus_of_lsf_hosts(farm='psnehfarm'):
"""Returns text output of the command: bhosts farm"""
cmd = 'bhosts %s' % farm
return cmd, getoutput(cmd)
def text_status_of_queues(lst_of_queues=['psanaq', 'psnehq', 'psfehq', 'psnehprioq', 'psfehprioq']):
"""Checks status of queues"""
cmd = 'bqueues %s' % (' '.join(lst_of_queues))
return cmd, getoutput(cmd)
def str_kwargs(kwargs, title='Input parameters:', fmt='\n%20s: %s'):
return title + ''.join([fmt % (k,str(v)) for k,v in kwargs.items()])
def print_kwargs(kwargs, cmt='%s\n kwargs:' % (40*'_')):
print(cmt)
for k,v in kwargs.items(): print(' %10s: %10s' % (k,v))
print(40*'_')
def str_attributes(o, cmt='\nattributes:', fmt='\n %s'):
return cmt + ''.join([fmt % str(v) for v in dir(o)])
#def str_attributes(o, cmt='\nattributes:', fmt='\n%20s: %s'):
# return str(dir(o))
#return cmt + ''.join([fmt % (k,str(v)) for k,v in dir(o) if len(k)>2 and k[:2] != '__'])
def print_parser(parser):
"""Prints input parameters"""
popts, pargs = parser.parse_args()
args = pargs
opts = vars(popts)
defs = vars(parser.get_default_values())
print('Arguments: %s\nOptional parameters:\n' % str(args)+\
'<key> <value> <default>')
for k,v in opts.items():
print('%s %s %s' % (k.ljust(10), str(v).ljust(16), str(defs[k]).ljust(16)))
def is_in_command_line(ptrn1=None, ptrn2=None):
"""Returns True (False) if parameter is (not) specified in the command line"""
if len(sys.argv) < 2: return False
for p in sys.argv[1:]:
if ptrn1 is not None and (ptrn1 in p[:2]):
#logger.debug('option "%s" is found in CL' % ptrn1)
return True
if ptrn2 is not None and (ptrn2 in p):
#logger.debug('option "%s" is found in CL' % ptrn2)
return True
return False
def do_print(nev):
"""Returns true for sparcified event numbers.
"""
return nev<10\
or (nev<50 and (not nev%10))\
or (nev<500 and (not nev%100))\
or not nev%1000
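# Sparsification pattern implied by the expression above: events 0..9 always
# print, then 10, 20, 30, 40 (every 10th below 50), 100, 200, 300, 400
# (every 100th below 500), and every 1000th event afterwards, e.g.
#   [n for n in range(60) if do_print(n)]  ->  [0, ..., 9, 10, 20, 30, 40]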
def input_single_char(prompt='input? >'):
""" input of single character from keybord without <CR>
import sys, tty, termios
"""
sys.stdout.write('\r'+prompt)
sys.stdout.flush()
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
tty.setraw(fd)
ch = sys.stdin.read(1)
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
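# Usage sketch (assumes sys, tty and termios are imported at module level, as
# the docstring notes); reads one key per call without waiting for <CR>:
#   while True:
#       ch = input_single_char('continue? [y/n] >')
#       if ch != 'y':
#           break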
#def get_grpnames(user='root'):
# """Returns tuple of group names"""
# from grp import getgrnam
# return getgrnam(user)
def os_system(cmd):
assert isinstance(cmd,str), 'command should be str'
os.system(cmd)
logger.debug('os_system command: %s' % cmd)
def os_command(cmd):
assert isinstance(cmd,str), 'command should be str'
#_cmd = cmd.split() if isinstance(cmd,str) else cmd
_cmd = cmd
stream = os.popen(_cmd)
resp = stream.read()
msg = '%s\n%s' % (_cmd, resp) if resp else _cmd
logger.debug('os_command resp: %s' % msg)
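# Usage sketch (hedged): os_command captures the command's stdout via os.popen
# and only writes it to the debug log; use os_system for plain execution.
#   os_command('ls -ltra')  # output ends up in logger.debug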
#----------- TEST -------------
if __name__ == "__main__":
def test_10():
from psana.pyalgos.generic.NDArrGenerators import random_standard
image = random_standard()
verbosity=True
save_image_tiff(image, fname='image.tiff', verb=verbosity)
save_image_file(image, fname='image.png', verb=verbosity)
save_image_file(image, fname='image.xyz', verb=verbosity)
def test_datetime():
from datetime import datetime
t_sec = time()
print('t_sec:', t_sec)
t = datetime.fromtimestamp(t_sec)
print('t:', t)
tnow = datetime.now()
print('datetime.now:', tnow)
tstamp = t.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]
zone = strftime('%z', localtime(t_sec))
print(tstamp)
print('zone', zone)
tsz = '%s%s' % (tstamp,zone)
print('tsz', tsz)
def test_input_single_char():
for n in range(20):
ch = input_single_char('Event:%03d Next event? [y/n]' %n)
if ch != 'y': sys.exit('\nExit by key %s' % ch)
def test_01():
#logger.debug('debug msg') # will print a message to the console
#logger.warning('Watch out!') # will print a message to the console
#logger.info('I told you so') # will not print anything
print('get_enviroment("PWD"): %s' % get_enviroment(env='PWD'))
print('get_hostname() : %s' % get_hostname())
print('get_cwd() : %s' % get_cwd())
print('get_login() : %s' % get_login())
print('str_tstamp() : %s' % str_tstamp(fmt='%Y-%m-%dT%H:%M'))
print('str_tstamp() : %s' % str_tstamp(fmt='%Y-%m-%dT%H:%M:%S%z'))
create_directory('./work', mode=0o377)
print('file_mode("work") : %s' % oct(file_mode('work')))
print('log_rec_on_start() :%s' % log_rec_on_start())
#print('get_grpnames() :%s' % str(get_grpnames('root')))
print('list_of_hosts :%s' % list_of_hosts())
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s',\
datefmt='%Y-%m-%dT%H:%M:%S',\
level=logging.DEBUG)
#filename='example.log', filemode='w'
test_01()
test_datetime()
test_input_single_char()
sys.exit('\nEnd of test')
# EOF
|
[
"sys.stdout.write",
"time.strptime",
"pickle.dump",
"getpass.getuser",
"os.popen",
"termios.tcsetattr",
"time.mktime",
"os.path.isfile",
"sys.stdout.flush",
"scipy.misc.imsave",
"subprocess.getoutput",
"os.path.join",
"os.path.lexists",
"os.path.dirname",
"numpy.savetxt",
"os.path.exists",
"socket.gethostname",
"datetime.datetime.now",
"time.localtime",
"json.dump",
"os.chmod",
"sys.stdin.read",
"os.stat",
"os.path.basename",
"os.system",
"shutil.which",
"subprocess.call",
"sys.stdin.fileno",
"datetime.datetime.fromtimestamp",
"os.listdir",
"sys.exit",
"tempfile.NamedTemporaryFile",
"os.getpid",
"os.makedirs",
"termios.tcgetattr",
"logging.basicConfig",
"os.getcwd",
"os.path.isdir",
"time.time",
"os.environ.get",
"psana.pyalgos.generic.NDArrGenerators.random_standard",
"os.path.splitext",
"tty.setraw",
"os.path.split",
"pwd.getpwuid",
"logging.getLogger"
] |
[((3460, 3489), 'logging.getLogger', 'logging.getLogger', (['"""__name__"""'], {}), "('__name__')\n", (3477, 3489), False, 'import logging\n'), ((4586, 4611), 'os.environ.get', 'os.environ.get', (['env', 'None'], {}), '(env, None)\n', (4600, 4611), False, 'import os\n'), ((4705, 4725), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4723, 4725), False, 'import socket\n'), ((4803, 4814), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4812, 4814), False, 'import os\n'), ((4883, 4894), 'os.getpid', 'os.getpid', ([], {}), '()\n', (4892, 4894), False, 'import os\n'), ((4985, 5002), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (5000, 5002), False, 'import getpass\n'), ((5735, 5754), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (5749, 5754), False, 'import os\n'), ((6634, 6655), 'os.path.exists', 'os.path.exists', (['cpath'], {}), '(cpath)\n', (6648, 6655), False, 'import os\n'), ((6708, 6727), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (6718, 6727), False, 'import os\n'), ((6992, 7007), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (7002, 7007), False, 'import os\n'), ((7464, 7479), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (7474, 7479), False, 'import os\n'), ((7742, 7755), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (7749, 7755), False, 'import os\n'), ((7796, 7821), 'pwd.getpwuid', 'pwd.getpwuid', (['stat.st_uid'], {}), '(stat.st_uid)\n', (7808, 7821), False, 'import pwd\n'), ((8100, 8153), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': 'mode', 'suffix': 'suffix'}), '(mode=mode, suffix=suffix)\n', (8127, 8153), False, 'import tempfile\n'), ((9032, 9053), 'os.chmod', 'os.chmod', (['fname', 'mode'], {}), '(fname, mode)\n', (9040, 9053), False, 'import os\n'), ((10957, 10980), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (10973, 10980), False, 'import os\n'), ((13720, 13739), 'subprocess.getoutput', 'getoutput', (['"""tokens"""'], {}), "('tokens')\n", (13729, 13739), False, 'from subprocess import getoutput\n'), ((14079, 14097), 'subprocess.getoutput', 'getoutput', (['"""aklog"""'], {}), "('aklog')\n", (14088, 14097), False, 'from subprocess import getoutput\n'), ((16613, 16644), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + prompt)"], {}), "('\\r' + prompt)\n", (16629, 16644), False, 'import sys\n'), ((16647, 16665), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (16663, 16665), False, 'import sys\n'), ((16675, 16693), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (16691, 16693), False, 'import sys\n'), ((16713, 16734), 'termios.tcgetattr', 'termios.tcgetattr', (['fd'], {}), '(fd)\n', (16730, 16734), False, 'import tty, termios\n'), ((16739, 16753), 'tty.setraw', 'tty.setraw', (['fd'], {}), '(fd)\n', (16749, 16753), False, 'import tty, termios\n'), ((16763, 16780), 'sys.stdin.read', 'sys.stdin.read', (['(1)'], {}), '(1)\n', (16777, 16780), False, 'import sys\n'), ((16785, 16839), 'termios.tcsetattr', 'termios.tcsetattr', (['fd', 'termios.TCSADRAIN', 'old_settings'], {}), '(fd, termios.TCSADRAIN, old_settings)\n', (16802, 16839), False, 'import tty, termios\n'), ((17067, 17081), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (17076, 17081), False, 'import os\n'), ((17293, 17307), 'os.popen', 'os.popen', (['_cmd'], {}), '(_cmd)\n', (17301, 17307), False, 'import os\n'), ((19337, 19474), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(name)s %(levelname)s: %(message)s"""', 'datefmt': 
'"""%Y-%m-%dT%H:%M:%S"""', 'level': 'logging.DEBUG'}), "(format=\n '%(asctime)s %(name)s %(levelname)s: %(message)s', datefmt=\n '%Y-%m-%dT%H:%M:%S', level=logging.DEBUG)\n", (19356, 19474), False, 'import logging\n'), ((19644, 19669), 'sys.exit', 'sys.exit', (['"""\nEnd of test"""'], {}), "('\\nEnd of test')\n", (19652, 19669), False, 'import sys\n'), ((3677, 3696), 'time.localtime', 'localtime', (['time_sec'], {}), '(time_sec)\n', (3686, 3696), False, 'from time import localtime, strftime, time, strptime, mktime\n'), ((4134, 4140), 'time.time', 'time', ([], {}), '()\n', (4138, 4140), False, 'from time import localtime, strftime, time, strptime, mktime\n'), ((4324, 4349), 'time.strptime', 'strptime', (['time_stamp', 'fmt'], {}), '(time_stamp, fmt)\n', (4332, 4349), False, 'from time import localtime, strftime, time, strptime, mktime\n'), ((4444, 4457), 'time.mktime', 'mktime', (['struc'], {}), '(struc)\n', (4450, 4457), False, 'from time import localtime, strftime, time, strptime, mktime\n'), ((5093, 5110), 'shutil.which', 'shutil.which', (['cmd'], {}), '(cmd)\n', (5105, 5110), False, 'import shutil\n'), ((5322, 5336), 'os.stat', 'os.stat', (['fname'], {}), '(fname)\n', (5329, 5336), False, 'import os\n'), ((5825, 5841), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (5836, 5841), False, 'import os\n'), ((5850, 5869), 'os.chmod', 'os.chmod', (['dir', 'mode'], {}), '(dir, mode)\n', (5858, 5869), False, 'import os\n'), ((6933, 6952), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (6947, 6952), False, 'import os\n'), ((7405, 7424), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (7419, 7424), False, 'import os\n'), ((8003, 8016), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (8010, 8016), False, 'import os\n'), ((8386, 8406), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8400, 8406), False, 'import os\n'), ((8445, 8467), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (8461, 8467), False, 'import os\n'), ((8559, 8581), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (8575, 8581), False, 'import os\n'), ((8636, 8657), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (8651, 8657), False, 'import os\n'), ((8719, 8740), 'os.path.lexists', 'os.path.lexists', (['path'], {}), '(path)\n', (8734, 8740), False, 'import os\n'), ((8778, 8798), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (8792, 8798), False, 'import os\n'), ((8837, 8856), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (8850, 8856), False, 'import os\n'), ((8897, 8916), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (8910, 8916), False, 'import os\n'), ((9990, 10006), 'json.dump', 'json.dump', (['jo', 'f'], {}), '(jo, f)\n', (9999, 10006), False, 'import json\n'), ((10380, 10397), 'pickle.dump', 'pickle.dump', (['o', 'f'], {}), '(o, f)\n', (10391, 10397), False, 'import pickle\n'), ((11087, 11112), 'scipy.misc.imsave', 'scim.imsave', (['fname', 'image'], {}), '(fname, image)\n', (11098, 11112), True, 'import scipy.misc as scim\n'), ((11268, 11337), 'numpy.savetxt', 'np.savetxt', (['fnametxt', 'image'], {'fmt': '"""%8.1f"""', 'delimiter': '""" """', 'newline': '"""\n"""'}), "(fnametxt, image, fmt='%8.1f', delimiter=' ', newline='\\n')\n", (11278, 11337), True, 'import numpy as np\n'), ((14529, 14543), 'subprocess.getoutput', 'getoutput', (['cmd'], {}), '(cmd)\n', (14538, 14543), False, 'from subprocess import getoutput\n'), ((14748, 14762), 'subprocess.getoutput', 
'getoutput', (['cmd'], {}), '(cmd)\n', (14757, 14762), False, 'from subprocess import getoutput\n'), ((17594, 17611), 'psana.pyalgos.generic.NDArrGenerators.random_standard', 'random_standard', ([], {}), '()\n', (17609, 17611), False, 'from psana.pyalgos.generic.NDArrGenerators import random_standard\n'), ((17891, 17897), 'time.time', 'time', ([], {}), '()\n', (17895, 17897), False, 'from time import localtime, strftime, time, strptime, mktime\n'), ((17933, 17962), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['t_sec'], {}), '(t_sec)\n', (17955, 17962), False, 'from datetime import datetime\n'), ((17993, 18007), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (18005, 18007), False, 'from datetime import datetime\n'), ((3989, 3995), 'time.time', 'time', ([], {}), '()\n', (3993, 3995), False, 'from time import localtime, strftime, time, strptime, mktime\n'), ((4418, 4428), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4426, 4428), False, 'import sys\n'), ((7591, 7615), 'os.path.join', 'os.path.join', (['dir', 'fname'], {}), '(dir, fname)\n', (7603, 7615), False, 'import os\n'), ((12987, 13008), 'subprocess.call', 'call', (["['klist', '-s']"], {}), "(['klist', '-s'])\n", (12991, 13008), False, 'from subprocess import call\n'), ((14284, 14298), 'subprocess.getoutput', 'getoutput', (['cmd'], {}), '(cmd)\n', (14293, 14298), False, 'from subprocess import getoutput\n'), ((18120, 18136), 'time.localtime', 'localtime', (['t_sec'], {}), '(t_sec)\n', (18129, 18136), False, 'from time import localtime, strftime, time, strptime, mktime\n'), ((7081, 7104), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (7097, 7104), False, 'import os\n'), ((18377, 18410), 'sys.exit', 'sys.exit', (["('\\nExit by key %s' % ch)"], {}), "('\\nExit by key %s' % ch)\n", (18385, 18410), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
"""
TODO:
Fix slowness
Fix sorting so columns are initially sorted in ascending order
"""
import logging
from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack
from wbia.guitool.__PYQT__.QtCore import Qt
from wbia.guitool import qtype
from wbia.guitool.guitool_decorators import checks_qt_error, signal_ # NOQA
from six.moves import zip # builtins # NOQA
# from utool._internal.meta_util_six import get_funcname
import functools
import utool as ut
# from .api_thumb_delegate import APIThumbDelegate
import numpy as np
from wbia.guitool import api_tree_node as _atn
import cachetools
# UTOOL PRINT STATEMENTS CAUSE RACE CONDITIONS IN QT THAT CAN LEAD TO SEGFAULTS
# DO NOT INJECT THEM IN GUITOOL
# print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
ut.noinject(__name__, '[APIItemModel]')
# raise ImportError('refused to import wbia.guitool')
profile = ut.profile
API_MODEL_BASE = QtCore.QAbstractItemModel
VERBOSE_MODEL = ut.VERBOSE or ut.get_argflag(('--verbose-qt', '--verbqt'))
VERBOSE_MODEL = VERBOSE_MODEL or ut.get_argflag(('--verbose-qt-api', '--verbqt-api'))
class ChangeLayoutContext(object):
"""
    Context manager that emits layoutAboutToBeChanged before the body,
    blocks updates during the body, and then emits layoutChanged after the body.
"""
@ut.accepts_scalar_input
def __init__(self, model_list, *args):
# logger.info('Changing: %r' % (model_list,))
self.model_list = list(model_list) + list(args)
def __enter__(self):
for model in self.model_list:
if model._get_context_id() is not None:
# logger.info("[ChangeLayoutContext] WARNING: ENTERING CONTEXT TWICE")
continue
model._set_context_id(id(self))
# logger.info("[ChangeLayoutContext] ENTERING CONTEXT, context_id: %r" % (model._get_context_id(), ))
model._about_to_change()
# isabouttochange = model._about_to_change()
# logger.info("... isabouttochange = %r" % (isabouttochange,))
model._set_changeblocked(True)
return self
def __exit__(self, type_, value, trace):
if trace is not None:
logger.info('[api_model] Error in context manager!: ' + str(value))
return False # return a falsey value on error
for model in self.model_list:
if model._get_context_id() == id(self):
# logger.info("[ChangeLayoutContext] EXITING CONTEXT, context_id: %r" % (id(self), ))
model._set_context_id(None)
model._set_changeblocked(False)
model._change()
# didchange = model._change()
# logger.info("... didchange = %r" % (didchange,))
def default_method_decorator(func):
""" Dummy decorator """
# return profile(func)
# return checks_qt_error(profile(func))
return func
def updater(func):
"""
    Decorates a function by emitting layoutChanged signals if not already in
    the middle of a layout change
"""
func_ = default_method_decorator(func)
# @checks_qt_error
@functools.wraps(func)
def upd_wrapper(model, *args, **kwargs):
with ChangeLayoutContext([model]):
return func_(model, *args, **kwargs)
return upd_wrapper
class APIItemModel(API_MODEL_BASE):
"""
Item model for displaying a list of columns
Attributes:
iders (list) : functions that return ids for setters and getters
col_name_list (list) : keys or SQL-like name for column to reference
abstracted data storage using getters and setters
col_type_list (list) : column value (Python) types
col_nice_list (list) : well-formatted names of the columns
col_edit_list (list) : booleans for if column should be editable
col_setter_list (list) : setter functions
col_getter_list (list) : getter functions
col_sort_index (int) : index into col_name_list for sorting
col_sort_reverse (bool) : flag to reverse the sort ordering
"""
_rows_updated = signal_(str, int)
EditableItemColor = QtGui.QColor(242, 242, 255)
# EditableItemColor = QtGui.QColor(200, 200, 255)
TrueItemColor = QtGui.QColor(230, 250, 230)
FalseItemColor = QtGui.QColor(250, 230, 230)
def _set_context_id(self, id_):
self._context_id = id_
def _get_context_id(self):
return self._context_id
def _set_changeblocked(self, changeblocked_):
self._changeblocked = changeblocked_
def _get_changeblocked(self):
return self._changeblocked
#
# Non-Qt Init Functions
def __init__(model, headers=None, parent=None):
if VERBOSE_MODEL:
logger.info('[APIItemModel] __init__')
# FIXME: don't let the model point to the view
model.view = parent
API_MODEL_BASE.__init__(model, parent=parent)
# Internal Flags
model._abouttochange = False
model._context_id = None
model._haschanged = True
model._changeblocked = False
# Model Data And Accessors
model.name = 'None'
model.nice = 'None'
model.iders = [lambda: []]
model.col_visible_list = []
model.col_name_list = []
model.col_type_list = []
model.col_nice_list = []
model.col_edit_list = []
model.col_setter_list = []
model.col_getter_list = []
model.col_level_list = []
model.col_bgrole_getter_list = None
model.col_sort_index = None
model.col_sort_reverse = False
model.level_index_list = []
model.cache = None # FIXME: This is not sustainable
model.cache_timeout_sec = 2.5
model.cache_size = 512
model.batch_size = None # Small batch sizes give good response time
model.scope_hack_list = []
model.root_node = _atn.TreeNode(-1, None, -1)
# Initialize member variables
# model._about_to_change()
model.headers = headers # save the headers
model.ider_filters = None
model.num_rows_loaded = 0
model.num_rows_total = None
# len(model.level_index_list)
# model.lazy_updater = None
if headers is not None:
model._update_headers(**headers)
def set_ider_filters(model, ider_filters):
""" Used to induce a filter on the rows, needs call of udpate rows after """
model.ider_filters = ider_filters
def get_iders(model):
# def filtfun_test(x_list):
# return [x for x in x_list if x % 2 == 0]
# model.name == 'annotations'
# if len(model.iders) == 1:
# model.ider_filters = [filtfun_test]
if model.ider_filters is None:
ider_list = model.iders
else:
assert len(model.ider_filters) == len(model.iders), 'bad filters'
# ider_list = [lambda: filtfn(ider()) for filtfn, ider in zip(model.ider_filters, model.iders)]
# with ut.embed_on_exception_context:
def wrap_ider(ider, filtfn):
def wrapped_ider(*args, **kwargs):
return filtfn(ider(*args, **kwargs))
return wrapped_ider
ider_list = [
# ider
wrap_ider(ider, filtfn)
# lambda *args: filtfn(ider(*args))
for filtfn, ider in zip(model.ider_filters, model.iders)
]
return ider_list
@updater
def _update_headers(model, **headers):
if VERBOSE_MODEL:
logger.info('[APIItemModel] _update_headers')
iders = headers.get('iders', None)
name = headers.get('name', None)
nice = headers.get('nice', None)
col_name_list = headers.get('col_name_list', None)
col_type_list = headers.get('col_type_list', None)
col_nice_list = headers.get('col_nice_list', None)
col_edit_list = headers.get('col_edit_list', None)
col_setter_list = headers.get('col_setter_list', None)
col_getter_list = headers.get('col_getter_list', None)
col_level_list = headers.get('col_level_list', None)
col_sort_index = headers.get('col_sort_index', 0)
col_sort_reverse = headers.get('col_sort_reverse', False)
# New for dynamically getting non-data roles for each row
col_bgrole_getter_list = headers.get('col_bgrole_getter_list', None)
col_visible_list = headers.get('col_visible_list', None)
#
if iders is None:
iders = []
if ut.USE_ASSERT:
assert ut.is_list(iders), 'bad type: %r' % type(iders)
for index, ider in enumerate(iders):
assert ut.is_funclike(ider), 'bad type at index %r: %r' % (
index,
type(ider),
)
if col_name_list is None:
col_name_list = []
if col_type_list is None:
col_type_list = []
if col_nice_list is None:
col_nice_list = col_name_list[:]
if col_edit_list is None:
col_edit_list = [False] * len(col_name_list)
if col_setter_list is None:
col_setter_list = []
if col_getter_list is None:
col_getter_list = []
if col_bgrole_getter_list is None:
col_bgrole_getter_list = [None] * len(col_name_list)
if col_visible_list is None:
col_visible_list = [True] * len(col_name_list)
if col_level_list is None:
col_level_list = [0] * len(col_name_list)
if True or ut.USE_ASSERT:
assert len(col_name_list) == len(col_type_list), 'inconsistent colnametype'
assert len(col_name_list) == len(col_nice_list), 'inconsistent colnice'
assert len(col_name_list) == len(col_edit_list), 'inconsistent coledit'
assert len(col_name_list) == len(col_setter_list), 'inconsistent colsetter'
assert len(col_bgrole_getter_list) == len(
col_name_list
), 'inconsistent col_bgrole_getter_list'
assert len(col_name_list) == len(col_getter_list), 'inconsistent colgetter'
assert len(col_visible_list) == len(
col_name_list
), 'inconsistent col_visible_list'
assert len(col_name_list) == len(col_level_list), 'inconsistent collevel'
for colname, flag, func in zip(col_name_list, col_edit_list, col_setter_list):
if flag:
assert func is not None, 'column=%r is editable but func is None' % (
colname,
)
model.clear_cache()
model.name = str(name)
model.nice = str(nice)
model.iders = iders
model.col_name_list = col_name_list
model.col_type_list = col_type_list
model.col_nice_list = col_nice_list
model.col_edit_list = col_edit_list
model.col_setter_list = col_setter_list
model.col_getter_list = col_getter_list
model.col_visible_list = col_visible_list
model.col_level_list = col_level_list
model.col_bgrole_getter_list = col_bgrole_getter_list
model.col_display_role_func_dict = headers.get('col_display_role_func_dict', None)
model.num_rows_loaded = 0
# model.num_cols_loaded = 0
model.num_rows_total = None
model.lazy_rows = True
# calls model._update_rows()
model._set_sort(col_sort_index, col_sort_reverse, rebuild_structure=True)
def clear_cache(model):
model.cache = cachetools.TTLCache(
maxsize=model.cache_size, ttl=model.cache_timeout_sec
)
@updater
def _set_sort(model, col_sort_index, col_sort_reverse=False, rebuild_structure=False):
if VERBOSE_MODEL:
logger.info(
'[APIItemModel] _set_sort, index=%r reverse=%r, rebuild=%r'
% (col_sort_index, col_sort_reverse, rebuild_structure)
)
if len(model.col_name_list) > 0:
if ut.USE_ASSERT:
assert isinstance(col_sort_index, int) and col_sort_index < len(
model.col_name_list
), ('sort index out of bounds by: %r' % col_sort_index)
model.col_sort_index = col_sort_index
model.col_sort_reverse = col_sort_reverse
# Update the row-id order
model._update_rows(rebuild_structure=rebuild_structure)
@updater
def _update_rows(model, rebuild_structure=True):
"""
Uses the current ider and col_sort_index to create
row_indices
"""
# with ut.Timer('[gt] update_rows (%s)' % (model.name,)):
if True:
# flag = model.blockSignals(True)
if VERBOSE_MODEL:
logger.info('[APIItemModel] +-----------')
logger.info('[APIItemModel] _update_rows')
# this is not slow
# logger.info('UPDATE ROWS!')
if len(model.col_level_list) == 0:
return
# old_root = model.root_node # NOQA
if rebuild_structure:
                # logger.info('Rebuilding api_item_model internal structure')
model.beginResetModel() # I think this is preventing a segfault
model.root_node = _atn.build_internal_structure(model)
model.endResetModel()
if VERBOSE_MODEL:
logger.info('[APIItemModel] lazy_update_rows')
model.level_index_list = []
sort_index = 0 if model.col_sort_index is None else model.col_sort_index
children = (
model.root_node.get_children()
) # THIS IS THE LINE THAT TAKES FOREVER
id_list = [child.get_id() for child in children]
# logger.info('ids_ generated')
nodes = []
if len(id_list) != 0:
if VERBOSE_MODEL:
logger.info(
'[APIItemModel] lazy_update_rows len(id_list) = %r'
% (len(id_list))
)
# start sort
if model.col_sort_index is not None:
type_ = model.col_type_list[sort_index]
getter = model.col_getter_list[sort_index]
values = getter(id_list)
if type_ == 'PIXMAP':
# TODO: find a better sorting metric for pixmaps
values = ut.get_list_column(values, 0)
else:
type_ = int
values = id_list
reverse = model.col_sort_reverse
# <NUMPY MULTIARRAY SORT>
if True:
if values is None:
logger.info('SORTING VALUES IS NONE. VERY WEIRD')
if type_ is float:
values = np.array(ut.replace_nones(values, np.nan))
# Force nan to be the smallest number
values[np.isnan(values)] = -np.inf
elif type_ is str:
values = ut.replace_nones(values, '')
import vtool as vt
sortx = vt.argsort_records([values, id_list], reverse=reverse)
# </NUMPY MULTIARRAY SORT>
nodes = ut.take(children, sortx)
level = model.col_level_list[sort_index]
if level == 0:
model.root_node.set_children(nodes)
# end sort
if ut.USE_ASSERT:
assert nodes is not None, 'no indices'
model.level_index_list = nodes
# Book keeping for lazy loading rows
model.num_rows_total = len(model.level_index_list)
# model.num_cols_total = len(model.col_name_list)
model.num_cols_loaded = 0
if model.lazy_rows:
model.num_rows_loaded = 0
else:
model.num_rows_loaded = model.num_rows_total
            # emit the number of rows and the name for the view to display
# model.blockSignals(flag)
model._rows_updated.emit(model.name, model.num_rows_total)
if VERBOSE_MODEL:
logger.info('[APIItemModel] finished _update_rows')
logger.info('[APIItemModel] L__________')
# ------------------------------------
    # --- Data maintenance functions ---
# ------------------------------------
@default_method_decorator
def _about_to_change(model, force=False):
if force or (not model._abouttochange and not model._changeblocked):
if VERBOSE_MODEL:
logger.info('ABOUT TO CHANGE: %r' % (model.name,))
model._abouttochange = True
model.layoutAboutToBeChanged.emit()
return True
else:
if VERBOSE_MODEL:
logger.info('NOT ABOUT TO CHANGE')
return False
@default_method_decorator
def _change(model, force=False):
if force or (model._abouttochange and not model._changeblocked):
if VERBOSE_MODEL:
logger.info('LAYOUT CHANGED: %r' % (model.name,))
model._abouttochange = False
model.clear_cache()
model.layoutChanged.emit()
return True
else:
if VERBOSE_MODEL:
logger.info('NOT LAYOUT CHANGING')
return False
@default_method_decorator
def _update(model, newrows=False):
model.cache = {}
model._update_rows()
def _use_ider(model, level=0):
if level == 0:
return model.iders[level]()
else:
parent_ids = model._use_ider(level - 1)
level_ider = model.iders[level]
return level_ider(parent_ids)
def get_row_and_qtindex_from_id(model, _id):
""" uses an sqlrowid (from iders) to get a qtindex """
row = model.root_node.find_row_from_id(_id)
qtindex = model.index(row, 0) if row is not None else None
return qtindex, row
# ----------------------------------
    # --- API Convenience Functions ---
# ----------------------------------
@default_method_decorator
def get_header_data(model, colname, qtindex):
""" Use _get_data if the column number is known """
if not qtindex.isValid():
return None
# row = qtindex.row()
node = qtindex.internalPointer()
col = model.col_name_list.index(colname)
getter = model.col_getter_list[col]
id_ = node.id_
# id_ = model.root_node[row].get_id()
value = getter(id_)
return value
@default_method_decorator
def get_header_name(model, column):
# TODO: use qtindex?
colname = model.col_name_list[column]
return colname
@default_method_decorator
def _get_level(model, qtindex):
node = qtindex.internalPointer()
if node is None:
return -1
level = node.get_level()
# level = model.col_level_list[column]
return level
# --------------------------------
# --- API Interface Functions ---
# --------------------------------
@default_method_decorator
def _get_col_align(model, col):
if ut.USE_ASSERT:
assert col is not None, 'bad column'
raise NotImplementedError('_get_col_align')
@default_method_decorator
def _get_row_id(model, qtindex=QtCore.QModelIndex()):
"""
        Returns the id (specified by iders, i.e. a wbia rowid) from qtindex
"""
if qtindex is not None and qtindex.isValid():
node = qtindex.internalPointer()
if ut.USE_ASSERT:
try:
assert isinstance(node, _atn.TreeNode), 'type(node)=%r, node=%r' % (
type(node),
node,
)
except AssertionError as ex:
ut.printex(
ex, 'error in _get_row_id', keys=['model', 'qtindex', 'node']
)
raise
try:
id_ = node.get_id()
except AttributeError as ex:
ut.printex(ex, key_list=['node', 'model', 'qtindex'])
raise
return id_
@default_method_decorator
def _get_adjacent_qtindex(model, qtindex=QtCore.QModelIndex(), offset=1):
# check qtindex
if qtindex is None or not qtindex.isValid():
return None
node = qtindex.internalPointer()
# check node
try:
if ut.USE_ASSERT:
assert isinstance(node, _atn.TreeNode), type(node)
except AssertionError as ex:
ut.printex(ex, key_list=['node'], pad_stdout=True)
raise
# get node parent
try:
node_parent = node.get_parent()
except Exception as ex:
ut.printex(ex, key_list=['node'], reraise=False, pad_stdout=True)
raise
# parent_node check
if node_parent is None:
logger.info('[model._get_adjacent_qtindex] node_parent is None!')
return None
# Offset to find the next qtindex
next_index = node_parent.child_index(node) + offset
nChildren = node_parent.get_num_children()
        # check next index validity
if next_index >= 0 and next_index < nChildren:
next_node = node_parent.get_child(next_index)
next_level = next_node.get_level()
col = model.col_level_list.index(next_level)
row = next_node.get_row()
            # Create qtindex for the adjacent node
parent_qtindex = model.parent(qtindex)
next_qtindex = model.index(row, col, parent_qtindex)
return next_qtindex
else:
# There is no adjacent node
return None
@default_method_decorator
def _get_type(model, col):
return model.col_type_list[col]
@default_method_decorator
def _get_bgrole_value(model, qtindex):
""" Gets the background role if specified """
col = qtindex.column()
bgrole_getter = model.col_bgrole_getter_list[col]
if bgrole_getter is None:
return None
row_id = model._get_row_id(qtindex) # row_id w.r.t. to sorting
color = bgrole_getter(row_id)
if color is None:
return None
val = qtype.to_qcolor(color)
return val
@default_method_decorator
def _get_data(model, qtindex, **kwargs):
col = qtindex.column()
# row_id wrt. to sorting
row_id = model._get_row_id(qtindex)
cachekey = (row_id, col)
try:
data = model.cache[cachekey]
except KeyError:
# getter function for this column
getter = model.col_getter_list[col]
try:
# Using this getter may not be thread safe
# Should this work around decorators?
# data = getter((row_id,), **kwargs)[0]
data = getter(row_id, **kwargs)
except Exception as ex:
qtindex_rc = (qtindex.row(), qtindex.column()) # NOQA
ut.printex(
ex,
'[api_item_model] problem getting in column %r' % (col,),
keys=[
'model.name',
'getter',
'row_id',
'col',
'qtindex',
'qtindex_rc',
],
iswarning=True,
)
# getting from: %r' % ut.util_str.get_callable_name(getter))
raise
model.cache[cachekey] = data
# </MODEL_CACHE>
return data
@default_method_decorator
def _set_data(model, qtindex, value):
"""
        The setter function should be of the following format:
            def setter(column_name, row_id, value)
        column_name is the key or SQL-like name for the column, row_id is the
        corresponding row key or SQL-like id that the row callback returned,
        and value is the value that needs to be stored. The setter function
        should return a boolean indicating whether setting the value was
        successful or not.
"""
col = qtindex.column()
row_id = model._get_row_id(qtindex)
# <HACK: MODEL_CACHE>
cachekey = (row_id, col)
try:
del model.cache[cachekey]
except KeyError:
pass
# </HACK: MODEL_CACHE>
setter = model.col_setter_list[col]
if VERBOSE_MODEL:
logger.info('[model] Setting data: row_id=%r, setter=%r' % (row_id, setter))
try:
return setter(row_id, value)
except Exception as ex:
ut.printex(
ex,
'ERROR: setting data: row_id=%r, setter=%r, col=%r'
% (row_id, setter, col),
)
raise
# ------------------------
# --- QtGui Functions ---
# ------------------------
@default_method_decorator
def parent(model, qindex):
"""
A common convention used in models that expose tree data structures is
that only items in the first column have children. For that case, when
reimplementing this function in a subclass the column of the returned
QModelIndex would be 0.
When reimplementing this function in a subclass, be careful to avoid
calling QModelIndex member functions, such as QModelIndex.parent(),
since indexes belonging to your model will simply call your
implementation, leading to infinite recursion.
FIXME:
seems to segfault in here
https://riverbankcomputing.com/pipermail/pyqt/2016-February/036977.html
https://gist.github.com/estan/c051d1f798c4c46caa7d
Returns:
the parent of the model item with the given index. If the item has
no parent, an invalid QModelIndex is returned.
"""
# model.lazy_checks()
if qindex.isValid():
try:
node = qindex.internalPointer()
# <HACK>
# A segfault happens in isinstance when updating rows?
if not isinstance(node, _atn.TreeNode):
logger.info(
'WARNING: tried to access parent of %r type object' % type(node)
)
return QtCore.QModelIndex()
# assert node.__dict__, "node.__dict__=%r" % node.__dict__
# </HACK>
parent_node = node.get_parent()
parent_id = parent_node.get_id()
if parent_id == -1 or parent_id is None:
return QtCore.QModelIndex()
row = parent_node.get_row()
col = model.col_level_list.index(parent_node.get_level())
return model.createIndex(row, col, parent_node)
except Exception as ex:
import utool
with utool.embed_on_exception_context:
qindex_rc = (qindex.row(), qindex.column()) # NOQA
ut.printex(
ex,
'failed to do parenty things',
keys=['qindex_rc', 'model.name'],
tb=True,
)
import utool
utool.embed()
raise
return QtCore.QModelIndex()
@default_method_decorator
def index(model, row, column, parent=QtCore.QModelIndex()):
"""
Qt Override
Returns:
the index of the item in the model specified by the given row,
column and parent index. When reimplementing this function in a
subclass, call createIndex() to generate model indexes that other
components can use to refer to items in your model.
NOTE:
Object must be specified to sort delegates.
"""
# model.lazy_checks()
if not parent.isValid():
# This is a top level == 0 index
# logger.info('[model.index] ROOT: row=%r, col=%r' % (row, column))
if row >= model.root_node.get_num_children():
return QtCore.QModelIndex()
# import traceback
# traceback.print_stack()
node = model.root_node[row]
if model.col_level_list[column] != node.get_level():
return QtCore.QModelIndex()
qtindex = model.createIndex(row, column, object=node)
return qtindex
else:
# This is a child level > 0 index
parent_node = parent.internalPointer()
node = parent_node[row]
if ut.USE_ASSERT:
assert isinstance(parent_node, _atn.TreeNode), type(parent_node)
assert isinstance(node, _atn.TreeNode), type(node)
return model.createIndex(row, column, object=node)
def _get_level_row_count(model, qtindex):
return model.rowCount(qtindex.parent())
def _get_level_row_index(model, qtindex):
node = qtindex.internalPointer()
return node.get_row()
@default_method_decorator
def rowCount(model, parent=QtCore.QModelIndex()):
""" Qt Override """
# model.lazy_checks()
if not parent.isValid():
# Root row count
if len(model.level_index_list) == 0:
return 0
return model.num_rows_loaded
# nRows = len(model.level_index_list)
# # logger.info('* nRows=%r' % nRows)
# return nRows
else:
node = parent.internalPointer()
nRows = node.get_num_children()
# logger.info('+ nRows=%r' % nRows)
return nRows
@default_method_decorator
def columnCount(model, parent=QtCore.QModelIndex()):
""" Qt Override """
# FOR NOW THE COLUMN COUNT IS CONSTANT
# model.lazy_checks()
return len(model.col_name_list)
@default_method_decorator
def canFetchMore(model, parent=QtCore.QModelIndex()):
"""
Returns true if there is more data available for parent; otherwise
returns false. The default implementation always returns false. If
canFetchMore() returns true, the fetchMore() function should be called.
This is the behavior of QAbstractItemView, for example.
References:
http://doc.qt.io/qt-5/qtwidgets-itemviews-fetchmore-example.html
# Extend this to work well with QTreeViews
http://blog.tjwakeham.com/lazy-loading-pyqt-data-models/
http://stackoverflow.com/questions/38506808/pyqt4-force-view-to-fetchmore-from
"""
if parent is None:
return
if parent.isValid():
# Check if we are at a leaf node
node = parent.internalPointer()
if node.get_num_children() == 0:
return
# if node.get_level() == len(model.col_level_list):
# return
# logger.info('model.num_rows_total = %r' % (model.num_rows_total,))
# logger.info('model.num_rows_loaded = %r' % (model.num_rows_loaded,))
if model.num_rows_total is not None:
if model.num_rows_loaded < model.num_rows_total:
if VERBOSE_MODEL:
logger.info('canFetchMore %s? -- Yes' % (model.name,))
return True
if VERBOSE_MODEL:
logger.info('canFetchMore %s? -- No' % (model.name,))
return False
# if not parent.isValid():
# return False
# flags = model.flags(qtindex)
# # row = qtindex.row()
# col = qtindex.column()
# node = qtindex.internalPointer()
# return False
@default_method_decorator
def fetchMore(model, parent=QtCore.QModelIndex()):
"""
Fetches any available data for the items with the parent specified by
the parent index.
Reimplement this if you are populating your model incrementally.
The default implementation does nothing.
"""
if parent is None:
return
if parent.isValid():
# Check if we are at a leaf node
node = parent.internalPointer()
if node.get_num_children() == 0:
return
remainder = model.num_rows_total - model.num_rows_loaded
if model.batch_size is None:
num_fetching = remainder
else:
num_fetching = min(model.batch_size, remainder)
if VERBOSE_MODEL:
logger.info('Fetching %r more %s' % (num_fetching, model.name))
        idx1 = model.num_rows_loaded
        idx2 = model.num_rows_loaded + num_fetching - 1
# model.beginInsertRows(QtCore.QModelIndex(), idx1, idx2)
model.beginInsertRows(parent, idx1, idx2)
model.num_rows_loaded += num_fetching
# logger.info('model.num_rows_total = %r' % (model.num_rows_total,))
# logger.info('model.num_rows_loaded = %r' % (model.num_rows_loaded,))
model.endInsertRows()
if VERBOSE_MODEL:
logger.info(
'Fetched %r/%r rows' % (model.num_rows_loaded, model.num_rows_total)
)
# model.numberPopulated.emit(num_loading)
@default_method_decorator
def data(model, qtindex, role=Qt.DisplayRole, **kwargs):
"""
Depending on the role, returns either data or how to display data
Returns the data stored under the given role for the item referred to by
the index.
Note:
If you do not have a value to return, return None
"""
if not qtindex.isValid():
return None
flags = model.flags(qtindex)
# row = qtindex.row()
col = qtindex.column()
node = qtindex.internalPointer()
if model.col_level_list[col] != node.get_level():
return QVariantHack()
type_ = model._get_type(col)
#
# Specify Text Alignment Role
if role == Qt.TextAlignmentRole:
if type_ in qtype.QT_IMAGE_TYPES:
value = Qt.AlignRight | Qt.AlignVCenter
elif type_ in qtype.QT_BUTTON_TYPES:
value = Qt.AlignRight | Qt.AlignVCenter
elif type_ in ut.VALID_FLOAT_TYPES:
value = Qt.AlignRight | Qt.AlignVCenter
else:
value = Qt.AlignHCenter | Qt.AlignVCenter
return value
#
# Specify Background Rule
elif role == Qt.BackgroundRole:
value = model._get_bgrole_value(qtindex)
if value is not None:
return value
if flags & Qt.ItemIsEditable:
# Editable fields are colored
return QVariantHack(model.EditableItemColor)
elif flags & Qt.ItemIsUserCheckable:
# Checkable color depends on the truth value
data = model._get_data(qtindex, **kwargs)
if data:
return QVariantHack(model.TrueItemColor)
else:
return QVariantHack(model.FalseItemColor)
else:
pass
#
# Specify Foreground Role
elif role == Qt.ForegroundRole:
if flags & Qt.ItemIsEditable:
return QtGui.QBrush(QtGui.QColor(0, 0, 0))
# Specify Decoration Role (superceded by thumbdelegate)
# elif role == Qt.DecorationRole and type_ in qtype.QT_IMAGE_TYPES:
# Specify CheckState Role:
if role == Qt.CheckStateRole:
if flags & Qt.ItemIsUserCheckable:
data = model._get_data(qtindex, **kwargs)
return Qt.Checked if data else Qt.Unchecked
#
# Return the data to edit or display
elif role in (Qt.DisplayRole, Qt.EditRole):
# For types displayed with custom delegates do not cast data into a
# qvariant. This includes PIXMAP, BUTTON, and COMBO
if type_ in qtype.QT_DELEGATE_TYPES:
data = model._get_data(qtindex, **kwargs)
# logger.info(data)
return data
else:
# Display data with default delegate by casting to a qvariant
data = model._get_data(qtindex, **kwargs)
if model.col_display_role_func_dict is not None:
col_name = model.col_name_list[col]
display_role_func = model.col_display_role_func_dict.get(
col_name, None
)
if display_role_func is not None:
value = display_role_func(data)
return value
value = qtype.cast_into_qt(data)
return value
else:
# import builtins
# role_name = qtype.ItemDataRoles[role]
# builtins.print('UNHANDLED ROLE=%r' % role_name)
pass
# else return None
return QVariantHack()
@default_method_decorator
def setData(model, qtindex, value, role=Qt.EditRole):
"""
Sets the role data for the item at qtindex to value. value is a
        QVariant (called data in the documentation). Returns a map with values for
all predefined roles in the model for the item at the given index.
Reimplement this function if you want to extend the default behavior of
this function to include custom roles in the map.
"""
try:
if not qtindex.isValid():
return None
flags = model.flags(qtindex)
# row = qtindex.row()
col = qtindex.column()
if not (flags & Qt.ItemIsEditable or flags & Qt.ItemIsUserCheckable):
return None
if role == Qt.CheckStateRole:
type_ = 'QtCheckState'
data = value == Qt.Checked
elif role != Qt.EditRole:
return False
else:
# Cast value into datatype
type_ = model.col_type_list[col]
data = qtype.cast_from_qt(value, type_)
# Do actual setting of data
old_data = model._get_data(qtindex)
if old_data != data:
model._set_data(qtindex, data)
# This may not work with PyQt5
# http://stackoverflow.com/questions/22560296/not-responding-datachanged
            # Emit that data was changed and return success
model.dataChanged.emit(qtindex, qtindex)
return True
except Exception as ex:
# value = str(value.toString()) # NOQA
ut.printex(
ex,
'ignoring setData',
'[model]',
tb=True,
key_list=['value'],
iswarning=True,
)
return False
@default_method_decorator
def headerData(model, section, orientation, role=Qt.DisplayRole):
"""
Qt Override
Returns:
the data for the given role and section in the header with the
specified orientation. For horizontal headers, the section number
corresponds to the column number. Similarly, for vertical headers,
the section number corresponds to the row number.
"""
# model.lazy_checks()
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
column = section
if column >= len(model.col_nice_list):
return []
return model.col_nice_list[column]
if orientation == Qt.Vertical and role == Qt.DisplayRole:
# row = section
# rowid = model._get_row_id(row)
# return rowid
return section
return QVariantHack()
@updater
def sort(model, column, order):
""" Qt Override """
# model.lazy_checks()
reverse = order == QtCore.Qt.DescendingOrder
model._set_sort(column, reverse)
@default_method_decorator
def flags(model, qtindex):
"""
Qt Override
Returns:
Qt.ItemFlag:
0: 'NoItemFlags' # It does not have any properties set.
1: 'ItemIsSelectable' # It can be selected.
2: 'ItemIsEditable' # It can be edited.
4: 'ItemIsDragEnabled' # It can be dragged.
8: 'ItemIsDropEnabled' # It can be used as a drop target.
16: 'ItemIsUserCheckable' # It can be checked or unchecked by the user.
32: 'ItemIsEnabled' # The user can interact with the item.
64: 'ItemIsTristate' # The item is checkable with three separate states.
"""
# Return flags based on column properties (like type, and editable)
col = qtindex.column()
type_ = model._get_type(col)
editable = (
model.col_edit_list[col]
and model._get_level(qtindex) == model.col_level_list[col]
)
if type_ in qtype.QT_IMAGE_TYPES:
# return Qt.NoItemFlags
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
elif not editable:
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
elif type_ in ut.VALID_BOOL_TYPES:
return Qt.ItemIsEnabled | Qt.ItemIsUserCheckable
else:
return Qt.ItemIsEnabled | Qt.ItemIsEditable | Qt.ItemIsSelectable
def simple_thumbnail_widget():
r"""
Very simple example to test thumbnails
CommandLine:
python -m wbia.guitool.api_item_model --test-simple_thumbnail_widget --show
Example:
>>> # ENABLE_DOCTEST
>>> # xdoctest: +REQUIRES(--gui)
>>> import wbia.guitool as guitool
>>> from wbia.guitool.api_item_model import * # NOQA
>>> guitool.ensure_qapp() # must be ensured before any embeding
>>> wgt = simple_thumbnail_widget()
>>> ut.quit_if_noshow()
>>> wgt.show()
>>> guitool.qtapp_loop(wgt, frequency=100, init_signals=True)
"""
import wbia.guitool as guitool
guitool.ensure_qapp()
col_name_list = ['rowid', 'image_name', 'thumb']
col_types_dict = {
'thumb': 'PIXMAP',
}
def thumb_getter(id_, thumbsize=128):
""" Thumb getters must conform to thumbtup structure """
# logger.info(id_)
return ut.grab_test_imgpath(id_)
# return None
col_getter_dict = {
'rowid': [1, 2, 3],
'image_name': ['lena.png', 'carl.jpg', 'patsy.jpg'],
'thumb': thumb_getter,
}
col_ider_dict = {
'thumb': 'image_name',
}
col_setter_dict = {}
editable_colnames = []
sortby = 'rowid'
def get_thumb_size():
return 128
col_width_dict = {}
col_bgrole_dict = {}
api = guitool.CustomAPI(
col_name_list,
col_types_dict,
col_getter_dict,
col_bgrole_dict,
col_ider_dict,
col_setter_dict,
editable_colnames,
sortby,
get_thumb_size,
True,
col_width_dict,
)
headers = api.make_headers(tblnice='Simple Example')
wgt = guitool.APIItemWidget()
wgt.change_headers(headers)
# guitool.qtapp_loop(qwin=wgt, ipy=ipy, frequency=loop_freq)
return wgt
|
[
"utool.is_funclike",
"utool.replace_nones",
"utool.grab_test_imgpath",
"numpy.isnan",
"utool.noinject",
"six.moves.zip",
"utool.get_argflag",
"wbia.guitool.api_tree_node.TreeNode",
"wbia.guitool.__PYQT__.QVariantHack",
"utool.printex",
"wbia.guitool.ensure_qapp",
"wbia.guitool.qtype.cast_from_qt",
"wbia.guitool.__PYQT__.QtCore.QModelIndex",
"wbia.guitool.qtype.to_qcolor",
"utool.get_list_column",
"wbia.guitool.qtype.cast_into_qt",
"wbia.guitool.__PYQT__.QtGui.QColor",
"cachetools.TTLCache",
"wbia.guitool.CustomAPI",
"wbia.guitool.APIItemWidget",
"utool.is_list",
"wbia.guitool.api_tree_node.build_internal_structure",
"functools.wraps",
"vtool.argsort_records",
"utool.embed",
"utool.take",
"wbia.guitool.guitool_decorators.signal_",
"logging.getLogger"
] |
[((796, 821), 'logging.getLogger', 'logging.getLogger', (['"""wbia"""'], {}), "('wbia')\n", (813, 821), False, 'import logging\n'), ((822, 861), 'utool.noinject', 'ut.noinject', (['__name__', '"""[APIItemModel]"""'], {}), "(__name__, '[APIItemModel]')\n", (833, 861), True, 'import utool as ut\n'), ((1013, 1057), 'utool.get_argflag', 'ut.get_argflag', (["('--verbose-qt', '--verbqt')"], {}), "(('--verbose-qt', '--verbqt'))\n", (1027, 1057), True, 'import utool as ut\n'), ((1091, 1143), 'utool.get_argflag', 'ut.get_argflag', (["('--verbose-qt-api', '--verbqt-api')"], {}), "(('--verbose-qt-api', '--verbqt-api'))\n", (1105, 1143), True, 'import utool as ut\n'), ((3134, 3155), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (3149, 3155), False, 'import functools\n'), ((4126, 4143), 'wbia.guitool.guitool_decorators.signal_', 'signal_', (['str', 'int'], {}), '(str, int)\n', (4133, 4143), False, 'from wbia.guitool.guitool_decorators import checks_qt_error, signal_\n'), ((4168, 4195), 'wbia.guitool.__PYQT__.QtGui.QColor', 'QtGui.QColor', (['(242)', '(242)', '(255)'], {}), '(242, 242, 255)\n', (4180, 4195), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((4270, 4297), 'wbia.guitool.__PYQT__.QtGui.QColor', 'QtGui.QColor', (['(230)', '(250)', '(230)'], {}), '(230, 250, 230)\n', (4282, 4297), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((4319, 4346), 'wbia.guitool.__PYQT__.QtGui.QColor', 'QtGui.QColor', (['(250)', '(230)', '(230)'], {}), '(250, 230, 230)\n', (4331, 4346), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((42721, 42742), 'wbia.guitool.ensure_qapp', 'guitool.ensure_qapp', ([], {}), '()\n', (42740, 42742), True, 'import wbia.guitool as guitool\n'), ((43440, 43627), 'wbia.guitool.CustomAPI', 'guitool.CustomAPI', (['col_name_list', 'col_types_dict', 'col_getter_dict', 'col_bgrole_dict', 'col_ider_dict', 'col_setter_dict', 'editable_colnames', 'sortby', 'get_thumb_size', '(True)', 'col_width_dict'], {}), '(col_name_list, col_types_dict, col_getter_dict,\n col_bgrole_dict, col_ider_dict, col_setter_dict, editable_colnames,\n sortby, get_thumb_size, True, col_width_dict)\n', (43457, 43627), True, 'import wbia.guitool as guitool\n'), ((43783, 43806), 'wbia.guitool.APIItemWidget', 'guitool.APIItemWidget', ([], {}), '()\n', (43804, 43806), True, 'import wbia.guitool as guitool\n'), ((5932, 5959), 'wbia.guitool.api_tree_node.TreeNode', '_atn.TreeNode', (['(-1)', 'None', '(-1)'], {}), '(-1, None, -1)\n', (5945, 5959), True, 'from wbia.guitool import api_tree_node as _atn\n'), ((11658, 11732), 'cachetools.TTLCache', 'cachetools.TTLCache', ([], {'maxsize': 'model.cache_size', 'ttl': 'model.cache_timeout_sec'}), '(maxsize=model.cache_size, ttl=model.cache_timeout_sec)\n', (11677, 11732), False, 'import cachetools\n'), ((19681, 19701), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (19699, 19701), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((20627, 20647), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (20645, 20647), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((22697, 22719), 'wbia.guitool.qtype.to_qcolor', 'qtype.to_qcolor', (['color'], {}), '(color)\n', (22712, 22719), False, 'from wbia.guitool import qtype\n'), ((27850, 27870), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (27868, 27870), False, 'from wbia.guitool.__PYQT__ 
import QtCore, QtGui, QVariantHack\n'), ((27943, 27963), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (27961, 27963), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((29665, 29685), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (29683, 29685), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((30290, 30310), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (30308, 30310), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((30524, 30544), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (30542, 30544), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((32308, 32328), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (32326, 32328), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((37532, 37546), 'wbia.guitool.__PYQT__.QVariantHack', 'QVariantHack', ([], {}), '()\n', (37544, 37546), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((40370, 40384), 'wbia.guitool.__PYQT__.QVariantHack', 'QVariantHack', ([], {}), '()\n', (40382, 40384), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((43002, 43027), 'utool.grab_test_imgpath', 'ut.grab_test_imgpath', (['id_'], {}), '(id_)\n', (43022, 43027), True, 'import utool as ut\n'), ((8648, 8665), 'utool.is_list', 'ut.is_list', (['iders'], {}), '(iders)\n', (8658, 8665), True, 'import utool as ut\n'), ((10486, 10536), 'six.moves.zip', 'zip', (['col_name_list', 'col_edit_list', 'col_setter_list'], {}), '(col_name_list, col_edit_list, col_setter_list)\n', (10489, 10536), False, 'from six.moves import zip\n'), ((34410, 34424), 'wbia.guitool.__PYQT__.QVariantHack', 'QVariantHack', ([], {}), '()\n', (34422, 34424), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((8768, 8788), 'utool.is_funclike', 'ut.is_funclike', (['ider'], {}), '(ider)\n', (8782, 8788), True, 'import utool as ut\n'), ((13413, 13449), 'wbia.guitool.api_tree_node.build_internal_structure', '_atn.build_internal_structure', (['model'], {}), '(model)\n', (13442, 13449), True, 'from wbia.guitool import api_tree_node as _atn\n'), ((20982, 21032), 'utool.printex', 'ut.printex', (['ex'], {'key_list': "['node']", 'pad_stdout': '(True)'}), "(ex, key_list=['node'], pad_stdout=True)\n", (20992, 21032), True, 'import utool as ut\n'), ((21178, 21243), 'utool.printex', 'ut.printex', (['ex'], {'key_list': "['node']", 'reraise': '(False)', 'pad_stdout': '(True)'}), "(ex, key_list=['node'], reraise=False, pad_stdout=True)\n", (21188, 21243), True, 'import utool as ut\n'), ((25126, 25222), 'utool.printex', 'ut.printex', (['ex', "('ERROR: setting data: row_id=%r, setter=%r, col=%r' % (row_id, setter, col))"], {}), "(ex, 'ERROR: setting data: row_id=%r, setter=%r, col=%r' % (\n row_id, setter, col))\n", (25136, 25222), True, 'import utool as ut\n'), ((28662, 28682), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (28680, 28682), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((28888, 28908), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (28906, 28908), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((39226, 39320), 'utool.printex', 'ut.printex', (['ex', '"""ignoring setData"""', '"""[model]"""'], 
{'tb': '(True)', 'key_list': "['value']", 'iswarning': '(True)'}), "(ex, 'ignoring setData', '[model]', tb=True, key_list=['value'],\n iswarning=True)\n", (39236, 39320), True, 'import utool as ut\n'), ((7447, 7483), 'six.moves.zip', 'zip', (['model.ider_filters', 'model.iders'], {}), '(model.ider_filters, model.iders)\n', (7450, 7483), False, 'from six.moves import zip\n'), ((15353, 15407), 'vtool.argsort_records', 'vt.argsort_records', (['[values, id_list]'], {'reverse': 'reverse'}), '([values, id_list], reverse=reverse)\n', (15371, 15407), True, 'import vtool as vt\n'), ((15483, 15507), 'utool.take', 'ut.take', (['children', 'sortx'], {}), '(children, sortx)\n', (15490, 15507), True, 'import utool as ut\n'), ((20452, 20505), 'utool.printex', 'ut.printex', (['ex'], {'key_list': "['node', 'model', 'qtindex']"}), "(ex, key_list=['node', 'model', 'qtindex'])\n", (20462, 20505), True, 'import utool as ut\n'), ((26826, 26846), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (26844, 26846), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((27129, 27149), 'wbia.guitool.__PYQT__.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (27147, 27149), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((27799, 27812), 'utool.embed', 'utool.embed', ([], {}), '()\n', (27810, 27812), False, 'import utool\n'), ((35274, 35311), 'wbia.guitool.__PYQT__.QVariantHack', 'QVariantHack', (['model.EditableItemColor'], {}), '(model.EditableItemColor)\n', (35286, 35311), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((37261, 37285), 'wbia.guitool.qtype.cast_into_qt', 'qtype.cast_into_qt', (['data'], {}), '(data)\n', (37279, 37285), False, 'from wbia.guitool import qtype\n'), ((38647, 38679), 'wbia.guitool.qtype.cast_from_qt', 'qtype.cast_from_qt', (['value', 'type_'], {}), '(value, type_)\n', (38665, 38679), False, 'from wbia.guitool import qtype\n'), ((14597, 14626), 'utool.get_list_column', 'ut.get_list_column', (['values', '(0)'], {}), '(values, 0)\n', (14615, 14626), True, 'import utool as ut\n'), ((20196, 20269), 'utool.printex', 'ut.printex', (['ex', '"""error in _get_row_id"""'], {'keys': "['model', 'qtindex', 'node']"}), "(ex, 'error in _get_row_id', keys=['model', 'qtindex', 'node'])\n", (20206, 20269), True, 'import utool as ut\n'), ((23486, 23655), 'utool.printex', 'ut.printex', (['ex', "('[api_item_model] problem getting in column %r' % (col,))"], {'keys': "['model.name', 'getter', 'row_id', 'col', 'qtindex', 'qtindex_rc']", 'iswarning': '(True)'}), "(ex, '[api_item_model] problem getting in column %r' % (col,),\n keys=['model.name', 'getter', 'row_id', 'col', 'qtindex', 'qtindex_rc'],\n iswarning=True)\n", (23496, 23655), True, 'import utool as ut\n'), ((27545, 27637), 'utool.printex', 'ut.printex', (['ex', '"""failed to do parenty things"""'], {'keys': "['qindex_rc', 'model.name']", 'tb': '(True)'}), "(ex, 'failed to do parenty things', keys=['qindex_rc',\n 'model.name'], tb=True)\n", (27555, 27637), True, 'import utool as ut\n'), ((15029, 15061), 'utool.replace_nones', 'ut.replace_nones', (['values', 'np.nan'], {}), '(values, np.nan)\n', (15045, 15061), True, 'import utool as ut\n'), ((15156, 15172), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (15164, 15172), True, 'import numpy as np\n'), ((15256, 15284), 'utool.replace_nones', 'ut.replace_nones', (['values', '""""""'], {}), "(values, '')\n", (15272, 15284), True, 'import utool as ut\n'), ((35532, 35565), 
'wbia.guitool.__PYQT__.QVariantHack', 'QVariantHack', (['model.TrueItemColor'], {}), '(model.TrueItemColor)\n', (35544, 35565), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((35615, 35649), 'wbia.guitool.__PYQT__.QVariantHack', 'QVariantHack', (['model.FalseItemColor'], {}), '(model.FalseItemColor)\n', (35627, 35649), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n'), ((35851, 35872), 'wbia.guitool.__PYQT__.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (35863, 35872), False, 'from wbia.guitool.__PYQT__ import QtCore, QtGui, QVariantHack\n')]
|
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, unicode_literals
import datetime as dt
from io import StringIO
import logging
import numpy as np
import pytest
from sys import version_info
import warnings
import aacgmv2
class TestFutureDepWarning:
def setup(self):
# Initialize the routine to be tested
self.test_routine = None
self.test_args = []
self.test_kwargs = {}
def teardown(self):
del self.test_routine, self.test_args, self.test_kwargs
def test_future_dep_warning(self):
"""Test the implementation of FutureWarning for deprecated routines"""
if self.test_routine is None:
assert True
else:
with warnings.catch_warnings(record=True) as wout:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
self.test_routine(*self.test_args, **self.test_kwargs)
# Verify some things
assert len(wout) == 1
assert issubclass(wout[-1].category, FutureWarning)
assert "Deprecated routine" in str(wout[-1].message)
class TestDepAACGMV2Warning(TestFutureDepWarning):
def setup(self):
self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
self.test_routine = None
self.test_args = []
self.test_kwargs = {}
def teardown(self):
del self.dtime, self.test_routine, self.test_args, self.test_kwargs
def test_gc2gd_lat_warning(self):
"""Test future deprecation warning for gc2gd_lat"""
self.test_routine = aacgmv2.deprecated.gc2gd_lat
self.test_args = [60.0]
self.test_future_dep_warning()
def test_igrf_dipole_axis_warning(self):
"""Test future deprecation warning for igrf_dipole_axis"""
self.test_routine = aacgmv2.deprecated.igrf_dipole_axis
self.test_args = [self.dtime]
self.test_future_dep_warning()
class TestDepAACGMV2:
def setup(self):
"""Runs before every method to create a clean testing setup"""
self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
self.lat = None
self.lon = None
def teardown(self):
"""Runs after every method to clean up previous testing"""
del self.dtime, self.lat, self.lon
def test_subsol(self):
"""Test the subsolar calculation"""
doy = int(self.dtime.strftime("%j"))
ut = self.dtime.hour * 3600.0 + self.dtime.minute * 60.0 + \
self.dtime.second
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.lon, self.lat = aacgmv2.deprecated.subsol(self.dtime.year,
doy, ut)
np.testing.assert_almost_equal(self.lon, -179.2004, decimal=4)
np.testing.assert_almost_equal(self.lat, -23.0431, decimal=4)
def test_gc2gd_lat(self):
"""Test the geocentric to geodetic conversion"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.lat = aacgmv2.deprecated.gc2gd_lat(45.0)
np.testing.assert_almost_equal(self.lat, 45.1924, decimal=4)
def test_gc2gd_lat_list(self):
"""Test the geocentric to geodetic conversion"""
self.lat = [45.0, -45.0]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.lat = aacgmv2.deprecated.gc2gd_lat(self.lat)
np.testing.assert_allclose(self.lat, [45.1924, -45.1924], rtol=1.0e-4)
def test_gc2gd_lat_arr(self):
"""Test the geocentric to geodetic conversion"""
self.lat = np.array([45.0, -45.0])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.lat = aacgmv2.deprecated.gc2gd_lat(self.lat)
np.testing.assert_allclose(self.lat, [45.1924, -45.1924], rtol=1.0e-4)
def test_igrf_dipole_axis(self):
"""Test the IGRF dipole axis calculation"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
m = aacgmv2.deprecated.igrf_dipole_axis(self.dtime)
np.testing.assert_allclose(m, [0.050281,-0.16057,0.98574], rtol=1.0e-4)
|
[
"warnings.simplefilter",
"numpy.testing.assert_almost_equal",
"aacgmv2.deprecated.subsol",
"aacgmv2.deprecated.gc2gd_lat",
"datetime.datetime",
"numpy.array",
"warnings.catch_warnings",
"numpy.testing.assert_allclose",
"aacgmv2.deprecated.igrf_dipole_axis"
] |
[((1307, 1339), 'datetime.datetime', 'dt.datetime', (['(2015)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(2015, 1, 1, 0, 0, 0)\n', (1318, 1339), True, 'import datetime as dt\n'), ((2151, 2183), 'datetime.datetime', 'dt.datetime', (['(2015)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(2015, 1, 1, 0, 0, 0)\n', (2162, 2183), True, 'import datetime as dt\n'), ((2821, 2883), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.lon', '(-179.2004)'], {'decimal': '(4)'}), '(self.lon, -179.2004, decimal=4)\n', (2851, 2883), True, 'import numpy as np\n'), ((2892, 2953), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.lat', '(-23.0431)'], {'decimal': '(4)'}), '(self.lat, -23.0431, decimal=4)\n', (2922, 2953), True, 'import numpy as np\n'), ((3193, 3253), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.lat', '(45.1924)'], {'decimal': '(4)'}), '(self.lat, 45.1924, decimal=4)\n', (3223, 3253), True, 'import numpy as np\n'), ((3535, 3605), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['self.lat', '[45.1924, -45.1924]'], {'rtol': '(0.0001)'}), '(self.lat, [45.1924, -45.1924], rtol=0.0001)\n', (3561, 3605), True, 'import numpy as np\n'), ((3717, 3740), 'numpy.array', 'np.array', (['[45.0, -45.0]'], {}), '([45.0, -45.0])\n', (3725, 3740), True, 'import numpy as np\n'), ((3896, 3966), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['self.lat', '[45.1924, -45.1924]'], {'rtol': '(0.0001)'}), '(self.lat, [45.1924, -45.1924], rtol=0.0001)\n', (3922, 3966), True, 'import numpy as np\n'), ((4214, 4287), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['m', '[0.050281, -0.16057, 0.98574]'], {'rtol': '(0.0001)'}), '(m, [0.050281, -0.16057, 0.98574], rtol=0.0001)\n', (4240, 4287), True, 'import numpy as np\n'), ((2597, 2622), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2620, 2622), False, 'import warnings\n'), ((2636, 2667), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2657, 2667), False, 'import warnings\n'), ((2701, 2752), 'aacgmv2.deprecated.subsol', 'aacgmv2.deprecated.subsol', (['self.dtime.year', 'doy', 'ut'], {}), '(self.dtime.year, doy, ut)\n', (2726, 2752), False, 'import aacgmv2\n'), ((3055, 3080), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3078, 3080), False, 'import warnings\n'), ((3094, 3125), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3115, 3125), False, 'import warnings\n'), ((3149, 3183), 'aacgmv2.deprecated.gc2gd_lat', 'aacgmv2.deprecated.gc2gd_lat', (['(45.0)'], {}), '(45.0)\n', (3177, 3183), False, 'import aacgmv2\n'), ((3393, 3418), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3416, 3418), False, 'import warnings\n'), ((3432, 3463), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3453, 3463), False, 'import warnings\n'), ((3487, 3525), 'aacgmv2.deprecated.gc2gd_lat', 'aacgmv2.deprecated.gc2gd_lat', (['self.lat'], {}), '(self.lat)\n', (3515, 3525), False, 'import aacgmv2\n'), ((3754, 3779), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3777, 3779), False, 'import warnings\n'), ((3793, 3824), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3814, 3824), False, 'import warnings\n'), ((3848, 3886), 'aacgmv2.deprecated.gc2gd_lat', 'aacgmv2.deprecated.gc2gd_lat', (['self.lat'], 
{}), '(self.lat)\n', (3876, 3886), False, 'import aacgmv2\n'), ((4070, 4095), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4093, 4095), False, 'import warnings\n'), ((4109, 4140), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4130, 4140), False, 'import warnings\n'), ((4157, 4204), 'aacgmv2.deprecated.igrf_dipole_axis', 'aacgmv2.deprecated.igrf_dipole_axis', (['self.dtime'], {}), '(self.dtime)\n', (4192, 4204), False, 'import aacgmv2\n'), ((735, 771), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (758, 771), False, 'import warnings\n'), ((858, 889), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (879, 889), False, 'import warnings\n')]
|
"""
Bilateral Filtering
A bilateral filter is used for smoothing images and reducing noise while
preserving edges.
"""
import cv2
import numpy as np
# read the image
img = cv2.imread("../images/taj.jpg")
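# note: cv2.imread returns None (rather than raising) when the path is wrong,
# so the filters below assume the image loaded successfully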
# apply bilateral filter with pixel neighbourhood diameter d = 15
# and sigmaColor = sigmaSpace = 75
bilateral = cv2.bilateralFilter(img, 15, 75, 75)
# average filter
average_filter = cv2.blur(img, (5, 5))
# median filter
median_filter = cv2.medianBlur(img, 5)
# gaussian filter
gaussian_filter = cv2.GaussianBlur(img, (5, 5), 0)
# stacking the image side-by-side
res = np.hstack((img, bilateral, average_filter, median_filter, gaussian_filter))
cv2.imshow("image", res)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.GaussianBlur",
"cv2.medianBlur",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.blur",
"numpy.hstack",
"cv2.bilateralFilter",
"cv2.imread",
"cv2.imshow"
] |
[((191, 222), 'cv2.imread', 'cv2.imread', (['"""../images/taj.jpg"""'], {}), "('../images/taj.jpg')\n", (201, 222), False, 'import cv2\n'), ((305, 341), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['img', '(15)', '(75)', '(75)'], {}), '(img, 15, 75, 75)\n', (324, 341), False, 'import cv2\n'), ((377, 398), 'cv2.blur', 'cv2.blur', (['img', '(5, 5)'], {}), '(img, (5, 5))\n', (385, 398), False, 'import cv2\n'), ((432, 454), 'cv2.medianBlur', 'cv2.medianBlur', (['img', '(5)'], {}), '(img, 5)\n', (446, 454), False, 'import cv2\n'), ((492, 524), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (508, 524), False, 'import cv2\n'), ((566, 641), 'numpy.hstack', 'np.hstack', (['(img, bilateral, average_filter, median_filter, gaussian_filter)'], {}), '((img, bilateral, average_filter, median_filter, gaussian_filter))\n', (575, 641), True, 'import numpy as np\n'), ((643, 667), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'res'], {}), "('image', res)\n", (653, 667), False, 'import cv2\n'), ((668, 682), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (679, 682), False, 'import cv2\n'), ((683, 706), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (704, 706), False, 'import cv2\n')]
|
"""
This script creates an instance of a sacred experiment and defines default configurations.
"""
from src.neural_nets.models import get_model
from src.neural_nets.load_data import get_loader
from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss
import src.regression.logistic_regression as reg
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchsso.optim as soptim
import torch.nn.functional as F
import random
from torch.utils.data import DataLoader
from sacred import Experiment
from torch import Tensor, device
from copy import deepcopy
from time import sleep
from tqdm import tqdm
from typing import List
from itertools import product
# create a new sacred experiment whose name is an integer
ex = Experiment(name=str(random.randint(0, 1000000)))
# default configurations
@ex.config
def cfg():
# system
cuda = torch.cuda.is_available()
gpu = 0
base_dir = os.getcwd()
# supported datasets
# JSB_Chorales (short)
# Nottingham (medium)
# Piano_midi (long)
# MuseData (extra long)
dataset = "JSB_Chorales"
# training
num_epochs = 150
batch_size = 128
# mask some low notes and some high notes because they never show up
low_off_notes = 0
high_off_notes = 88
lr = 0.001
decay = 1.0
optmzr = "SGD"
regularization = 0.0
# hyperparameter search
do_hpsearch = False
learning_rates = 10**np.linspace(-2, -4, 5)
decays = 1 - np.linspace(0, 0.1, num=5)
regularizations = 10**np.linspace(-2, -4, num=5)
hps_epochs = 50
# Supported architectures
# LINEAR (LDS)
# REGRESSION (regress next note based on last note)
# REGRESSION_8_STEP (regress next note based on last 8 notes)
architecture = 'LDS'
readout = 'linear'
gradient_clipping = 1
jit = False # not fully implemented
# for regression
lag = 1
window = 1
# for neural networks
input_size = 88
hidden_size = 300
num_layers = 1
output_size = 88
# see models.py and initialization.py for details
init = 'default'
scale = 1.0
parity = None # see models.py
t_distrib = torch.distributions.Uniform(0, 0.75)
path = 'results/77/final_state_dict.pt'
# when to save state dictionaries
save_init_model = True
save_final_model = True
save_every_epoch = False
# detect backprop anomalies
detect_anomaly = False
# give all random number generators the same seed
def _seed_all(_seed) -> None:
torch.manual_seed(_seed)
np.random.seed(_seed)
random.seed(_seed)
# this context is used when we are running things on the cpu
class NullContext(object):
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
# this function simply trains regression models and logs the results
# see regression.trainer for details
@ex.capture
def sklearn_experiment(dataset: str,
save_dir: str,
num_epochs: int,
high_off_notes: int,
low_off_notes: int,
lag: int,
window: int,
_seed,
_log,
_run):
"""
:param dataset: name of the dataset to be used
    :param save_dir: temporary directory where artifacts are being stored
    :param lag: how many time steps into the future the regression model is to predict
    :param window: how many time steps the regression model is to take into account
:param _seed: sacred random seed
:param _log: sacred object used to output to the command line
:param _run: sacred object used to monitor the runtime
"""
num_notes = high_off_notes - low_off_notes
models = reg.train_models(dataset,
num_epochs,
low_off_notes,
high_off_notes,
_seed,
lag=lag,
window=window)
coefs = np.zeros((num_notes, num_notes*window))
intercepts = np.zeros(num_notes*window)
for i in range(num_notes):
model = models[i]
# if there were no notes played for this channel, a model won't be trained
# simply save all parameters as -1 to discourage the note from being played
if model == None:
coefs[i] = -1
intercepts[i] = -1
else:
coefs[i] = model.coef_
intercepts[i] = model.intercept_
np.save(save_dir + 'coefs.npy', coefs)
np.save(save_dir + 'intercepts.npy', intercepts)
_run.add_artifact(save_dir + 'coefs.npy')
_run.add_artifact(save_dir + 'intercepts.npy')
train_loss = reg.compute_loss(models,
dataset,
'traindata',
low_off_notes,
high_off_notes,
lag=lag,
window=window)
test_loss = reg.compute_loss(models,
dataset,
'testdata',
low_off_notes,
high_off_notes,
lag=lag,
window=window)
valid_loss = reg.compute_loss(models,
dataset,
'validdata',
low_off_notes,
high_off_notes,
lag=lag,
window=window)
_run.log_scalar('trainLoss', train_loss)
_run.log_scalar('testLoss', test_loss)
_run.log_scalar('validLoss', valid_loss)
train_acc = reg.compute_accuracy(models,
dataset,
'traindata',
low_off_notes,
high_off_notes,
lag=lag,
window=window)
test_acc = reg.compute_accuracy(models,
dataset,
'testdata',
low_off_notes,
high_off_notes,
lag=lag,
window=window)
valid_acc = reg.compute_accuracy(models,
dataset,
'validdata',
low_off_notes,
high_off_notes,
lag=lag,
window=window)
_run.log_scalar('trainAccuracy', train_acc)
_run.log_scalar('testAccuracy', test_acc)
_run.log_scalar('validAccuracy', valid_acc)
# a single optimization step
@ex.capture
def train_iter(device: device,
cuda_device: torch.cuda.device,
input_tensor: Tensor,
target: Tensor,
mask: Tensor,
model: nn.Module,
loss_fcn: nn.Module,
optimizer: optim.Optimizer,
save_every_epoch: bool,
save_dir: str,
train_loader: DataLoader,
test_loader: DataLoader,
valid_loader: DataLoader,
low_off_notes: int,
high_off_notes: int,
_log,
_run,
logging=True):
input_tensor = input_tensor.to(device)
# number of songs in this batch
N = input_tensor.shape[0]
output, hidden_tensors = model(input_tensor)
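    # average the masked BCE over the N songs in the batch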
loss = loss_fcn(output, target, mask, model)/N
optimizer.zero_grad()
loss.backward()
optimizer.step()
# use sacred to log training loss and accuracy
if logging:
train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)
_run.log_scalar("trainLoss", loss.cpu().detach().item())
_run.log_scalar("trainAccuracy", train_acc)
    # save a copy of the model and make sacred remember it each epoch
    if save_every_epoch and logging:
        sd = deepcopy(model.state_dict())
        # note: `epoch` is not an argument of train_iter and would need to be
        # supplied by the caller for this per-epoch file name to resolve
        torch.save(sd, save_dir + 'state_dict_' + str(epoch) + '.pt')
        _run.add_artifact(save_dir + 'state_dict_' + str(epoch) + '.pt')
# train a neural network
# returns the final loss and accuracy on the training, testing, and validation sets
@ex.capture
def pytorch_train_loop(cuda: bool,
model_dict: dict,
initializer: dict,
train_loader: DataLoader,
test_loader: DataLoader,
valid_loader: DataLoader,
low_off_notes: int,
high_off_notes: int,
optmzr: str,
lr: float,
decay: float,
regularization: float,
num_epochs: int,
save_dir: str,
save_init_model,
save_every_epoch,
save_final_model,
_seed,
_log,
_run,
logging=True):
# construct and initialize the model
model = get_model(model_dict, initializer, cuda)
# save a copy of the initial model and make sacred remember it
if save_init_model and logging:
init_sd = deepcopy(model.state_dict())
torch.save(init_sd, save_dir + 'initial_state_dict.pt')
_run.add_artifact(save_dir + 'initial_state_dict.pt')
# if we are on cuda we construct the device and run everything on it
cuda_device = NullContext()
device = torch.device('cpu')
if cuda:
dev_name = 'cuda:' + str(gpu)
cuda_device = torch.cuda.device(dev_name)
device = torch.device(dev_name)
model = model.to(device)
with cuda_device:
# see metrics.py
loss_fcn = MaskedBCE(regularization, low_off_notes=low_off_notes, high_off_notes=high_off_notes)
# compute the metrics before training and log them
if logging:
train_loss = compute_loss(loss_fcn, model, train_loader)
test_loss = compute_loss(loss_fcn, model, test_loader)
val_loss = compute_loss(loss_fcn, model, valid_loader)
_run.log_scalar("trainLoss", train_loss)
_run.log_scalar("testLoss", test_loss)
_run.log_scalar("validLoss", val_loss)
train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)
test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)
val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)
_run.log_scalar("trainAccuracy", train_acc)
_run.log_scalar("testAccuracy", test_acc)
_run.log_scalar("validAccuracy", val_acc)
# construct the optimizer
optimizer = None
if optmzr == "SGD":
optimizer = optim.SGD(model.parameters(), lr=lr)
elif optmzr == "Adam":
optimizer = optim.Adam(model.parameters(), lr=lr)
elif optmzr == "RMSprop":
optimizer = optim.RMSprop(model.parameters(), lr=lr)
else:
raise ValueError("Optimizer {} not recognized.".format(optmzr))
# learning rate decay
scheduler = None
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: decay**epoch)
# begin training loop
for epoch in tqdm(range(num_epochs)):
for input_tensor, target, mask in train_loader:
train_iter(device,
cuda_device,
input_tensor,
target,
mask,
model,
loss_fcn,
optimizer,
save_every_epoch,
save_dir,
train_loader,
test_loader,
valid_loader,
low_off_notes,
high_off_notes,
_log,
_run,
logging=logging)
# learning rate decay
scheduler.step()
# use sacred to log testing and validation loss and accuracy
if logging:
test_loss = compute_loss(loss_fcn, model, test_loader)
val_loss = compute_loss(loss_fcn, model, valid_loader)
test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)
val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)
_run.log_scalar("testLoss", test_loss)
_run.log_scalar("validLoss", val_loss)
_run.log_scalar("testAccuracy", test_acc)
_run.log_scalar("validAccuracy", val_acc)
# save a copy of the trained model and make sacred remember it
if save_final_model and logging:
fin_sd = deepcopy(model.state_dict())
torch.save(fin_sd, save_dir + 'final_state_dict.pt')
_run.add_artifact(save_dir + 'final_state_dict.pt')
# recompute the metrics so that this function can return them
train_loss = compute_loss(loss_fcn, model, train_loader)
test_loss = compute_loss(loss_fcn, model, test_loader)
val_loss = compute_loss(loss_fcn, model, valid_loader)
train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)
test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)
val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)
return ((train_loss, test_loss, val_loss), (train_acc, test_acc, val_acc))
# main function
@ex.automain
def train_loop(cuda,
gpu,
base_dir,
dataset,
num_epochs,
batch_size,
low_off_notes,
high_off_notes,
lr,
decay,
optmzr,
regularization,
do_hpsearch,
learning_rates,
decays,
regularizations,
hps_epochs,
architecture,
readout,
gradient_clipping,
jit,
lag,
window,
input_size,
hidden_size,
num_layers,
output_size,
detect_anomaly,
init,
scale,
parity,
t_distrib,
path,
save_init_model,
save_final_model,
save_every_epoch,
_seed,
_log,
_run):
# save artifacts to a temporary directory that gets erased when the experiment is over
save_dir = base_dir + '/tmp_' + str(_seed)
os.system('mkdir ' + save_dir)
save_dir += '/'
# give all random number generators the same seed
_seed_all(_seed)
sklearn_program = architecture == 'REGRESSION'
# regression models and neural networks are trained very differently
if sklearn_program:
sklearn_experiment(dataset,
save_dir,
num_epochs,
high_off_notes,
low_off_notes,
lag,
window,
_seed,
_log,
_run)
# run a pytorch program
else:
model_dict = {'architecture': architecture,
'readout': readout,
'gradient_clipping': gradient_clipping,
'jit': jit,
'lag': lag,
'window': window,
'input_size': input_size,
'hidden_size': hidden_size,
'num_layers': num_layers,
'output_size': output_size
}
initializer = {'init': init,
'scale': scale,
'parity': parity,
't_distrib': t_distrib,
'path': path
}
# if we are debugging we may want to detect autograd anomalies
torch.autograd.set_detect_anomaly(detect_anomaly)
# construct the pytorch data loaders
train_loader, test_loader, valid_loader = get_loader(dataset, batch_size)
# standard training loop
if not do_hpsearch:
# the training loop function returns the metrics achieved at the end of training
# they will be logged by default, no need to do anything with them here
metrics = pytorch_train_loop(cuda,
model_dict,
initializer,
train_loader,
test_loader,
valid_loader,
low_off_notes,
high_off_notes,
optmzr,
lr,
decay,
regularization,
num_epochs,
save_dir,
save_init_model,
save_every_epoch,
save_final_model,
_seed,
_log,
_run)
# only goal here is to find the best hyper parameters
else:
min_test_loss = float('inf')
best_lr = 0
best_dcay = 0
best_reg = 0
hyperparams = product(learning_rates, decays, regularizations)
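            # 5 x 5 x 5 grid over learning rate, decay and regularisation strength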
for rate, dcay, reg in hyperparams:
# train a model with the given hyperparameters
# don't log anything, otherwise we will have a ridiculous amount of extraneous info
metrics = pytorch_train_loop(cuda,
model_dict,
initializer,
train_loader,
test_loader,
                                           valid_loader,
                                           low_off_notes,
                                           high_off_notes,
optmzr,
rate,
dcay,
reg,
hps_epochs,
save_dir,
save_init_model,
save_every_epoch,
save_final_model,
_seed,
_log,
_run,
logging=False)
# loss is first index, test set is second index
test_loss = metrics[0][1]
# compare loss against other hyperparams and update if necessary
if test_loss == test_loss and test_loss < min_test_loss:
min_test_loss = test_loss
best_lr = rate
best_dcay = dcay
best_reg = reg
# record the best hyperparameters
_run.log_scalar("learning_rate", best_lr)
_run.log_scalar("decay", best_dcay)
_run.log_scalar("regularization", best_reg)
# wait a second then remove the temporary directory used for storing artifacts
sleep(1)
os.system('rm -r ' + save_dir)
|
[
"numpy.random.seed",
"src.neural_nets.load_data.get_loader",
"src.neural_nets.metrics.compute_loss",
"src.neural_nets.models.get_model",
"torch.optim.lr_scheduler.LambdaLR",
"torch.autograd.set_detect_anomaly",
"torch.device",
"src.neural_nets.metrics.MaskedBCE",
"random.randint",
"src.regression.logistic_regression.train_models",
"src.neural_nets.metrics.compute_acc",
"random.seed",
"numpy.linspace",
"itertools.product",
"src.regression.logistic_regression.compute_loss",
"numpy.save",
"src.regression.logistic_regression.compute_accuracy",
"torch.manual_seed",
"os.system",
"time.sleep",
"torch.distributions.Uniform",
"torch.cuda.is_available",
"torch.cuda.device",
"os.getcwd",
"numpy.zeros",
"torch.save"
] |
[((916, 941), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (939, 941), False, 'import torch\n'), ((969, 980), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (978, 980), False, 'import os\n'), ((2195, 2231), 'torch.distributions.Uniform', 'torch.distributions.Uniform', (['(0)', '(0.75)'], {}), '(0, 0.75)\n', (2222, 2231), False, 'import torch\n'), ((2545, 2569), 'torch.manual_seed', 'torch.manual_seed', (['_seed'], {}), '(_seed)\n', (2562, 2569), False, 'import torch\n'), ((2574, 2595), 'numpy.random.seed', 'np.random.seed', (['_seed'], {}), '(_seed)\n', (2588, 2595), True, 'import numpy as np\n'), ((2600, 2618), 'random.seed', 'random.seed', (['_seed'], {}), '(_seed)\n', (2611, 2618), False, 'import random\n'), ((3843, 3946), 'src.regression.logistic_regression.train_models', 'reg.train_models', (['dataset', 'num_epochs', 'low_off_notes', 'high_off_notes', '_seed'], {'lag': 'lag', 'window': 'window'}), '(dataset, num_epochs, low_off_notes, high_off_notes, _seed,\n lag=lag, window=window)\n', (3859, 3946), True, 'import src.regression.logistic_regression as reg\n'), ((4136, 4177), 'numpy.zeros', 'np.zeros', (['(num_notes, num_notes * window)'], {}), '((num_notes, num_notes * window))\n', (4144, 4177), True, 'import numpy as np\n'), ((4193, 4221), 'numpy.zeros', 'np.zeros', (['(num_notes * window)'], {}), '(num_notes * window)\n', (4201, 4221), True, 'import numpy as np\n'), ((4630, 4668), 'numpy.save', 'np.save', (["(save_dir + 'coefs.npy')", 'coefs'], {}), "(save_dir + 'coefs.npy', coefs)\n", (4637, 4668), True, 'import numpy as np\n'), ((4673, 4721), 'numpy.save', 'np.save', (["(save_dir + 'intercepts.npy')", 'intercepts'], {}), "(save_dir + 'intercepts.npy', intercepts)\n", (4680, 4721), True, 'import numpy as np\n'), ((4838, 4943), 'src.regression.logistic_regression.compute_loss', 'reg.compute_loss', (['models', 'dataset', '"""traindata"""', 'low_off_notes', 'high_off_notes'], {'lag': 'lag', 'window': 'window'}), "(models, dataset, 'traindata', low_off_notes,\n high_off_notes, lag=lag, window=window)\n", (4854, 4943), True, 'import src.regression.logistic_regression as reg\n'), ((5160, 5264), 'src.regression.logistic_regression.compute_loss', 'reg.compute_loss', (['models', 'dataset', '"""testdata"""', 'low_off_notes', 'high_off_notes'], {'lag': 'lag', 'window': 'window'}), "(models, dataset, 'testdata', low_off_notes, high_off_notes,\n lag=lag, window=window)\n", (5176, 5264), True, 'import src.regression.logistic_regression as reg\n'), ((5476, 5581), 'src.regression.logistic_regression.compute_loss', 'reg.compute_loss', (['models', 'dataset', '"""validdata"""', 'low_off_notes', 'high_off_notes'], {'lag': 'lag', 'window': 'window'}), "(models, dataset, 'validdata', low_off_notes,\n high_off_notes, lag=lag, window=window)\n", (5492, 5581), True, 'import src.regression.logistic_regression as reg\n'), ((5933, 6042), 'src.regression.logistic_regression.compute_accuracy', 'reg.compute_accuracy', (['models', 'dataset', '"""traindata"""', 'low_off_notes', 'high_off_notes'], {'lag': 'lag', 'window': 'window'}), "(models, dataset, 'traindata', low_off_notes,\n high_off_notes, lag=lag, window=window)\n", (5953, 6042), True, 'import src.regression.logistic_regression as reg\n'), ((6276, 6384), 'src.regression.logistic_regression.compute_accuracy', 'reg.compute_accuracy', (['models', 'dataset', '"""testdata"""', 'low_off_notes', 'high_off_notes'], {'lag': 'lag', 'window': 'window'}), "(models, dataset, 'testdata', low_off_notes,\n high_off_notes, lag=lag, 
window=window)\n", (6296, 6384), True, 'import src.regression.logistic_regression as reg\n'), ((6613, 6722), 'src.regression.logistic_regression.compute_accuracy', 'reg.compute_accuracy', (['models', 'dataset', '"""validdata"""', 'low_off_notes', 'high_off_notes'], {'lag': 'lag', 'window': 'window'}), "(models, dataset, 'validdata', low_off_notes,\n high_off_notes, lag=lag, window=window)\n", (6633, 6722), True, 'import src.regression.logistic_regression as reg\n'), ((9611, 9651), 'src.neural_nets.models.get_model', 'get_model', (['model_dict', 'initializer', 'cuda'], {}), '(model_dict, initializer, cuda)\n', (9620, 9651), False, 'from src.neural_nets.models import get_model\n'), ((10048, 10067), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (10060, 10067), False, 'import torch\n'), ((13799, 13842), 'src.neural_nets.metrics.compute_loss', 'compute_loss', (['loss_fcn', 'model', 'train_loader'], {}), '(loss_fcn, model, train_loader)\n', (13811, 13842), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((13859, 13901), 'src.neural_nets.metrics.compute_loss', 'compute_loss', (['loss_fcn', 'model', 'test_loader'], {}), '(loss_fcn, model, test_loader)\n', (13871, 13901), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((13917, 13960), 'src.neural_nets.metrics.compute_loss', 'compute_loss', (['loss_fcn', 'model', 'valid_loader'], {}), '(loss_fcn, model, valid_loader)\n', (13929, 13960), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((13978, 14050), 'src.neural_nets.metrics.compute_acc', 'compute_acc', (['model', 'train_loader'], {'low': 'low_off_notes', 'high': 'high_off_notes'}), '(model, train_loader, low=low_off_notes, high=high_off_notes)\n', (13989, 14050), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((14066, 14137), 'src.neural_nets.metrics.compute_acc', 'compute_acc', (['model', 'test_loader'], {'low': 'low_off_notes', 'high': 'high_off_notes'}), '(model, test_loader, low=low_off_notes, high=high_off_notes)\n', (14077, 14137), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((14152, 14224), 'src.neural_nets.metrics.compute_acc', 'compute_acc', (['model', 'valid_loader'], {'low': 'low_off_notes', 'high': 'high_off_notes'}), '(model, valid_loader, low=low_off_notes, high=high_off_notes)\n', (14163, 14224), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((15487, 15517), 'os.system', 'os.system', (["('mkdir ' + save_dir)"], {}), "('mkdir ' + save_dir)\n", (15496, 15517), False, 'import os\n'), ((20678, 20686), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (20683, 20686), False, 'from time import sleep\n'), ((20691, 20721), 'os.system', 'os.system', (["('rm -r ' + save_dir)"], {}), "('rm -r ' + save_dir)\n", (20700, 20721), False, 'import os\n'), ((1471, 1493), 'numpy.linspace', 'np.linspace', (['(-2)', '(-4)', '(5)'], {}), '(-2, -4, 5)\n', (1482, 1493), True, 'import numpy as np\n'), ((1511, 1537), 'numpy.linspace', 'np.linspace', (['(0)', '(0.1)'], {'num': '(5)'}), '(0, 0.1, num=5)\n', (1522, 1537), True, 'import numpy as np\n'), ((1564, 1590), 'numpy.linspace', 'np.linspace', (['(-2)', '(-4)'], {'num': '(5)'}), '(-2, -4, num=5)\n', (1575, 1590), True, 'import numpy as np\n'), ((8116, 8188), 'src.neural_nets.metrics.compute_acc', 'compute_acc', (['model', 'train_loader'], {'low': 
'low_off_notes', 'high': 'high_off_notes'}), '(model, train_loader, low=low_off_notes, high=high_off_notes)\n', (8127, 8188), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((9811, 9866), 'torch.save', 'torch.save', (['init_sd', "(save_dir + 'initial_state_dict.pt')"], {}), "(init_sd, save_dir + 'initial_state_dict.pt')\n", (9821, 9866), False, 'import torch\n'), ((10141, 10168), 'torch.cuda.device', 'torch.cuda.device', (['dev_name'], {}), '(dev_name)\n', (10158, 10168), False, 'import torch\n'), ((10186, 10208), 'torch.device', 'torch.device', (['dev_name'], {}), '(dev_name)\n', (10198, 10208), False, 'import torch\n'), ((10310, 10400), 'src.neural_nets.metrics.MaskedBCE', 'MaskedBCE', (['regularization'], {'low_off_notes': 'low_off_notes', 'high_off_notes': 'high_off_notes'}), '(regularization, low_off_notes=low_off_notes, high_off_notes=\n high_off_notes)\n', (10319, 10400), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((11796, 11864), 'torch.optim.lr_scheduler.LambdaLR', 'optim.lr_scheduler.LambdaLR', (['optimizer', '(lambda epoch: decay ** epoch)'], {}), '(optimizer, lambda epoch: decay ** epoch)\n', (11823, 11864), True, 'import torch.optim as optim\n'), ((16956, 17005), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['detect_anomaly'], {}), '(detect_anomaly)\n', (16989, 17005), False, 'import torch\n'), ((17102, 17133), 'src.neural_nets.load_data.get_loader', 'get_loader', (['dataset', 'batch_size'], {}), '(dataset, batch_size)\n', (17112, 17133), False, 'from src.neural_nets.load_data import get_loader\n'), ((813, 839), 'random.randint', 'random.randint', (['(0)', '(1000000)'], {}), '(0, 1000000)\n', (827, 839), False, 'import random\n'), ((10502, 10545), 'src.neural_nets.metrics.compute_loss', 'compute_loss', (['loss_fcn', 'model', 'train_loader'], {}), '(loss_fcn, model, train_loader)\n', (10514, 10545), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((10570, 10612), 'src.neural_nets.metrics.compute_loss', 'compute_loss', (['loss_fcn', 'model', 'test_loader'], {}), '(loss_fcn, model, test_loader)\n', (10582, 10612), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((10636, 10679), 'src.neural_nets.metrics.compute_loss', 'compute_loss', (['loss_fcn', 'model', 'valid_loader'], {}), '(loss_fcn, model, valid_loader)\n', (10648, 10679), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((10861, 10933), 'src.neural_nets.metrics.compute_acc', 'compute_acc', (['model', 'train_loader'], {'low': 'low_off_notes', 'high': 'high_off_notes'}), '(model, train_loader, low=low_off_notes, high=high_off_notes)\n', (10872, 10933), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((10957, 11028), 'src.neural_nets.metrics.compute_acc', 'compute_acc', (['model', 'test_loader'], {'low': 'low_off_notes', 'high': 'high_off_notes'}), '(model, test_loader, low=low_off_notes, high=high_off_notes)\n', (10968, 11028), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((11051, 11123), 'src.neural_nets.metrics.compute_acc', 'compute_acc', (['model', 'valid_loader'], {'low': 'low_off_notes', 'high': 'high_off_notes'}), '(model, valid_loader, low=low_off_notes, high=high_off_notes)\n', (11062, 11123), False, 'from src.neural_nets.metrics import MaskedBCE, 
Accuracy, compute_acc, compute_loss\n'), ((13598, 13650), 'torch.save', 'torch.save', (['fin_sd', "(save_dir + 'final_state_dict.pt')"], {}), "(fin_sd, save_dir + 'final_state_dict.pt')\n", (13608, 13650), False, 'import torch\n'), ((18647, 18695), 'itertools.product', 'product', (['learning_rates', 'decays', 'regularizations'], {}), '(learning_rates, decays, regularizations)\n', (18654, 18695), False, 'from itertools import product\n'), ((12884, 12926), 'src.neural_nets.metrics.compute_loss', 'compute_loss', (['loss_fcn', 'model', 'test_loader'], {}), '(loss_fcn, model, test_loader)\n', (12896, 12926), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((12954, 12997), 'src.neural_nets.metrics.compute_loss', 'compute_loss', (['loss_fcn', 'model', 'valid_loader'], {}), '(loss_fcn, model, valid_loader)\n', (12966, 12997), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((13025, 13096), 'src.neural_nets.metrics.compute_acc', 'compute_acc', (['model', 'test_loader'], {'low': 'low_off_notes', 'high': 'high_off_notes'}), '(model, test_loader, low=low_off_notes, high=high_off_notes)\n', (13036, 13096), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n'), ((13123, 13195), 'src.neural_nets.metrics.compute_acc', 'compute_acc', (['model', 'valid_loader'], {'low': 'low_off_notes', 'high': 'high_off_notes'}), '(model, valid_loader, low=low_off_notes, high=high_off_notes)\n', (13134, 13195), False, 'from src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n')]
|
#!/usr/bin/env python
"""polymer.py - prototype bond breaking reactions:
Uses hydrogels to configure the following system
2 reactions:
-A-A-A- + E -> -A-B-A- + E (spatial) r=2.0, k=1.0
{ (structural) k=10000...
-A-B-A- -> -A + A-A-
A-B -> C + C
}
2 particle_types:
E (enzyme)
C (released)
2 topology_particle_types:
A (monomer)
B (unbonded)
1 topology_type
molecule
2 potential_types
harmonic repulsion (pair; all) r0=1.0, k=2.0
harmonic bonding (bond; A-A A-B) r0=1.0, k=5.0
"""
from pathlib import Path
from typing import List, Union
import numpy as np
import readdy
import pandas as pd
import matplotlib.pyplot as plt
import yaml
from softnanotools.logger import Logger
logger = Logger('POLYMER')
from hydrogels.utils.system import System
from hydrogels.utils.topology import Topology, TopologyBond
DEFAULT_DICTIONARY = {
'A': 1.0,
'B': 1.0,
'C': 1.0,
'E': 1.0,
}
def register_bonding(
system: System,
monomer: str = 'A',
unbonded: str = 'B',
length: float = 1.0,
force_constant: float = 2.5,
):
bond = TopologyBond(
'harmonic',
monomer,
monomer,
length=length,
force_constant=force_constant
)
bond.register(system)
bond = TopologyBond(
'harmonic',
monomer,
unbonded,
length=length,
force_constant=0.0
)
bond.register(system)
bond = TopologyBond(
'harmonic',
unbonded,
unbonded,
length=length,
force_constant=0.0
)
bond.register(system)
return
def register_potentials(system: System, spring_constant=2.5, spring_length=1.0):
for pair in [
['A', 'A'],
['A', 'B'],
['A', 'E'],
['A', 'C'],
['B', 'B'],
['B', 'E'],
['B', 'C'],
['E', 'E'],
['E', 'C'],
['C', 'C'],
]:
system.potentials.add_harmonic_repulsion(
*pair,
force_constant=spring_constant,
interaction_distance=spring_length
)
return
def create_topologies(
N: int,
top_type: str = 'molecule',
monomer: str = 'A',
**kwargs
) -> List[Topology]:
result = []
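    # each molecule is a cube of eight 'A' monomers with unit edge length,
    # placed at a random offset in [0, 12.5)^3 (the box kwarg is accepted but unused here)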
for i in range(N):
x, y, z = np.random.random(3) * 12.5
positions = np.array([
[x, y, z],
[x+1.0, y, z],
[x+1.0, y+1.0, z],
[x, y+1.0, z],
[x, y, z+1.0],
[x+1.0, y, z+1.0],
[x+1.0, y+1.0, z+1.0],
[x, y+1.0, z+1.0],
])
molecule = Topology(
top_type,
sequence=[monomer] * 8,
edges=[
(0, 1),
(1, 2),
(2, 3),
(3, 0),
(4, 5),
(5, 6),
(6, 7),
(7, 4),
(0, 4),
(1, 5),
(2, 6),
(3, 7),
],
positions=positions,
)
result.append(molecule)
return result
def create_system(
box: float = 25.0,
diffusion_dictionary: dict = DEFAULT_DICTIONARY,
reaction_radius: float = 1.0,
reaction_rate: float = 1.0,
**kwargs,
):
system = System([box, box, box], units=None)
# register species
system.add_species('C', diffusion_dictionary['C'])
system.add_species('E', diffusion_dictionary['E'])
# register topology species
system.add_topology_species('B', diffusion_dictionary['B'])
system.add_topology_species('A', diffusion_dictionary['A'])
system.topologies.add_type('molecule')
# register bonding
register_bonding(system)
# add potentials
register_potentials(system)
# register enzymatic reaction
system.topologies.add_spatial_reaction(
f'reaction: molecule(A) + (E) -> molecule(B) + (E)',
rate=reaction_rate,
radius=reaction_radius,
)
def reaction_function(topology):
recipe = readdy.StructuralReactionRecipe(topology)
# it is possible for there to be a lone particle in a topology
# when reactions happen very quickly, this step ensures that
# these are converted to C particles which are not topology-bound
vertices = topology.get_graph().get_vertices()
if len(vertices) == 1:
recipe.separate_vertex(0)
recipe.change_particle_type(vertices[0], 'C')
logger.debug('Structural 1')
# register A-B -> C + C reaction
elif len(vertices) == 2:
types = [topology.particle_type_of_vertex(v) for v in vertices]
if 'B' in types:
recipe.separate_vertex(0)
recipe.change_particle_type(vertices[0], 'C')
recipe.change_particle_type(vertices[1], 'C')
logger.debug('Structural 2')
# register -A-B-A- -> -A + A-A-
else:
# insert reaction
edges = topology.get_graph().get_edges()
for edge in edges:
if topology.particle_type_of_vertex(edge[0]) == 'B':
recipe.remove_edge(edge[0], edge[1])
recipe.change_particle_type(edge[0], 'A')
logger.debug('Structural 3A')
return recipe
elif topology.particle_type_of_vertex(edge[1]) == 'B':
recipe.remove_edge(edge[0], edge[1])
recipe.change_particle_type(edge[1], 'A')
logger.debug('Structural 3B')
return recipe
return recipe
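    # register the bond-breaking structural reaction; the very large rate means it
    # fires essentially as soon as a 'B' vertex appears in a topology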
system.topologies.add_structural_reaction(
name="BondBreaking",
topology_type="molecule",
reaction_function=reaction_function,
rate_function=lambda x: 10000.,
)
return system
def run_simulation(
name: str,
stride: int = 100,
timestep: float = 0.01,
length: int = 10000,
**kwargs
) -> Path:
# run equilibration
logger.info('Running equilibration...')
# insert code here
system = create_system(**kwargs)#, reaction=False)
simulation = system.simulation()
box = kwargs['box']
simulation.add_particles(
'E',
np.random.rand(kwargs['enzymes'], 3) * box - (box / 2)
)
# add topologies
topologies = create_topologies(kwargs['molecules'], box=box)
for topology in topologies:
topology.add_to_sim(simulation)
#output = Path(f'{name}.h5')
#if output.exists():
# output.unlink()
#simulation.output_file = str(output.absolute())
#simulation.make_checkpoints(
# stride=stride,
# output_directory="checkpoints/",
# max_n_saves=1
#)
#simulation.evaluate_topology_reactions = False
#simulation.observe.particles(stride)
#simulation.observe.topologies(stride)
#simulation.record_trajectory(stride)
#simulation.run(5 * stride, timestep)
#logger.info('Done!')
    # run the proper simulation
logger.info('Configuring simulation...')
#system = create_system(**kwargs)
output = Path(f'{name}.h5')
if output.exists():
output.unlink()
simulation.output_file = str(output.absolute())
#simulation = system.simulation(output_file=str(output.absolute()))
# skip adding particles since these will be loaded
# add_particles(simulation, **kwargs)
#simulation.load_particles_from_latest_checkpoint(
# 'checkpoints/'
#)
#logger.info('Loaded particles successfully from checkpoint')#
#output = Path(f'{name}.h5')
#if output.exists():
# output.unlink()
#simulation = system.simulation(output_file=str(output.absolute()))
# include observables
simulation.observe.particles(stride)
simulation.observe.topologies(stride)
simulation.record_trajectory(stride)
simulation.reaction_handler = 'Gillespie'
logger.info(f'Running simulation {name}...')
simulation.run(length, timestep)
logger.info('Done!')
return output
def analyse_trajectory(
fname: Union[str, Path],
output: Union[str, Path, None] = None,
timestep: float = 0.01,
) -> pd.DataFrame:
logger.info('Analysing trajectory...')
fname = Path(fname).absolute()
trajectory = readdy.Trajectory(str(fname))
particle_types = trajectory.particle_types
particles = trajectory.read_observable_particles()
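    # as used below, particles[0] holds the recorded time steps and
    # particles[1] the per-frame arrays of particle type ids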
numbers = {
't': particles[0] * timestep,
'A': [],
'B': [],
'E': [],
'C': [],
}
for row in particles[1]:
numbers['A'].append(len(row[row == particle_types['A']]))
numbers['E'].append(len(row[row == particle_types['E']]))
numbers['B'].append(len(row[row == particle_types['B']]))
numbers['C'].append(len(row[row == particle_types['C']]))
results = pd.DataFrame(numbers)
if output:
results.to_csv(output, index=False)
return results
def gather_results(targets: List[Path]) -> pd.DataFrame:
results = pd.DataFrame()
dfs = {
'A': pd.DataFrame(),
'E': pd.DataFrame(),
'B': pd.DataFrame(),
'C': pd.DataFrame()
}
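    # collect one column per seed for every species, then reduce to mean/std per time point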
for i, target in enumerate(targets):
data = pd.read_csv(target)
if i == 0:
results['t'] = data['t']
for kind in dfs:
dfs[kind][i] = data[kind]
for kind in dfs.keys():
results[f'{kind}_mean'] = dfs[kind].mean(axis=1)
results[f'{kind}_std'] = dfs[kind].std(axis=1)
return results
def plot_final(data: pd.DataFrame, name: str = 'polymer'):
fig, ax = plt.subplots()
params = dict(
markevery=len(data) // 30 if len(data) > 50 else 5,
errorevery=len(data) // 30 if len(data) > 50 else 5,
capsize=2
)
ax.errorbar(
data['t'],
data['A_mean'],
yerr=data['A_std'],
fmt='bx-',
label='A',
**params
)
ax.errorbar(
data['t'],
data['B_mean'],
yerr=data['B_std'],
fmt='ro-',
label='B',
**params
)
ax.errorbar(
data['t'],
data['C_mean'],
yerr=data['C_std'],
fmt='go-',
label='C',
**params
)
ax.plot(data['t'], data['E_mean'], 'k:', label='E')
ax.set_xlabel('Timestep', fontsize='xx-large')
ax.set_ylabel('N', fontsize='xx-large')
ax.legend(frameon=False, fontsize='x-large')
fig.tight_layout()
fig.savefig(f'{name}.png')
data.to_csv(f'{name}.csv', index=False)
return
def main(
settings: str,
run: bool = False,
seeds: int = 5,
name: str = 'cube',
**kwargs
):
logger.info('Running cube...')
with open(settings, 'r') as f:
parameters = yaml.safe_load(f)
# insert code here
for seed in range(1, seeds + 1, 1):
prefix = f'{name}.{seed}'
if run:
traj = run_simulation(prefix, **parameters)
analyse_trajectory(
traj,
output=f'{prefix}.csv',
timestep=parameters['timestep']
)
else:
logger.info('Skipping simulation because --run was not passed!')
break
results = gather_results(Path().glob(f'{name}.*.csv'))
logger.info(results)
plot_final(results, name=name)
logger.info('All Done!')
logger.info('Done!')
return
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Enzymatic reaction -A-A-A- + E -> xC + E using ReaDDy'
)
parser.add_argument('settings', default='settings.yml')
parser.add_argument('--run', action='store_true')
parser.add_argument('-s', '--seeds', default=5, type=int)
parser.add_argument('-n', '--name', default='cube')
main(**vars(parser.parse_args()))
|
[
"pandas.DataFrame",
"hydrogels.utils.topology.TopologyBond",
"argparse.ArgumentParser",
"hydrogels.utils.topology.Topology",
"pandas.read_csv",
"softnanotools.logger.Logger",
"hydrogels.utils.system.System",
"pathlib.Path",
"readdy.StructuralReactionRecipe",
"numpy.array",
"yaml.safe_load",
"numpy.random.random",
"numpy.random.rand",
"matplotlib.pyplot.subplots"
] |
[((750, 767), 'softnanotools.logger.Logger', 'Logger', (['"""POLYMER"""'], {}), "('POLYMER')\n", (756, 767), False, 'from softnanotools.logger import Logger\n'), ((1117, 1210), 'hydrogels.utils.topology.TopologyBond', 'TopologyBond', (['"""harmonic"""', 'monomer', 'monomer'], {'length': 'length', 'force_constant': 'force_constant'}), "('harmonic', monomer, monomer, length=length, force_constant=\n force_constant)\n", (1129, 1210), False, 'from hydrogels.utils.topology import Topology, TopologyBond\n'), ((1290, 1368), 'hydrogels.utils.topology.TopologyBond', 'TopologyBond', (['"""harmonic"""', 'monomer', 'unbonded'], {'length': 'length', 'force_constant': '(0.0)'}), "('harmonic', monomer, unbonded, length=length, force_constant=0.0)\n", (1302, 1368), False, 'from hydrogels.utils.topology import Topology, TopologyBond\n'), ((1453, 1532), 'hydrogels.utils.topology.TopologyBond', 'TopologyBond', (['"""harmonic"""', 'unbonded', 'unbonded'], {'length': 'length', 'force_constant': '(0.0)'}), "('harmonic', unbonded, unbonded, length=length, force_constant=0.0)\n", (1465, 1532), False, 'from hydrogels.utils.topology import Topology, TopologyBond\n'), ((3283, 3318), 'hydrogels.utils.system.System', 'System', (['[box, box, box]'], {'units': 'None'}), '([box, box, box], units=None)\n', (3289, 3318), False, 'from hydrogels.utils.system import System\n'), ((7113, 7131), 'pathlib.Path', 'Path', (['f"""{name}.h5"""'], {}), "(f'{name}.h5')\n", (7117, 7131), False, 'from pathlib import Path\n'), ((8850, 8871), 'pandas.DataFrame', 'pd.DataFrame', (['numbers'], {}), '(numbers)\n', (8862, 8871), True, 'import pandas as pd\n'), ((9022, 9036), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9034, 9036), True, 'import pandas as pd\n'), ((9602, 9616), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9614, 9616), True, 'import matplotlib.pyplot as plt\n'), ((11445, 11542), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Enzymatic reaction -A-A-A- + E -> xC + E using ReaDDy"""'}), "(description=\n 'Enzymatic reaction -A-A-A- + E -> xC + E using ReaDDy')\n", (11468, 11542), False, 'import argparse\n'), ((2334, 2513), 'numpy.array', 'np.array', (['[[x, y, z], [x + 1.0, y, z], [x + 1.0, y + 1.0, z], [x, y + 1.0, z], [x, y,\n z + 1.0], [x + 1.0, y, z + 1.0], [x + 1.0, y + 1.0, z + 1.0], [x, y + \n 1.0, z + 1.0]]'], {}), '([[x, y, z], [x + 1.0, y, z], [x + 1.0, y + 1.0, z], [x, y + 1.0, z\n ], [x, y, z + 1.0], [x + 1.0, y, z + 1.0], [x + 1.0, y + 1.0, z + 1.0],\n [x, y + 1.0, z + 1.0]])\n', (2342, 2513), True, 'import numpy as np\n'), ((2607, 2783), 'hydrogels.utils.topology.Topology', 'Topology', (['top_type'], {'sequence': '([monomer] * 8)', 'edges': '[(0, 1), (1, 2), (2, 3), (3, 0), (4, 5), (5, 6), (6, 7), (7, 4), (0, 4), (1,\n 5), (2, 6), (3, 7)]', 'positions': 'positions'}), '(top_type, sequence=[monomer] * 8, edges=[(0, 1), (1, 2), (2, 3), (\n 3, 0), (4, 5), (5, 6), (6, 7), (7, 4), (0, 4), (1, 5), (2, 6), (3, 7)],\n positions=positions)\n', (2615, 2783), False, 'from hydrogels.utils.topology import Topology, TopologyBond\n'), ((4027, 4068), 'readdy.StructuralReactionRecipe', 'readdy.StructuralReactionRecipe', (['topology'], {}), '(topology)\n', (4058, 4068), False, 'import readdy\n'), ((9062, 9076), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9074, 9076), True, 'import pandas as pd\n'), ((9091, 9105), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9103, 9105), True, 'import pandas as pd\n'), ((9120, 9134), 'pandas.DataFrame', 'pd.DataFrame', 
([], {}), '()\n', (9132, 9134), True, 'import pandas as pd\n'), ((9149, 9163), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9161, 9163), True, 'import pandas as pd\n'), ((9227, 9246), 'pandas.read_csv', 'pd.read_csv', (['target'], {}), '(target)\n', (9238, 9246), True, 'import pandas as pd\n'), ((10746, 10763), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (10760, 10763), False, 'import yaml\n'), ((2287, 2306), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (2303, 2306), True, 'import numpy as np\n'), ((8241, 8252), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (8245, 8252), False, 'from pathlib import Path\n'), ((6254, 6290), 'numpy.random.rand', 'np.random.rand', (["kwargs['enzymes']", '(3)'], {}), "(kwargs['enzymes'], 3)\n", (6268, 6290), True, 'import numpy as np\n'), ((11228, 11234), 'pathlib.Path', 'Path', ([], {}), '()\n', (11232, 11234), False, 'from pathlib import Path\n')]
|
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy.special as spc
import math
import matplotlib as mpl
from scipy.special import lpmn
import scipy.integrate as integrate
from scipy.integrate import quad
from numpy import sin, cos
from matplotlib.cm import ScalarMappable
import random
nside = 64
npix = hp.nside2npix(nside)
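# 12 * nside**2 = 49152 pixels at nside = 64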
SIZE = 400
DPI = 100
hpxmap2 = np.zeros(npix, dtype=np.float64)
events = 8000
mult = 2500
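# scatter events*mult points uniformly over the sphere; weighting each hit by
# npix/(mult*events) makes the expected value of every pixel equal to 1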
for i in range(events):
for k in range(mult):
ipix = random.randint(0, npix-1)
#hpxmap2[indices2[i]] += 1.0
hpxmap2[ipix] += npix*1.0/mult/events
#hp_smoothed = hp.sphtfunc.smoothing(hpxmap2, fwhm=np.radians(1), iter = 1)
hp.mollview(hpxmap2, cmap = cm.jet, xsize = SIZE, min = 0.9, max = 1.1, title='Isotropic randomised')
hp.graticule()
plt.savefig("map_iso.png", dpi = DPI)
|
[
"random.randint",
"healpy.mollview",
"healpy.graticule",
"numpy.zeros",
"healpy.nside2npix",
"matplotlib.pyplot.savefig"
] |
[((365, 385), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (378, 385), True, 'import healpy as hp\n'), ((419, 449), 'numpy.zeros', 'np.zeros', (['npix'], {'dtype': 'np.float'}), '(npix, dtype=np.float)\n', (427, 449), True, 'import numpy as np\n'), ((728, 826), 'healpy.mollview', 'hp.mollview', (['hpxmap2'], {'cmap': 'cm.jet', 'xsize': 'SIZE', 'min': '(0.9)', 'max': '(1.1)', 'title': '"""Isotropic randomised"""'}), "(hpxmap2, cmap=cm.jet, xsize=SIZE, min=0.9, max=1.1, title=\n 'Isotropic randomised')\n", (739, 826), True, 'import healpy as hp\n'), ((831, 845), 'healpy.graticule', 'hp.graticule', ([], {}), '()\n', (843, 845), True, 'import healpy as hp\n'), ((846, 881), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""map_iso.png"""'], {'dpi': 'DPI'}), "('map_iso.png', dpi=DPI)\n", (857, 881), True, 'import matplotlib.pyplot as plt\n'), ((542, 569), 'random.randint', 'random.randint', (['(0)', '(npix - 1)'], {}), '(0, npix - 1)\n', (556, 569), False, 'import random\n')]
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.memlet import Memlet
import numpy as np
sr = dace.SDFG('strided_range_test')
s0 = sr.add_state('s0')
A = s0.add_array('A', [2, 16, 4], dace.float32)
B = s0.add_array('B', [16], dace.float32)
tasklet = s0.add_tasklet(
'srtest', {'a'}, {'b'}, """
b[0] = a[0,0] * 2
b[1] = a[0,1] * 2
b[2] = a[1,0] * 2
b[3] = a[1,1] * 2
""")
me, mx = s0.add_map('srmap', dict(i='0:4'))
# Reading A at [1, 2*i:2*i+10:8:2, 3]
s0.add_memlet_path(A,
me,
tasklet,
dst_conn='a',
memlet=Memlet.simple(A, '1, 2*i:2*i+10:8:2, 3'))
# Writing B at [4*i:4*i+4]
s0.add_memlet_path(tasklet,
mx,
B,
src_conn='b',
memlet=Memlet.simple(B, '4*i:4*i+4'))
def test():
print('Strided range tasklet test')
A = np.random.rand(2, 16, 4).astype(np.float32)
B = np.random.rand(16).astype(np.float32)
sr(A=A, B=B)
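    # After running the SDFG, every 2-element chunk of B should equal twice the
    # matching strided 2-element slice of A[1, :, 3] read by the tasklet.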
diffs = [
B[0:2] - 2 * A[1, 0:2, 3], B[2:4] - 2 * A[1, 8:10, 3],
B[4:6] - 2 * A[1, 2:4, 3], B[6:8] - 2 * A[1, 10:12, 3],
B[8:10] - 2 * A[1, 4:6, 3], B[10:12] - 2 * A[1, 12:14, 3],
B[12:14] - 2 * A[1, 6:8, 3], B[14:16] - 2 * A[1, 14:16, 3]
]
diff = np.linalg.norm(np.array(diffs))
print('Differences:', [np.linalg.norm(d) for d in diffs])
assert diff <= 1e-5
if __name__ == "__main__":
test()
|
[
"dace.memlet.Memlet.simple",
"numpy.array",
"numpy.linalg.norm",
"numpy.random.rand",
"dace.SDFG"
] |
[((144, 175), 'dace.SDFG', 'dace.SDFG', (['"""strided_range_test"""'], {}), "('strided_range_test')\n", (153, 175), False, 'import dace\n'), ((644, 684), 'dace.memlet.Memlet.simple', 'Memlet.simple', (['A', '"""1, 2*i:2*i+10:8:2, 3"""'], {}), "(A, '1, 2*i:2*i+10:8:2, 3')\n", (657, 684), False, 'from dace.memlet import Memlet\n'), ((846, 875), 'dace.memlet.Memlet.simple', 'Memlet.simple', (['B', '"""4*i:4*i+4"""'], {}), "(B, '4*i:4*i+4')\n", (859, 875), False, 'from dace.memlet import Memlet\n'), ((1355, 1370), 'numpy.array', 'np.array', (['diffs'], {}), '(diffs)\n', (1363, 1370), True, 'import numpy as np\n'), ((939, 963), 'numpy.random.rand', 'np.random.rand', (['(2)', '(16)', '(4)'], {}), '(2, 16, 4)\n', (953, 963), True, 'import numpy as np\n'), ((991, 1009), 'numpy.random.rand', 'np.random.rand', (['(16)'], {}), '(16)\n', (1005, 1009), True, 'import numpy as np\n'), ((1399, 1416), 'numpy.linalg.norm', 'np.linalg.norm', (['d'], {}), '(d)\n', (1413, 1416), True, 'import numpy as np\n')]
|
from builtins import zip
from builtins import range
import numpy as np
def save_data_regression():
# n = 20 # number of labeled/training data
# D = 1 # dimension of input data
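    # x holds 20 scalar training inputs and y the matching regression targets;
    # both are stored as column vectors.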
x = np.array([[2.083970427750732, -0.821018066101379, -0.617870699182597, -1.183822608860694,\
0.274087442277144, 0.599441729295593, 1.768897919204435, -0.465645549031928,\
0.588852784375935, -0.832982214438054, -0.512106527960363, 0.277883144210116,\
-0.065870426922211, -0.821412363806325, 0.185399443778088, -0.858296174995998,\
0.370786630037059, -1.409869162416639,-0.144668412325022,-0.553299615220374]]).T
y = np.array([[4.549203746331698, 0.371985574437271, 0.711307965514790, -0.013212893618430, 2.255473255338191,\
1.009915749295733, 3.744675937965029, 0.424592771793202, 1.322833652295811, 0.278298293510020,\
0.267229130945574, 2.200112286723833, 1.200609983308969, 0.439971697236094, 2.628580433511255,\
0.503774817336353, 1.942525313820564, 0.579133950013327, 0.670874423968554, 0.377353755100965]]).T
# TEST points
# test points evenly distributed in the interval [-2, 2.5]
xstar = np.array(list(range(-200,250,4)), dtype=np.float64, ndmin=2).T
xstar /= 100
np.savez('Regression/regression_data', x=x, y=y, xstar=xstar)
def save_data_classification():
# Synthetic data for binary classification: two partially overlapping
# Gaussians in two dimensions. 120 data points are generated from two
# Gaussians with different means and covariances. One Gaussian is
# isotropic and contains 2/3 of the data (blue), the other is highly
# correlated and contains 1/3 of the points (red). Note, that the
# labels for the targets are -1/+1 (and not 0/1).
n1 = 80; n2 = 40
x1 = np.array([[0.089450165731417, -0.000700765006939],\
[ 1.171605560541542, 1.177765337635947],\
[ 1.404722675089394, -0.017417915887421],\
[ 0.556096196907929, -1.489370243839215],\
[ 1.213163445267992, 0.044545401368647],\
[ 0.173404742510759, -0.675668036759603],\
[ 2.225008556585363, 0.469803193769368],\
[ 1.470329290331445, 0.887642323697526],\
[ 2.715199208821485, 0.621044646503113],\
[ 0.173640760494328, -0.936054178730056],\
[ 2.038152815025167, 0.262587298316711],\
[ 1.670218375320427, -2.633186886994263],\
[ 0.270098501389591, -0.948779657473203],\
[ 1.396339236138275, -1.114992287201776],\
[-1.482070589718501, -0.654590652482805],\
[-1.493788226272929, 0.382017940248275],\
[ 1.025083846875763, -0.860344923788873],\
[ 0.750316336734172, -0.101864205602753],\
[ 0.184311310148912, -0.258523866245887],\
[ 0.221868667121623, -1.393954437105630],\
[ 2.258881477897777, -0.786806071526136],\
[ 1.211362530151533, -0.423431246029886],\
[ 1.525307406741207, -0.097975367602030],\
[ 0.978930232706465, 0.476154349549524],\
[ 1.347884229346280, -0.248408186838667],\
[ 1.205779546204216, -0.090878327349907],\
[ 0.124388644862000, 0.599612645000285],\
[ 0.784044356662233, 0.356596736271853],\
[ 1.060216683845210, -0.318474838087900],\
[ 1.678114484474938, 0.678735373910422],\
[ 0.973851135005570, 0.024880700382574],\
[ 0.016237746864886, -0.480899874254564],\
[ 0.979406721923196, 0.697708815321128],\
[ 2.217307638531248, -0.956931847027775],\
[ 2.150475558834153, 1.059031573329512],\
[ 1.050502393215048, 0.532141747419667],\
[ 1.210593098269218, -0.318123542280113],\
[ 0.426309208807901, -0.571727978045793],\
[ 0.742552105732714, -0.122112766396886],\
[ 0.757210723588679, 0.862002000781123],\
[-0.431639130160791, -0.763118261936640],\
[-0.748398486307095, -0.603667649379360],\
[ 0.975086541108249, -1.525297946453790],\
[ 0.074503762788667, -0.092155036190678],\
[-0.668889572018935, 1.305400680048752],\
[ 0.725632503186580, 0.096286255882168],\
[-1.042270707136463, 1.297009698531055],\
[ 1.943144890398260, -1.051176922438962],\
[ 1.191448645802597, 0.261349747400059],\
[ 0.778004017505022, -1.046301123377022],\
[ 0.628873970760607, 1.103926629619643],\
[ 1.295113890591403, -0.479519217798997],\
[ 1.522065175744686, 0.993476032742058],\
[ 1.100255776045601, 0.961069161713818],\
[-0.593243832838153, -0.479418953496258],\
[ 2.023196521366462, -0.275055494808503],\
[-0.788103134597041, -1.090707985778480],\
[-0.085168420896236, 1.226858390046108],\
[ 1.691706923196703, -1.153144804780540],\
[ 1.989279380395157, 1.974704317386435],\
[ 0.398799861652602, 3.051291814188982],\
[-0.707217210772927, 0.185505264874794],\
[ 0.697550136765320, 0.222287208720035],\
[ 2.186126058382323, -0.327829143438683],\
[ 1.368068331060010, 1.708138258453435],\
[ 0.883049126818189, -1.334269372314072],\
[ 1.737643116893527, 0.618452933813739],\
[ 2.002228743955222, 0.103381966018445],\
[-0.202638622737115, 0.495024938090909],\
[ 0.543309203560769, -0.802120609128192],\
[-1.796161599703804, -0.054795478648902],\
[ 1.460693782000059, 0.750052171180825],\
[ 0.133277872804608, -1.154891068006907],\
[ 0.203670382700157, -0.480336687666025],\
[-0.278985011909341, 0.030578590108392],\
[ 2.070490237052893, 2.420782751903098],\
[ 0.599023881366768, -1.673208560658818],\
[ 0.140506592147238, 0.804938444757444],\
[-0.980799204108985, -1.847987723222053],\
[-0.102350006007740, -0.822093851434857]])
x2 = np.array([[1.160257057434194, 1.544111720606185],\
[-0.458434595629321, 0.205667827100987],\
[-1.053562345687376, -0.614938261650010],\
[-1.687901005751336, -0.780028275457715],\
[-0.467035854712698, 0.561692074343868],\
[-0.703391186121452, 0.281301267639200],\
[-1.568557779993616, -0.629129013661319],\
[-2.176478596101226, -1.176211396013793],\
[ 0.768109265900499, 1.376893437232103],\
[-0.514772970064353, 0.474264363701950],\
[-1.301924381487904, -0.525179228127957],\
[-1.312024947004566, -0.049469442305628],\
[-0.623417800418214, 0.226456899059445],\
[ 0.020290591370131, 0.374055846421580],\
[-1.002901826023476, 0.076597486786743],\
[-2.553713136283273, -1.731788289864902],\
[-1.788156378743716, -0.742460481943494],\
[-1.119582270077321, -0.256154464598782],\
[-0.423084091988017, 0.395108309297119],\
[-1.645945345460644, -1.216319293733455],\
[ 0.227805611684674, 0.925948003854262],\
[-1.298719171366801, -0.965511301629466],\
[-0.618292817021891, 0.140045887498202],\
[ 0.794935039731655, 1.917830760420081],\
[-0.213709179946402, 0.617751634356751],\
[-0.474251035850546, -0.054854432018974],\
[ 0.056077816960464, 1.046282980014428],\
[ 0.887136693467512, 1.536490289895764],\
[ 1.377161915854166, 1.764872700787871],\
[-0.901195709427863, -0.340855547886558],\
[-0.783104424735034, -0.330927422324566],\
[-1.507139570543989, 0.137504213149820],\
[-0.348999111724700, 0.235931187612453],\
[-0.367309385513174, 0.655996377722041],\
[-0.050622309620072, 0.410969334468070],\
[ 1.734919039047271, 2.611080177877894],\
[-0.567413078682755, -0.458249564234885],\
[-0.622230797920433, 0.258401595566888],\
[-1.642146761593230, -1.138579130251617],\
[-0.285298076847255, 0.085451489400687]])
x = np.concatenate((x1,x2),axis=0)
y = np.concatenate((-np.ones((1,n1)),np.ones((1,n2))),axis=1).T
# For plotting, we superimpose the data points with the posterior equi-probability contour
# lines for the probability of class two given complete information about the generating mechanism.
t1,t2 = np.meshgrid(np.arange(-4,4.1,0.1),np.arange(-4,4.1,0.1))
t = np.array(list(zip(np.reshape(t1,(np.prod(t1.shape),)),np.reshape(t2,(np.prod(t2.shape),))))) # these are the test inputs
n = t.shape[0]
tmm = np.zeros_like(t)
S1 = np.eye(2); S2 = np.array([[1, 0.95], [0.95, 1]])
m1 = np.array([0.75, 0]); m2 = np.array([-0.75, 0])
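    # Evaluate the two class densities on the grid: p1 is the isotropic Gaussian
    # (mean m1, cov S1) scaled by n1; p2 is the correlated Gaussian (mean m2, cov S2)
    # scaled by n2 and 1/sqrt(det(S2)), with det(S2) = 0.0975. The class-two
    # posterior for the contour lines is then p2/(p1+p2).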
tmm[:,0] = t[:,0] - m1[0]; tmm[:,1] = t[:,1] - m1[1]
p1 = n1*np.exp( (-np.dot(tmm,np.linalg.inv(S1))*tmm/2).sum(axis=1) )
tmm[:,0] = t[:,0] - m2[0]; tmm[:,1] = t[:,1] - m2[1]
S2i = np.linalg.inv(S2)
p2 = n2*np.exp( (-np.dot(tmm,S2i)*tmm/2).sum(axis=1) ) / np.sqrt(0.0975)
np.savez('Classification/classification_data', x=x, y=y, xstar=t, x1=x1,x2=x2,t1=t1,t2=t2,p1=p1,p2=p2)
if __name__=='__main__':
    save_data_regression()
#save_data_classification()
|
[
"numpy.zeros_like",
"numpy.ones",
"numpy.prod",
"numpy.array",
"numpy.linalg.inv",
"numpy.arange",
"numpy.dot",
"numpy.eye",
"numpy.savez",
"builtins.range",
"numpy.concatenate",
"numpy.sqrt"
] |
[((1348, 1409), 'numpy.savez', 'np.savez', (['"""Regression/regression_data"""'], {'x': 'x', 'y': 'y', 'xstar': 'xstar'}), "('Regression/regression_data', x=x, y=y, xstar=xstar)\n", (1356, 1409), True, 'import numpy as np\n'), ((1899, 5413), 'numpy.array', 'np.array', (['[[0.089450165731417, -0.000700765006939], [1.171605560541542, \n 1.177765337635947], [1.404722675089394, -0.017417915887421], [\n 0.556096196907929, -1.489370243839215], [1.213163445267992, \n 0.044545401368647], [0.173404742510759, -0.675668036759603], [\n 2.225008556585363, 0.469803193769368], [1.470329290331445, \n 0.887642323697526], [2.715199208821485, 0.621044646503113], [\n 0.173640760494328, -0.936054178730056], [2.038152815025167, \n 0.262587298316711], [1.670218375320427, -2.633186886994263], [\n 0.270098501389591, -0.948779657473203], [1.396339236138275, -\n 1.114992287201776], [-1.482070589718501, -0.654590652482805], [-\n 1.493788226272929, 0.382017940248275], [1.025083846875763, -\n 0.860344923788873], [0.750316336734172, -0.101864205602753], [\n 0.184311310148912, -0.258523866245887], [0.221868667121623, -\n 1.39395443710563], [2.258881477897777, -0.786806071526136], [\n 1.211362530151533, -0.423431246029886], [1.525307406741207, -\n 0.09797536760203], [0.978930232706465, 0.476154349549524], [\n 1.34788422934628, -0.248408186838667], [1.205779546204216, -\n 0.090878327349907], [0.124388644862, 0.599612645000285], [\n 0.784044356662233, 0.356596736271853], [1.06021668384521, -\n 0.3184748380879], [1.678114484474938, 0.678735373910422], [\n 0.97385113500557, 0.024880700382574], [0.016237746864886, -\n 0.480899874254564], [0.979406721923196, 0.697708815321128], [\n 2.217307638531248, -0.956931847027775], [2.150475558834153, \n 1.059031573329512], [1.050502393215048, 0.532141747419667], [\n 1.210593098269218, -0.318123542280113], [0.426309208807901, -\n 0.571727978045793], [0.742552105732714, -0.122112766396886], [\n 0.757210723588679, 0.862002000781123], [-0.431639130160791, -\n 0.76311826193664], [-0.748398486307095, -0.60366764937936], [\n 0.975086541108249, -1.52529794645379], [0.074503762788667, -\n 0.092155036190678], [-0.668889572018935, 1.305400680048752], [\n 0.72563250318658, 0.096286255882168], [-1.042270707136463, \n 1.297009698531055], [1.94314489039826, -1.051176922438962], [\n 1.191448645802597, 0.261349747400059], [0.778004017505022, -\n 1.046301123377022], [0.628873970760607, 1.103926629619643], [\n 1.295113890591403, -0.479519217798997], [1.522065175744686, \n 0.993476032742058], [1.100255776045601, 0.961069161713818], [-\n 0.593243832838153, -0.479418953496258], [2.023196521366462, -\n 0.275055494808503], [-0.788103134597041, -1.09070798577848], [-\n 0.085168420896236, 1.226858390046108], [1.691706923196703, -\n 1.15314480478054], [1.989279380395157, 1.974704317386435], [\n 0.398799861652602, 3.051291814188982], [-0.707217210772927, \n 0.185505264874794], [0.69755013676532, 0.222287208720035], [\n 2.186126058382323, -0.327829143438683], [1.36806833106001, \n 1.708138258453435], [0.883049126818189, -1.334269372314072], [\n 1.737643116893527, 0.618452933813739], [2.002228743955222, \n 0.103381966018445], [-0.202638622737115, 0.495024938090909], [\n 0.543309203560769, -0.802120609128192], [-1.796161599703804, -\n 0.054795478648902], [1.460693782000059, 0.750052171180825], [\n 0.133277872804608, -1.154891068006907], [0.203670382700157, -\n 0.480336687666025], [-0.278985011909341, 0.030578590108392], [\n 2.070490237052893, 2.420782751903098], [0.599023881366768, -\n 1.673208560658818], 
[0.140506592147238, 0.804938444757444], [-\n 0.980799204108985, -1.847987723222053], [-0.10235000600774, -\n 0.822093851434857]]'], {}), '([[0.089450165731417, -0.000700765006939], [1.171605560541542, \n 1.177765337635947], [1.404722675089394, -0.017417915887421], [\n 0.556096196907929, -1.489370243839215], [1.213163445267992, \n 0.044545401368647], [0.173404742510759, -0.675668036759603], [\n 2.225008556585363, 0.469803193769368], [1.470329290331445, \n 0.887642323697526], [2.715199208821485, 0.621044646503113], [\n 0.173640760494328, -0.936054178730056], [2.038152815025167, \n 0.262587298316711], [1.670218375320427, -2.633186886994263], [\n 0.270098501389591, -0.948779657473203], [1.396339236138275, -\n 1.114992287201776], [-1.482070589718501, -0.654590652482805], [-\n 1.493788226272929, 0.382017940248275], [1.025083846875763, -\n 0.860344923788873], [0.750316336734172, -0.101864205602753], [\n 0.184311310148912, -0.258523866245887], [0.221868667121623, -\n 1.39395443710563], [2.258881477897777, -0.786806071526136], [\n 1.211362530151533, -0.423431246029886], [1.525307406741207, -\n 0.09797536760203], [0.978930232706465, 0.476154349549524], [\n 1.34788422934628, -0.248408186838667], [1.205779546204216, -\n 0.090878327349907], [0.124388644862, 0.599612645000285], [\n 0.784044356662233, 0.356596736271853], [1.06021668384521, -\n 0.3184748380879], [1.678114484474938, 0.678735373910422], [\n 0.97385113500557, 0.024880700382574], [0.016237746864886, -\n 0.480899874254564], [0.979406721923196, 0.697708815321128], [\n 2.217307638531248, -0.956931847027775], [2.150475558834153, \n 1.059031573329512], [1.050502393215048, 0.532141747419667], [\n 1.210593098269218, -0.318123542280113], [0.426309208807901, -\n 0.571727978045793], [0.742552105732714, -0.122112766396886], [\n 0.757210723588679, 0.862002000781123], [-0.431639130160791, -\n 0.76311826193664], [-0.748398486307095, -0.60366764937936], [\n 0.975086541108249, -1.52529794645379], [0.074503762788667, -\n 0.092155036190678], [-0.668889572018935, 1.305400680048752], [\n 0.72563250318658, 0.096286255882168], [-1.042270707136463, \n 1.297009698531055], [1.94314489039826, -1.051176922438962], [\n 1.191448645802597, 0.261349747400059], [0.778004017505022, -\n 1.046301123377022], [0.628873970760607, 1.103926629619643], [\n 1.295113890591403, -0.479519217798997], [1.522065175744686, \n 0.993476032742058], [1.100255776045601, 0.961069161713818], [-\n 0.593243832838153, -0.479418953496258], [2.023196521366462, -\n 0.275055494808503], [-0.788103134597041, -1.09070798577848], [-\n 0.085168420896236, 1.226858390046108], [1.691706923196703, -\n 1.15314480478054], [1.989279380395157, 1.974704317386435], [\n 0.398799861652602, 3.051291814188982], [-0.707217210772927, \n 0.185505264874794], [0.69755013676532, 0.222287208720035], [\n 2.186126058382323, -0.327829143438683], [1.36806833106001, \n 1.708138258453435], [0.883049126818189, -1.334269372314072], [\n 1.737643116893527, 0.618452933813739], [2.002228743955222, \n 0.103381966018445], [-0.202638622737115, 0.495024938090909], [\n 0.543309203560769, -0.802120609128192], [-1.796161599703804, -\n 0.054795478648902], [1.460693782000059, 0.750052171180825], [\n 0.133277872804608, -1.154891068006907], [0.203670382700157, -\n 0.480336687666025], [-0.278985011909341, 0.030578590108392], [\n 2.070490237052893, 2.420782751903098], [0.599023881366768, -\n 1.673208560658818], [0.140506592147238, 0.804938444757444], [-\n 0.980799204108985, -1.847987723222053], [-0.10235000600774, -\n 0.822093851434857]])\n', (1907, 
5413), True, 'import numpy as np\n'), ((6069, 7846), 'numpy.array', 'np.array', (['[[1.160257057434194, 1.544111720606185], [-0.458434595629321, \n 0.205667827100987], [-1.053562345687376, -0.61493826165001], [-\n 1.687901005751336, -0.780028275457715], [-0.467035854712698, \n 0.561692074343868], [-0.703391186121452, 0.2813012676392], [-\n 1.568557779993616, -0.629129013661319], [-2.176478596101226, -\n 1.176211396013793], [0.768109265900499, 1.376893437232103], [-\n 0.514772970064353, 0.47426436370195], [-1.301924381487904, -\n 0.525179228127957], [-1.312024947004566, -0.049469442305628], [-\n 0.623417800418214, 0.226456899059445], [0.020290591370131, \n 0.37405584642158], [-1.002901826023476, 0.076597486786743], [-\n 2.553713136283273, -1.731788289864902], [-1.788156378743716, -\n 0.742460481943494], [-1.119582270077321, -0.256154464598782], [-\n 0.423084091988017, 0.395108309297119], [-1.645945345460644, -\n 1.216319293733455], [0.227805611684674, 0.925948003854262], [-\n 1.298719171366801, -0.965511301629466], [-0.618292817021891, \n 0.140045887498202], [0.794935039731655, 1.917830760420081], [-\n 0.213709179946402, 0.617751634356751], [-0.474251035850546, -\n 0.054854432018974], [0.056077816960464, 1.046282980014428], [\n 0.887136693467512, 1.536490289895764], [1.377161915854166, \n 1.764872700787871], [-0.901195709427863, -0.340855547886558], [-\n 0.783104424735034, -0.330927422324566], [-1.507139570543989, \n 0.13750421314982], [-0.3489991117247, 0.235931187612453], [-\n 0.367309385513174, 0.655996377722041], [-0.050622309620072, \n 0.41096933446807], [1.734919039047271, 2.611080177877894], [-\n 0.567413078682755, -0.458249564234885], [-0.622230797920433, \n 0.258401595566888], [-1.64214676159323, -1.138579130251617], [-\n 0.285298076847255, 0.085451489400687]]'], {}), '([[1.160257057434194, 1.544111720606185], [-0.458434595629321, \n 0.205667827100987], [-1.053562345687376, -0.61493826165001], [-\n 1.687901005751336, -0.780028275457715], [-0.467035854712698, \n 0.561692074343868], [-0.703391186121452, 0.2813012676392], [-\n 1.568557779993616, -0.629129013661319], [-2.176478596101226, -\n 1.176211396013793], [0.768109265900499, 1.376893437232103], [-\n 0.514772970064353, 0.47426436370195], [-1.301924381487904, -\n 0.525179228127957], [-1.312024947004566, -0.049469442305628], [-\n 0.623417800418214, 0.226456899059445], [0.020290591370131, \n 0.37405584642158], [-1.002901826023476, 0.076597486786743], [-\n 2.553713136283273, -1.731788289864902], [-1.788156378743716, -\n 0.742460481943494], [-1.119582270077321, -0.256154464598782], [-\n 0.423084091988017, 0.395108309297119], [-1.645945345460644, -\n 1.216319293733455], [0.227805611684674, 0.925948003854262], [-\n 1.298719171366801, -0.965511301629466], [-0.618292817021891, \n 0.140045887498202], [0.794935039731655, 1.917830760420081], [-\n 0.213709179946402, 0.617751634356751], [-0.474251035850546, -\n 0.054854432018974], [0.056077816960464, 1.046282980014428], [\n 0.887136693467512, 1.536490289895764], [1.377161915854166, \n 1.764872700787871], [-0.901195709427863, -0.340855547886558], [-\n 0.783104424735034, -0.330927422324566], [-1.507139570543989, \n 0.13750421314982], [-0.3489991117247, 0.235931187612453], [-\n 0.367309385513174, 0.655996377722041], [-0.050622309620072, \n 0.41096933446807], [1.734919039047271, 2.611080177877894], [-\n 0.567413078682755, -0.458249564234885], [-0.622230797920433, \n 0.258401595566888], [-1.64214676159323, -1.138579130251617], [-\n 0.285298076847255, 0.085451489400687]])\n', (6077, 7846), True, 
'import numpy as np\n'), ((8236, 8268), 'numpy.concatenate', 'np.concatenate', (['(x1, x2)'], {'axis': '(0)'}), '((x1, x2), axis=0)\n', (8250, 8268), True, 'import numpy as np\n'), ((8764, 8780), 'numpy.zeros_like', 'np.zeros_like', (['t'], {}), '(t)\n', (8777, 8780), True, 'import numpy as np\n'), ((8790, 8799), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (8796, 8799), True, 'import numpy as np\n'), ((8806, 8838), 'numpy.array', 'np.array', (['[[1, 0.95], [0.95, 1]]'], {}), '([[1, 0.95], [0.95, 1]])\n', (8814, 8838), True, 'import numpy as np\n'), ((8848, 8867), 'numpy.array', 'np.array', (['[0.75, 0]'], {}), '([0.75, 0])\n', (8856, 8867), True, 'import numpy as np\n'), ((8874, 8894), 'numpy.array', 'np.array', (['[-0.75, 0]'], {}), '([-0.75, 0])\n', (8882, 8894), True, 'import numpy as np\n'), ((9098, 9115), 'numpy.linalg.inv', 'np.linalg.inv', (['S2'], {}), '(S2)\n', (9111, 9115), True, 'import numpy as np\n'), ((9198, 9310), 'numpy.savez', 'np.savez', (['"""Classification/classification_data"""'], {'x': 'x', 'y': 'y', 'xstar': 't', 'x1': 'x1', 'x2': 'x2', 't1': 't1', 't2': 't2', 'p1': 'p1', 'p2': 'p2'}), "('Classification/classification_data', x=x, y=y, xstar=t, x1=x1, x2\n =x2, t1=t1, t2=t2, p1=p1, p2=p2)\n", (9206, 9310), True, 'import numpy as np\n'), ((206, 640), 'numpy.array', 'np.array', (['[[2.083970427750732, -0.821018066101379, -0.617870699182597, -\n 1.183822608860694, 0.274087442277144, 0.599441729295593, \n 1.768897919204435, -0.465645549031928, 0.588852784375935, -\n 0.832982214438054, -0.512106527960363, 0.277883144210116, -\n 0.065870426922211, -0.821412363806325, 0.185399443778088, -\n 0.858296174995998, 0.370786630037059, -1.409869162416639, -\n 0.144668412325022, -0.553299615220374]]'], {}), '([[2.083970427750732, -0.821018066101379, -0.617870699182597, -\n 1.183822608860694, 0.274087442277144, 0.599441729295593, \n 1.768897919204435, -0.465645549031928, 0.588852784375935, -\n 0.832982214438054, -0.512106527960363, 0.277883144210116, -\n 0.065870426922211, -0.821412363806325, 0.185399443778088, -\n 0.858296174995998, 0.370786630037059, -1.409869162416639, -\n 0.144668412325022, -0.553299615220374]])\n', (214, 640), True, 'import numpy as np\n'), ((698, 1118), 'numpy.array', 'np.array', (['[[4.549203746331698, 0.371985574437271, 0.71130796551479, -0.01321289361843,\n 2.255473255338191, 1.009915749295733, 3.744675937965029, \n 0.424592771793202, 1.322833652295811, 0.27829829351002, \n 0.267229130945574, 2.200112286723833, 1.200609983308969, \n 0.439971697236094, 2.628580433511255, 0.503774817336353, \n 1.942525313820564, 0.579133950013327, 0.670874423968554, 0.377353755100965]\n ]'], {}), '([[4.549203746331698, 0.371985574437271, 0.71130796551479, -\n 0.01321289361843, 2.255473255338191, 1.009915749295733, \n 3.744675937965029, 0.424592771793202, 1.322833652295811, \n 0.27829829351002, 0.267229130945574, 2.200112286723833, \n 1.200609983308969, 0.439971697236094, 2.628580433511255, \n 0.503774817336353, 1.942525313820564, 0.579133950013327, \n 0.670874423968554, 0.377353755100965]])\n', (706, 1118), True, 'import numpy as np\n'), ((8560, 8583), 'numpy.arange', 'np.arange', (['(-4)', '(4.1)', '(0.1)'], {}), '(-4, 4.1, 0.1)\n', (8569, 8583), True, 'import numpy as np\n'), ((8582, 8605), 'numpy.arange', 'np.arange', (['(-4)', '(4.1)', '(0.1)'], {}), '(-4, 4.1, 0.1)\n', (8591, 8605), True, 'import numpy as np\n'), ((9177, 9192), 'numpy.sqrt', 'np.sqrt', (['(0.0975)'], {}), '(0.0975)\n', (9184, 9192), True, 'import numpy as np\n'), ((1278, 1297), 'builtins.range', 
'range', (['(-200)', '(250)', '(4)'], {}), '(-200, 250, 4)\n', (1283, 1297), False, 'from builtins import range\n'), ((8308, 8324), 'numpy.ones', 'np.ones', (['(1, n2)'], {}), '((1, n2))\n', (8315, 8324), True, 'import numpy as np\n'), ((8292, 8308), 'numpy.ones', 'np.ones', (['(1, n1)'], {}), '((1, n1))\n', (8299, 8308), True, 'import numpy as np\n'), ((8646, 8663), 'numpy.prod', 'np.prod', (['t1.shape'], {}), '(t1.shape)\n', (8653, 8663), True, 'import numpy as np\n'), ((8682, 8699), 'numpy.prod', 'np.prod', (['t2.shape'], {}), '(t2.shape)\n', (8689, 8699), True, 'import numpy as np\n'), ((8985, 9002), 'numpy.linalg.inv', 'np.linalg.inv', (['S1'], {}), '(S1)\n', (8998, 9002), True, 'import numpy as np\n'), ((9138, 9154), 'numpy.dot', 'np.dot', (['tmm', 'S2i'], {}), '(tmm, S2i)\n', (9144, 9154), True, 'import numpy as np\n')]
|
import torch
import torch.optim as optim
import numpy as np
from PIL import Image
#import pano
import pano_gen as pano
import time
def vecang(vec1, vec2):
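    # Return the angle (in radians) between two vectors.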
vec1 = vec1 / np.sqrt((vec1 ** 2).sum())
vec2 = vec2 / np.sqrt((vec2 ** 2).sum())
return np.arccos(np.dot(vec1, vec2))
def rotatevec(vec, theta):
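    # Rotate a 2-D vector by the angle theta (standard rotation matrix, torch tensors).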
x = vec[0] * torch.cos(theta) - vec[1] * torch.sin(theta)
y = vec[0] * torch.sin(theta) + vec[1] * torch.cos(theta)
return torch.cat([x, y])
def pts_linspace(pa, pb, pts=300):
pa = pa.view(1, 2)
pb = pb.view(1, 2)
w = torch.arange(0, pts + 1, dtype=pa.dtype).view(-1, 1)
return (pa * (pts - w) + pb * w) / pts
def xyz2uv(xy, z=-1):
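    # Project 2-D floor-plan points xy at a fixed height z onto panorama angles (u, v).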
c = torch.sqrt((xy ** 2).sum(1))
u = torch.atan2(xy[:, 1], xy[:, 0]).view(-1, 1)
v = torch.atan2(torch.zeros_like(c) + z, c).view(-1, 1)
return torch.cat([u, v], dim=1)
def uv2idx(uv, w, h):
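    # Convert (u, v) angles to (column, row) pixel coordinates of a w-by-h
    # equirectangular image.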
col = (uv[:, 0] / (2 * np.pi) + 0.5) * w - 0.5
row = (uv[:, 1] / np.pi + 0.5) * h - 0.5
return torch.cat([col.view(-1, 1), row.view(-1, 1)], dim=1)
def wallidx(xy, w, h, z1, z2):
col = (torch.atan2(xy[1], xy[0]) / (2 * np.pi) + 0.5) * w - 0.5
c = torch.sqrt((xy ** 2).sum())
row_s = (torch.atan2(torch.zeros_like(c) + z1, c) / np.pi + 0.5) * h - 0.5
row_t = (torch.atan2(torch.zeros_like(c) + z2, c) / np.pi + 0.5) * h - 0.5
pa = torch.cat([col.view(1), row_s.view(1)])
pb = torch.cat([col.view(1), row_t.view(1)])
return pts_linspace(pa, pb)
def map_coordinates(input, coordinates):
''' PyTorch version of scipy.ndimage.interpolation.map_coordinates
input: (H, W)
coordinates: (2, ...)
'''
h = input.shape[0]
w = input.shape[1]
def _coordinates_pad_wrap(h, w, coordinates):
coordinates[0] = coordinates[0] % h
coordinates[1] = coordinates[1] % w
return coordinates
co_floor = torch.floor(coordinates).long()
co_ceil = torch.ceil(coordinates).long()
d1 = (coordinates[1] - co_floor[1].float())
d2 = (coordinates[0] - co_floor[0].float())
co_floor = _coordinates_pad_wrap(h, w, co_floor)
co_ceil = _coordinates_pad_wrap(h, w, co_ceil)
f00 = input[co_floor[0], co_floor[1]]
f10 = input[co_floor[0], co_ceil[1]]
f01 = input[co_ceil[0], co_floor[1]]
f11 = input[co_ceil[0], co_ceil[1]]
fx1 = f00 + d1 * (f10 - f00)
fx2 = f01 + d1 * (f11 - f01)
return fx1 + d2 * (fx2 - fx1)
def pc2cor_id(pc, pc_vec, pc_theta, pc_height):
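    # Build corner pixel coordinates (1024x512 equirectangular image) from the layout
    # parameters: ceiling corners (z = -1) first, then the matching floor corners
    # (z = pc_height).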
if pc_theta.numel()==1:
ps = torch.stack([
(pc + pc_vec),
(pc + rotatevec(pc_vec, pc_theta)),
(pc - pc_vec),
(pc + rotatevec(pc_vec, pc_theta - np.pi))
])
else:
ps = pc + pc_vec
ps = ps.view(-1,2)
for c_num in range(pc_theta.shape[1]):
ps = torch.cat((ps, ps[c_num:,:]),0)
if (c_num % 2) == 0:
ps[-1,1] = pc_theta[0,c_num]
else:
ps[-1,0] = pc_theta[0,c_num]
ps = torch.cat((ps, ps[-1:,:]),0)
ps[-1,1] = ps[0,1]
return torch.cat([
uv2idx(xyz2uv(ps, z=-1), 1024, 512),
uv2idx(xyz2uv(ps, z=pc_height), 1024, 512),
], dim=0)
def project2sphere_score(pc, pc_vec, pc_theta, pc_height, scoreedg, scorecor, i_step=None):
# Sample corner loss
corid = pc2cor_id(pc, pc_vec, pc_theta, pc_height)
corid_coordinates = torch.stack([corid[:, 1], corid[:, 0]])
loss_cor = -map_coordinates(scorecor, corid_coordinates).mean()
# Sample boundary loss
if pc_theta.numel()==1:
p1 = pc + pc_vec
p2 = pc + rotatevec(pc_vec, pc_theta)
p3 = pc - pc_vec
p4 = pc + rotatevec(pc_vec, pc_theta - np.pi)
segs = [
pts_linspace(p1, p2),
pts_linspace(p2, p3),
pts_linspace(p3, p4),
pts_linspace(p4, p1),
]
else:
ps = pc + pc_vec
ps = ps.view(-1,2)
for c_num in range(pc_theta.shape[1]):
ps = torch.cat((ps, ps[c_num:,:]),0)
if (c_num % 2) == 0:
ps[-1,1] = pc_theta[0,c_num]
else:
ps[-1,0] = pc_theta[0,c_num]
ps = torch.cat((ps, ps[-1:,:]),0)
ps[-1,1] = ps[0,1]
segs = []
for c_num in range(ps.shape[0]-1):
segs.append(pts_linspace(ps[c_num,:], ps[c_num+1,:]))
segs.append(pts_linspace(ps[-1,:], ps[0,:]))
# ceil-wall
loss_ceilwall = 0
for seg in segs:
ceil_uv = xyz2uv(seg, z=-1)
ceil_idx = uv2idx(ceil_uv, 1024, 512)
ceil_coordinates = torch.stack([ceil_idx[:, 1], ceil_idx[:, 0]])
loss_ceilwall -= map_coordinates(scoreedg[..., 1], ceil_coordinates).mean() / len(segs)
# floor-wall
loss_floorwall = 0
for seg in segs:
floor_uv = xyz2uv(seg, z=pc_height)
floor_idx = uv2idx(floor_uv, 1024, 512)
floor_coordinates = torch.stack([floor_idx[:, 1], floor_idx[:, 0]])
loss_floorwall -= map_coordinates(scoreedg[..., 2], floor_coordinates).mean() / len(segs)
#losses = 1.0 * loss_cor + 0.1 * loss_wallwall + 0.5 * loss_ceilwall + 1.0 * loss_floorwall
losses = 1.0 * loss_cor + 1.0 * loss_ceilwall + 1.0 * loss_floorwall
if i_step is not None:
with torch.no_grad():
            print('step %d: %.3f (cor %.3f, ceil %.3f, floor %.3f)' % (
                i_step, losses,
                loss_cor, loss_ceilwall, loss_floorwall))
return losses
def optimize_cor_id(cor_id, scoreedg, scorecor, num_iters=100, verbose=False):
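    # Parameterise the layout from the initial corners (centre pc, first-corner vector
    # pc_vec, extra corner coordinates pc_theta, floor height pc_height), refine the
    # parameters with SGD against the corner/edge score maps, and return the best
    # corner pixel coordinates with ceiling/floor rows interleaved.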
assert scoreedg.shape == (512, 1024, 3)
assert scorecor.shape == (512, 1024)
Z = -1
ceil_cor_id = cor_id[0::2]
floor_cor_id = cor_id[1::2]
ceil_cor_id, ceil_cor_id_xy = pano.constraint_cor_id_same_z(ceil_cor_id, scorecor, Z)
#ceil_cor_id_xyz = np.hstack([ceil_cor_id_xy, np.zeros(4).reshape(-1, 1) + Z])
ceil_cor_id_xyz = np.hstack([ceil_cor_id_xy, np.zeros(ceil_cor_id.shape[0]).reshape(-1, 1) + Z])
# TODO: revise here to general layout
#pc = (ceil_cor_id_xy[0] + ceil_cor_id_xy[2]) / 2
#print(ceil_cor_id_xy)
if abs(ceil_cor_id_xy[0,0]-ceil_cor_id_xy[1,0])>abs(ceil_cor_id_xy[0,1]-ceil_cor_id_xy[1,1]):
ceil_cor_id_xy = np.concatenate((ceil_cor_id_xy[1:,:],ceil_cor_id_xy[:1,:]), axis=0)
#print(cor_id)
#print(ceil_cor_id_xy)
pc = np.mean(ceil_cor_id_xy, axis=0)
pc_vec = ceil_cor_id_xy[0] - pc
pc_theta = vecang(pc_vec, ceil_cor_id_xy[1] - pc)
pc_height = pano.fit_avg_z(floor_cor_id, ceil_cor_id_xy, scorecor)
if ceil_cor_id_xy.shape[0] > 4:
pc_theta = np.array([ceil_cor_id_xy[1,1]])
for c_num in range(2, ceil_cor_id_xy.shape[0]-1):
if (c_num % 2) == 0:
pc_theta = np.append(pc_theta, ceil_cor_id_xy[c_num,0])
else:
pc_theta = np.append(pc_theta, ceil_cor_id_xy[c_num,1])
scoreedg = torch.FloatTensor(scoreedg)
scorecor = torch.FloatTensor(scorecor)
pc = torch.FloatTensor(pc)
pc_vec = torch.FloatTensor(pc_vec)
pc_theta = torch.FloatTensor([pc_theta])
pc_height = torch.FloatTensor([pc_height])
pc.requires_grad = True
pc_vec.requires_grad = True
pc_theta.requires_grad = True
pc_height.requires_grad = True
#print(pc_theta)
#time.sleep(2)
#return cor_id
optimizer = optim.SGD([
pc, pc_vec, pc_theta, pc_height
], lr=1e-3, momentum=0.9)
best = {'score': 1e9}
for i_step in range(num_iters):
i = i_step if verbose else None
optimizer.zero_grad()
score = project2sphere_score(pc, pc_vec, pc_theta, pc_height, scoreedg, scorecor, i)
if score.item() < best['score']:
best['score'] = score.item()
best['pc'] = pc.clone()
best['pc_vec'] = pc_vec.clone()
best['pc_theta'] = pc_theta.clone()
best['pc_height'] = pc_height.clone()
score.backward()
optimizer.step()
pc = best['pc']
pc_vec = best['pc_vec']
pc_theta = best['pc_theta']
pc_height = best['pc_height']
opt_cor_id = pc2cor_id(pc, pc_vec, pc_theta, pc_height).detach().numpy()
split_num = int(opt_cor_id.shape[0]//2)
opt_cor_id = np.stack([opt_cor_id[:split_num], opt_cor_id[split_num:]], axis=1).reshape(split_num*2, 2)
#print(opt_cor_id)
#print(cor_id)
#time.sleep(500)
return opt_cor_id
|
[
"torch.cat",
"torch.cos",
"torch.ceil",
"numpy.mean",
"torch.arange",
"torch.no_grad",
"pano_gen.fit_avg_z",
"torch.FloatTensor",
"numpy.append",
"torch.atan2",
"numpy.stack",
"torch.zeros_like",
"pano_gen.constraint_cor_id_same_z",
"torch.floor",
"numpy.dot",
"numpy.concatenate",
"torch.stack",
"numpy.zeros",
"numpy.array",
"torch.sin",
"torch.optim.SGD"
] |
[((451, 468), 'torch.cat', 'torch.cat', (['[x, y]'], {}), '([x, y])\n', (460, 468), False, 'import torch\n'), ((840, 864), 'torch.cat', 'torch.cat', (['[u, v]'], {'dim': '(1)'}), '([u, v], dim=1)\n', (849, 864), False, 'import torch\n'), ((3390, 3429), 'torch.stack', 'torch.stack', (['[corid[:, 1], corid[:, 0]]'], {}), '([corid[:, 1], corid[:, 0]])\n', (3401, 3429), False, 'import torch\n'), ((5786, 5841), 'pano_gen.constraint_cor_id_same_z', 'pano.constraint_cor_id_same_z', (['ceil_cor_id', 'scorecor', 'Z'], {}), '(ceil_cor_id, scorecor, Z)\n', (5815, 5841), True, 'import pano_gen as pano\n'), ((6396, 6427), 'numpy.mean', 'np.mean', (['ceil_cor_id_xy'], {'axis': '(0)'}), '(ceil_cor_id_xy, axis=0)\n', (6403, 6427), True, 'import numpy as np\n'), ((6534, 6588), 'pano_gen.fit_avg_z', 'pano.fit_avg_z', (['floor_cor_id', 'ceil_cor_id_xy', 'scorecor'], {}), '(floor_cor_id, ceil_cor_id_xy, scorecor)\n', (6548, 6588), True, 'import pano_gen as pano\n'), ((6950, 6977), 'torch.FloatTensor', 'torch.FloatTensor', (['scoreedg'], {}), '(scoreedg)\n', (6967, 6977), False, 'import torch\n'), ((6993, 7020), 'torch.FloatTensor', 'torch.FloatTensor', (['scorecor'], {}), '(scorecor)\n', (7010, 7020), False, 'import torch\n'), ((7030, 7051), 'torch.FloatTensor', 'torch.FloatTensor', (['pc'], {}), '(pc)\n', (7047, 7051), False, 'import torch\n'), ((7065, 7090), 'torch.FloatTensor', 'torch.FloatTensor', (['pc_vec'], {}), '(pc_vec)\n', (7082, 7090), False, 'import torch\n'), ((7106, 7135), 'torch.FloatTensor', 'torch.FloatTensor', (['[pc_theta]'], {}), '([pc_theta])\n', (7123, 7135), False, 'import torch\n'), ((7152, 7182), 'torch.FloatTensor', 'torch.FloatTensor', (['[pc_height]'], {}), '([pc_height])\n', (7169, 7182), False, 'import torch\n'), ((7388, 7456), 'torch.optim.SGD', 'optim.SGD', (['[pc, pc_vec, pc_theta, pc_height]'], {'lr': '(0.001)', 'momentum': '(0.9)'}), '([pc, pc_vec, pc_theta, pc_height], lr=0.001, momentum=0.9)\n', (7397, 7456), True, 'import torch.optim as optim\n'), ((267, 285), 'numpy.dot', 'np.dot', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (273, 285), True, 'import numpy as np\n'), ((3000, 3030), 'torch.cat', 'torch.cat', (['(ps, ps[-1:, :])', '(0)'], {}), '((ps, ps[-1:, :]), 0)\n', (3009, 3030), False, 'import torch\n'), ((4180, 4210), 'torch.cat', 'torch.cat', (['(ps, ps[-1:, :])', '(0)'], {}), '((ps, ps[-1:, :]), 0)\n', (4189, 4210), False, 'import torch\n'), ((4585, 4630), 'torch.stack', 'torch.stack', (['[ceil_idx[:, 1], ceil_idx[:, 0]]'], {}), '([ceil_idx[:, 1], ceil_idx[:, 0]])\n', (4596, 4630), False, 'import torch\n'), ((4909, 4956), 'torch.stack', 'torch.stack', (['[floor_idx[:, 1], floor_idx[:, 0]]'], {}), '([floor_idx[:, 1], floor_idx[:, 0]])\n', (4920, 4956), False, 'import torch\n'), ((6273, 6343), 'numpy.concatenate', 'np.concatenate', (['(ceil_cor_id_xy[1:, :], ceil_cor_id_xy[:1, :])'], {'axis': '(0)'}), '((ceil_cor_id_xy[1:, :], ceil_cor_id_xy[:1, :]), axis=0)\n', (6287, 6343), True, 'import numpy as np\n'), ((6649, 6681), 'numpy.array', 'np.array', (['[ceil_cor_id_xy[1, 1]]'], {}), '([ceil_cor_id_xy[1, 1]])\n', (6657, 6681), True, 'import numpy as np\n'), ((333, 349), 'torch.cos', 'torch.cos', (['theta'], {}), '(theta)\n', (342, 349), False, 'import torch\n'), ((361, 377), 'torch.sin', 'torch.sin', (['theta'], {}), '(theta)\n', (370, 377), False, 'import torch\n'), ((395, 411), 'torch.sin', 'torch.sin', (['theta'], {}), '(theta)\n', (404, 411), False, 'import torch\n'), ((423, 439), 'torch.cos', 'torch.cos', (['theta'], {}), '(theta)\n', (432, 439), False, 'import 
torch\n'), ((560, 600), 'torch.arange', 'torch.arange', (['(0)', '(pts + 1)'], {'dtype': 'pa.dtype'}), '(0, pts + 1, dtype=pa.dtype)\n', (572, 600), False, 'import torch\n'), ((725, 756), 'torch.atan2', 'torch.atan2', (['xy[:, 1]', 'xy[:, 0]'], {}), '(xy[:, 1], xy[:, 0])\n', (736, 756), False, 'import torch\n'), ((1869, 1893), 'torch.floor', 'torch.floor', (['coordinates'], {}), '(coordinates)\n', (1880, 1893), False, 'import torch\n'), ((1915, 1938), 'torch.ceil', 'torch.ceil', (['coordinates'], {}), '(coordinates)\n', (1925, 1938), False, 'import torch\n'), ((2814, 2847), 'torch.cat', 'torch.cat', (['(ps, ps[c_num:, :])', '(0)'], {}), '((ps, ps[c_num:, :]), 0)\n', (2823, 2847), False, 'import torch\n'), ((3994, 4027), 'torch.cat', 'torch.cat', (['(ps, ps[c_num:, :])', '(0)'], {}), '((ps, ps[c_num:, :]), 0)\n', (4003, 4027), False, 'import torch\n'), ((5266, 5281), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5279, 5281), False, 'import torch\n'), ((8260, 8326), 'numpy.stack', 'np.stack', (['[opt_cor_id[:split_num], opt_cor_id[split_num:]]'], {'axis': '(1)'}), '([opt_cor_id[:split_num], opt_cor_id[split_num:]], axis=1)\n', (8268, 8326), True, 'import numpy as np\n'), ((6799, 6844), 'numpy.append', 'np.append', (['pc_theta', 'ceil_cor_id_xy[c_num, 0]'], {}), '(pc_theta, ceil_cor_id_xy[c_num, 0])\n', (6808, 6844), True, 'import numpy as np\n'), ((6889, 6934), 'numpy.append', 'np.append', (['pc_theta', 'ceil_cor_id_xy[c_num, 1]'], {}), '(pc_theta, ceil_cor_id_xy[c_num, 1])\n', (6898, 6934), True, 'import numpy as np\n'), ((789, 808), 'torch.zeros_like', 'torch.zeros_like', (['c'], {}), '(c)\n', (805, 808), False, 'import torch\n'), ((1093, 1118), 'torch.atan2', 'torch.atan2', (['xy[1]', 'xy[0]'], {}), '(xy[1], xy[0])\n', (1104, 1118), False, 'import torch\n'), ((5974, 6004), 'numpy.zeros', 'np.zeros', (['ceil_cor_id.shape[0]'], {}), '(ceil_cor_id.shape[0])\n', (5982, 6004), True, 'import numpy as np\n'), ((1211, 1230), 'torch.zeros_like', 'torch.zeros_like', (['c'], {}), '(c)\n', (1227, 1230), False, 'import torch\n'), ((1290, 1309), 'torch.zeros_like', 'torch.zeros_like', (['c'], {}), '(c)\n', (1306, 1309), False, 'import torch\n')]
|
"""
Prior class for use in pisa.core.Param objects
"""
from __future__ import absolute_import, division
from collections import Iterable, OrderedDict
from numbers import Number
from operator import setitem
import numpy as np
from scipy.interpolate import splev, splrep, interp1d
from scipy.optimize import fminbound
import pint
from pisa import ureg
from pisa.utils.comparisons import isbarenumeric, recursiveEquality
from pisa.utils.fileio import from_file
from pisa.utils.log import logging, set_verbosity
__all__ = ['Prior', 'plot_prior', 'get_prior_bounds', 'test_Prior',
'test_Prior_plot']
__author__ = '<NAME>'
__license__ = '''Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
# TODO: uniform prior should take a constant, such that e.g. discrete parameter
# values when run separately will return valid comparisons across the
# discretely-chosen values (with different uniform priors)
# TODO: use units "natively" (not via strings) internal to the object; only
# serializing to json should convert to strings (and deserializing should
# convert from strings to Units objects)
# TODO: add a "to" and/or "ito" method for converting units akin to those
# methods in Pint quantities.
class Prior(object):
"""Prior information for a parameter. Defines the penalty (in
log-likelihood (llh)) for a parameter being at a given value (within the
prior's valid parameter range). Chi-squared penalties can also be returned
(but the *definition* of a prior here is always in terms of llh).
Note that since this is a penalty, the more negative the prior's log
likelihood, the greater the penalty and the less likely the parameter's
value is.
Valid parameters and properties of the object differ based upon what `kind`
of prior is specified.
Parameters
----------
kind='uniform', llh_offset=...
Uniform prior, no preference for any position relative to the valid
range, which is taken to be [-inf, +inf] [x-units].
kind='gaussian', mean=..., stddev=...
Gaussian prior, defining log likelihood penalty for parameter being at
any particular position. Valid range is [-inf, +inf] [x-units].
kind='linterp', param_vals=..., llh_vals=...
Linearly-interpolated prior. Note that "corners" in linear
interpolation may cause difficulties for some minimizers.
kind='spline', knots=..., coeffs=..., deg=...
Smooth spline interpolation.
Properties
----------
kind
max_at
max_at_str
state
valid_range
Additional properties are defined based on `kind`:
kind='uniform':
llh_offset
kind='gaussian':
mean
stddev
kind='linterp':
param_vals
llh_vals
kind='spline':
knots
coeffs
deg
Methods
-------
chi2
llh
Notes
-----
If the parameter the prior is being applied to has units, the prior's
"x"-values specification must have compatible units.
If you implement a new prior, it ***must*** raise an exception if methods
`llh` or `chi2` are called with a parameter value outside the prior's valid
range, so subtle bugs aren't introduced that appear as an issue in e.g. the
minimizer.
Examples
--------
For spline prior: knots, coeffs, and deg can be found by, e.g.,
scipy.interpolate.splrep; evaluation of spline priors is carried out
internally by scipy.interpolate.splev, so an exact match to the output of
the spline prior can be produced as follows:
>>> from scipy.interpolate import splrep, splev
>>> # Generate sample points
>>> param_vals = np.linspace(-10, 10, 100)
>>> llh_vals = param_vals**2
>>> # Define spline interpolant
>>> knots, coeffs, deg = splrep(param_vals, llh_vals)
>>> # Instantiate spline prior
>>> prior = Prior(kind='spline', knots=knots, coeffs=coeffs, deg=deg)
>>> # Generate sample points for interpolation
>>> param_upsamp = np.linspace(-10, 10, 1000)
>>> # Evaluation of spline using splev
>>> llh_upsamp = splev(param_upsamp, tck=(knots, coeffs, deg), ext=2)
>>> # Check that evaluation of spline matches call to prior.llh()
>>> all(prior.llh(param_upsamp) == llh_upsamp)
True
"""
def __init__(self, kind, **kwargs):
self._state_attrs = ['kind', 'max_at', 'units', 'valid_range']
self.units = None
kind = kind.lower() if isinstance(kind, basestring) else kind
self.chi2 = lambda x: -2*self.llh(x)
# Dispatch the correct initialization method
if kind in [None, 'none', 'uniform']:
self.__init_uniform(**kwargs)
elif kind == 'gaussian':
self.__init_gaussian(**kwargs)
elif kind == 'linterp':
self.__init_linterp(**kwargs)
elif kind == 'spline':
self.__init_spline(**kwargs)
elif kind == 'jeffreys':
self.__init_jeffreys(**kwargs)
else:
raise TypeError('Unknown Prior kind `' + str(kind) + '`')
@property
def units_str(self):
if self.units is None:
return ''
return ' ' + format(ureg(self.units).units, '~').strip()
def __str__(self):
return self._str(self)
def __repr__(self):
return '<' + str(self.__class__) + ' ' + self.__str__() + '>'
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return recursiveEquality(self.state, other.state)
def __ne__(self, other):
return not self.__eq__(other)
@property
def state(self):
state = OrderedDict()
for attr in self._state_attrs:
setitem(state, attr, getattr(self, attr))
return state
def __init_uniform(self, llh_offset=0):
self._state_attrs.extend(['llh_offset'])
self.kind = 'uniform'
self.llh_offset = llh_offset
def llh(x):
return 0.*self.__strip(x) + self.llh_offset
self.llh = llh
self.max_at = np.nan
self.max_at_str = 'no maximum'
self.valid_range = (-np.inf * ureg(self.units),
np.inf * ureg(self.units))
self._str = lambda s: 'uniform prior, llh_offset=%s' %self.llh_offset
def __init_jeffreys(self, A, B):
"""Calculate jeffreys prior as defined in Sivia p.125"""
self.kind = 'jeffreys'
if isinstance(A, Number):
A = A * ureg.dimensionless
if isinstance(B, Number):
B = B * ureg.dimensionless
assert A.dimensionality == B.dimensionality
self._state_attrs.extend(['A', 'B'])
if isinstance(A, ureg.Quantity):
self.units = str(A.units)
assert isinstance(B, ureg.Quantity), '%s' %type(B)
B = B.to(self.units)
self.A = A
self.B = B
def llh(x):
x = self.__strip(self.__convert(x))
A = self.__strip(self.A)
B = self.__strip(self.B)
return - np.log(x) + np.log(np.log(B)-np.log(A))
self.llh = llh
self.max_at = self.A
self.max_at_str = self.__stringify(self.max_at)
self.valid_range = (self.A * ureg(self.units),
self.B * ureg(self.units))
self._str = lambda s: "jeffreys' prior, range [%s,%s]"%(self.A, self.B)
def __init_gaussian(self, mean, stddev):
if isinstance(mean, Number):
mean = mean * ureg.dimensionless
if isinstance(stddev, Number):
stddev = stddev * ureg.dimensionless
assert mean.dimensionality == stddev.dimensionality
self._state_attrs.extend(['mean', 'stddev'])
self.kind = 'gaussian'
if isinstance(mean, ureg.Quantity):
self.units = str(mean.units)
assert isinstance(stddev, ureg.Quantity), \
str(type(stddev))
stddev = stddev.to(self.units)
self.mean = mean
self.stddev = stddev
def llh(x):
x = self.__strip(self.__convert(x))
m = self.__strip(self.mean)
s = self.__strip(self.stddev)
return -(x-m)**2 / (2*s**2)
self.llh = llh
self.max_at = self.mean
self.max_at_str = self.__stringify(self.max_at)
self.valid_range = (-np.inf * ureg(self.units),
np.inf * ureg(self.units))
self._str = lambda s: 'gaussian prior: stddev=%s%s, maximum at %s%s' \
%(self.__stringify(self.stddev), self.units_str,
self.__stringify(self.mean), self.units_str)
def __init_linterp(self, param_vals, llh_vals):
if not isinstance(param_vals, ureg.Quantity):
param_vals = param_vals * ureg.dimensionless
self._state_attrs.extend(['param_vals', 'llh_vals'])
self.kind = 'linterp'
if isinstance(param_vals, ureg.Quantity):
self.units = str(param_vals.units)
self.interp = interp1d(param_vals, llh_vals, kind='linear', copy=True,
bounds_error=True, assume_sorted=False)
self.param_vals = param_vals
self.llh_vals = llh_vals
def llh(x):
x = self.__strip(self.__convert(x))
return self.interp(x)
self.llh = llh
self.max_at = self.param_vals[self.llh_vals == np.max(self.llh_vals)]
self.max_at_str = ', '.join([self.__stringify(v) for v in self.max_at])
self.valid_range = (np.min(self.param_vals) * ureg(self.units),
np.max(self.param_vals) * ureg(self.units))
self._str = lambda s: 'linearly-interpolated prior: valid in [%s, %s]%s, maxima at (%s)%s' \
%(self.__stringify(np.min(self.param_vals)),
self.__stringify(np.max(self.param_vals)), self.units_str,
self.max_at_str, self.units_str)
def __init_spline(self, knots, coeffs, deg, units=None):
if not isinstance(knots, ureg.Quantity):
if units is None:
knots = knots * ureg.dimensionless
else:
knots = ureg.Quantity(np.asarray(knots), units)
self._state_attrs.extend(['knots', 'coeffs', 'deg', 'units'])
self.kind = 'spline'
if isinstance(knots, ureg.Quantity):
self.units = str(knots.units)
self.knots = knots
self.coeffs = coeffs
self.deg = deg
def llh(x):
x = self.__strip(self.__convert(x))
return splev(x, tck=(self.__strip(self.knots), coeffs, deg), ext=2)
self.llh = llh
self.max_at = fminbound(
func=self.__attach_units_to_args(self.chi2),
x1=np.min(self.__strip(self.knots)),
x2=np.max(self.__strip(self.knots)),
)
if self.units is not None:
self.max_at = self.max_at * ureg(self.units)
self.max_at_str = self.__stringify(self.max_at)
self.valid_range = (np.min(self.knots) * ureg(self.units),
np.max(self.knots) * ureg(self.units))
self._str = lambda s: 'spline prior: deg=%d, valid in [%s, %s]%s; max at %s%s' \
%(self.deg, self.__stringify(np.min(self.knots)),
self.__stringify(np.max(self.knots)), self.units_str,
self.max_at_str, self.units_str)
def __check_units(self, param_val):
if self.units is None:
if (isinstance(param_val, ureg.Quantity)
and param_val.dimensionality
!= ureg.dimensionless.dimensionality):
raise TypeError('Passed a value with units (%s), but this'
' prior has no units.' %param_val.units)
else:
if not isinstance(param_val, ureg.Quantity):
raise TypeError('Passed a value without units, but this prior'
' has units (%s).' %self.units)
if param_val.dimensionality != ureg(self.units).dimensionality:
raise TypeError('Passed a value with units (%s);'
' incompatible with prior units (%s)'
%(param_val.units, self.units))
def __convert(self, x):
if self.units is None:
if (isinstance(x, ureg.Quantity)
and x.dimensionality != ureg.dimensionless.dimensionality):
raise TypeError('No units on prior, so cannot understand'
' passed value (with units): %s' %x)
return x
if not isinstance(x, ureg.Quantity):
raise TypeError('Units %s must be present on param values (got'
' %s, type %s instead).'
% (self.units, x, type(x)))
return x.to(self.units)
@staticmethod
def __strip(x):
if isinstance(x, ureg.Quantity):
return x.magnitude
return x
def __stringify(self, x):
if self.units is not None:
x = x.to(self.units).magnitude
return format(x, '0.4e')
# TODO: proper function wrapping, including @wraps decorator
def __attach_units_to_args(self, func):
def newfunc(*args):
if self.units is None:
return func(*args)
u = ureg(self.units)
unitized_args = tuple([u*arg for arg in args])
return func(*unitized_args)
return newfunc
def plot_prior(obj, param=None, x_xform=None, ax1=None, ax2=None, **plt_kwargs):
"""Plot prior for param from template settings, params, or prior filename
or dict.
Arguments
---------
obj : str or dict
if str, interpret as path from which to load a dict
if (nested) dict, (innermost) must be dict of prior properties :
either supply `param` to choose which parameter's prior in `obj`
to plot, or prior dict, in which case `param` need not be specified
param
Param name to plot; necessary if obj is either pipeline settings or
params dict
x_xform
Transform to apply to x-values. E.g., to plot against sin^2 theta, use
x_xform = lambda x: np.sin(x)**2
ax1, ax2
Axes onto which to plot LLH and chi-squared, respectively. If none are
provided, new figures & axes will be created.
plt_kwargs
Keyword arguments to pass on to the plot function
Returns
-------
ax1, ax2
The axes onto which plots were drawn (ax1 = LLH, ax2 = chi^2)
"""
import matplotlib as mpl
mpl.use('pdf')
import matplotlib.pyplot as plt
if isinstance(obj, basestring):
obj = from_file(obj)
if param is not None and param in obj:
obj = obj[param]
if 'prior' in obj:
obj = obj['prior']
prior = Prior(**obj)
logging.info('Plotting Prior: %s', prior)
x0 = prior.valid_range[0]
x1 = prior.valid_range[1]
if prior.kind == 'gaussian':
x0 = max(x0, prior.max_at - 5*prior.stddev)
x1 = min(x1, prior.max_at + 5*prior.stddev)
if np.isinf(x0):
x0 = -1
if np.isinf(x1):
x1 = +1
# if prior.units is None, will result in dimensionless quantity
x = ureg.Quantity(np.linspace(x0, x1, 5000), prior.units)
llh = prior.llh(x)
chi2 = prior.chi2(x)
if x_xform is not None:
x = x_xform(x)
if ax1 is None:
f = plt.figure()
ax1 = f.add_subplot(111)
if ax2 is None:
f = plt.figure()
ax2 = f.add_subplot(111)
ax1.plot(x, llh, **plt_kwargs)
ax2.plot(x, chi2, **plt_kwargs)
ax1.set_title(str(prior), fontsize=8, y=1.02)
ax2.set_title(str(prior), fontsize=8, y=1.02)
ax1.set_xlabel(param)
ax2.set_xlabel(param)
ax1.set_ylabel('LLH')
ax2.set_ylabel(r'$\Delta\chi^2$')
return ax1, ax2
def get_prior_bounds(obj, param=None, stddev=1.0):
"""Obtain confidence regions for CL corresponding to given number of
stddevs from parameter prior.
Parameters
----------
obj : string or Mapping
if str, interpret as path from which to load a dict
if dict, can be:
template settings dict; must supply `param` to choose which to plot
params dict; must supply `param` to choose which to plot
prior dict
param : Param
Name of param for which to get bounds;
necessary if obj is either template settings or params
stddev : float or Iterable of floats
number of stddevs
Returns
-------
bounds : OrderedDict
A dictionary mapping the passed `stddev` values to the corresponding
bounds
"""
if isbarenumeric(stddev):
stddev = [stddev]
elif isinstance(stddev, Iterable):
stddev = list(stddev)
bounds = OrderedDict()
for s in stddev:
bounds[s] = []
if isinstance(obj, basestring):
obj = from_file(obj)
if 'params' in obj:
obj = obj['params']
if param is not None and param in obj:
obj = obj[param]
if 'prior' in obj:
obj = obj['prior']
prior = Prior(**obj)
logging.debug('Getting confidence region from prior: %s', prior)
x0 = prior.valid_range[0]
x1 = prior.valid_range[1]
x = ureg.Quantity(np.linspace(x0, x1, 10000), prior.units)
chi2 = prior.chi2(x)
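    # Walk along the sampled chi2 curve and record every crossing of the s**2 level;
    # each crossing is one endpoint of the +/- s standard-deviation interval.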
for (i, xval) in enumerate(x[:-1]):
for s in stddev:
chi2_level = s**2
if chi2[i] > chi2_level and chi2[i+1] < chi2_level:
bounds[s].append(xval)
elif chi2[i] < chi2_level and chi2[i+1] > chi2_level:
bounds[s].append(x[i+1])
return bounds
# TODO enumerate all the cases rather than picking just a few.
# pylint: disable=unused-variable
def test_Prior():
"""Unit tests for Prior class"""
uniform = Prior(kind='uniform', llh_offset=1.5)
gaussian = Prior(kind='gaussian', mean=10, stddev=1)
x = np.linspace(-10, 10, 100)
y = x**2
linterp = Prior(kind='linterp', param_vals=x*ureg.meter,
llh_vals=y)
param_vals = np.linspace(-10, 10, 100)
llh_vals = x**2
knots, coeffs, deg = splrep(param_vals, llh_vals)
spline = Prior(kind='spline', knots=knots*ureg.foot, coeffs=coeffs,
deg=deg)
param_upsamp = np.linspace(-10, 10, 1000)*ureg.foot
llh_upsamp = splev(param_upsamp, tck=(knots, coeffs, deg), ext=2)
assert all(spline.llh(param_upsamp) == llh_upsamp)
# Asking for param value outside of range should fail
try:
linterp.llh(-1000*ureg.mile)
except ValueError:
pass
else:
assert False
try:
linterp.chi2(-1000*ureg.km)
except ValueError:
pass
else:
assert False
try:
spline.llh(-1000*ureg.meter)
except ValueError:
pass
else:
assert False
try:
spline.chi2(+1000*ureg.meter)
except ValueError:
pass
else:
assert False
# Asking for param value when units were used should fail
try:
spline.llh(10)
except TypeError:
pass
else:
assert False
# ... or vice versa
try:
gaussian.llh(10*ureg.meter)
except (TypeError, pint.DimensionalityError):
pass
else:
assert False
logging.info('<< PASS : test_Prior >>')
# TODO: FIX ME
def test_Prior_plot(ts_fname, param_name='theta23'):
"""Produce plots roughly like NuFIT's 1D chi-squared projections.
Parameters
----------
ts_fname : string
param_name : string
Returns
-------
ax1, ax2 : Matplotlib.axis
The plot axes are returned for further manipulation
"""
import matplotlib as mpl
mpl.use('pdf')
import matplotlib.pyplot as plt
stddev = [1, 2, 3, 4, 5]
chi2 = [s**2 for s in stddev]
ts = from_file(ts_fname)
f1 = plt.figure(1) #,figsize=(8,14),dpi=60)
f2 = plt.figure(2) #,figsize=(8,14),dpi=60)
f1.clf()
f2.clf()
ax1 = f1.add_subplot(111)
ax2 = f2.add_subplot(111)
# Defaults
x_xform = None
xlabel = param_name
xlim = None
ylim = 0, 15
# Special cases
if param_name == 'theta12':
x_xform = lambda x: np.sin(x)**2
xlabel = r'$\sin^2\theta_{12}$'
xlim = 0.2, 0.42
elif param_name == 'theta23':
x_xform = lambda x: np.sin(x)**2
xlabel = r'$\sin^2\theta_{23}$'
xlim = 0.26, 0.74
elif param_name == 'theta13':
x_xform = lambda x: np.sin(x)**2
xlabel = r'$\sin^2\theta_{13}$'
xlim = 0.012, 0.032
elif param_name == 'deltam21':
x_xform = lambda x: x*1e5
xlabel = r'$\Delta m^2_{21} \; {\rm[10^{-5}\;eV^2]}$'
xlim = 6.5, 8.7
elif param_name == 'deltam31':
x_xform = lambda x: np.abs(x)*1e3
xlabel = r'$|\Delta m^2_{31}| \; {\rm[10^{-3}\;eV^2]}$'
xlim = 2.15, 2.8
elif param_name == 'deltacp':
xlabel = r'$\delta_{\rm CP} \; {\rm [deg]}$'
plot_prior(select_hierarchy(ts['params'], normal_hierarchy=True),
param=param_name,
x_xform=x_xform, ax1=ax1, ax2=ax2,
color='r', label=r'${\rm NH}$')
plot_prior(select_hierarchy(ts['params'], normal_hierarchy=False),
param=param_name,
x_xform=x_xform, ax1=ax1, ax2=ax2,
color='b', linestyle='--', label=r'${\rm IH}$')
ax1.set_ylim([-0.5*y for y in ylim[::-1]])
ax2.set_ylim(ylim)
plt.tight_layout()
for ax in [ax1, ax2]:
ax.legend(loc='best', frameon=False)
ax.set_xlim(xlim)
ax.set_xlabel(xlabel)
ax.grid(which='both', b=True)
ax.set_title('')
for c2 in chi2:
ax2.plot(xlim, [c2, c2], 'k-', lw=1.0, alpha=0.4)
return ax1, ax2
if __name__ == '__main__':
set_verbosity(1)
test_Prior()
|
[
"numpy.abs",
"matplotlib.pyplot.figure",
"numpy.sin",
"pisa.utils.log.logging.info",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.tight_layout",
"pisa.utils.log.logging.debug",
"pisa.utils.fileio.from_file",
"numpy.max",
"numpy.linspace",
"scipy.interpolate.splrep",
"pisa.utils.log.set_verbosity",
"numpy.asarray",
"numpy.isinf",
"pisa.utils.comparisons.isbarenumeric",
"numpy.min",
"matplotlib.use",
"collections.OrderedDict",
"pisa.ureg",
"numpy.log",
"pisa.utils.comparisons.recursiveEquality",
"scipy.interpolate.splev"
] |
[((15175, 15189), 'matplotlib.use', 'mpl.use', (['"""pdf"""'], {}), "('pdf')\n", (15182, 15189), True, 'import matplotlib as mpl\n'), ((15439, 15480), 'pisa.utils.log.logging.info', 'logging.info', (['"""Plotting Prior: %s"""', 'prior'], {}), "('Plotting Prior: %s', prior)\n", (15451, 15480), False, 'from pisa.utils.log import logging, set_verbosity\n'), ((15685, 15697), 'numpy.isinf', 'np.isinf', (['x0'], {}), '(x0)\n', (15693, 15697), True, 'import numpy as np\n'), ((15722, 15734), 'numpy.isinf', 'np.isinf', (['x1'], {}), '(x1)\n', (15730, 15734), True, 'import numpy as np\n'), ((17282, 17303), 'pisa.utils.comparisons.isbarenumeric', 'isbarenumeric', (['stddev'], {}), '(stddev)\n', (17295, 17303), False, 'from pisa.utils.comparisons import isbarenumeric, recursiveEquality\n'), ((17414, 17427), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17425, 17427), False, 'from collections import Iterable, OrderedDict\n'), ((17740, 17804), 'pisa.utils.log.logging.debug', 'logging.debug', (['"""Getting confidence region from prior: %s"""', 'prior'], {}), "('Getting confidence region from prior: %s', prior)\n", (17753, 17804), False, 'from pisa.utils.log import logging, set_verbosity\n'), ((18548, 18573), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(100)'], {}), '(-10, 10, 100)\n', (18559, 18573), True, 'import numpy as np\n'), ((18697, 18722), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(100)'], {}), '(-10, 10, 100)\n', (18708, 18722), True, 'import numpy as np\n'), ((18768, 18796), 'scipy.interpolate.splrep', 'splrep', (['param_vals', 'llh_vals'], {}), '(param_vals, llh_vals)\n', (18774, 18796), False, 'from scipy.interpolate import splev, splrep, interp1d\n'), ((18970, 19022), 'scipy.interpolate.splev', 'splev', (['param_upsamp'], {'tck': '(knots, coeffs, deg)', 'ext': '(2)'}), '(param_upsamp, tck=(knots, coeffs, deg), ext=2)\n', (18975, 19022), False, 'from scipy.interpolate import splev, splrep, interp1d\n'), ((19922, 19961), 'pisa.utils.log.logging.info', 'logging.info', (['"""<< PASS : test_Prior >>"""'], {}), "('<< PASS : test_Prior >>')\n", (19934, 19961), False, 'from pisa.utils.log import logging, set_verbosity\n'), ((20337, 20351), 'matplotlib.use', 'mpl.use', (['"""pdf"""'], {}), "('pdf')\n", (20344, 20351), True, 'import matplotlib as mpl\n'), ((20461, 20480), 'pisa.utils.fileio.from_file', 'from_file', (['ts_fname'], {}), '(ts_fname)\n', (20470, 20480), False, 'from pisa.utils.fileio import from_file\n'), ((20490, 20503), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (20500, 20503), True, 'import matplotlib.pyplot as plt\n'), ((20538, 20551), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (20548, 20551), True, 'import matplotlib.pyplot as plt\n'), ((22098, 22116), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22114, 22116), True, 'import matplotlib.pyplot as plt\n'), ((22441, 22457), 'pisa.utils.log.set_verbosity', 'set_verbosity', (['(1)'], {}), '(1)\n', (22454, 22457), False, 'from pisa.utils.log import logging, set_verbosity\n'), ((6028, 6070), 'pisa.utils.comparisons.recursiveEquality', 'recursiveEquality', (['self.state', 'other.state'], {}), '(self.state, other.state)\n', (6045, 6070), False, 'from pisa.utils.comparisons import isbarenumeric, recursiveEquality\n'), ((6191, 6204), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6202, 6204), False, 'from collections import Iterable, OrderedDict\n'), ((9556, 9656), 'scipy.interpolate.interp1d', 'interp1d', (['param_vals', 
'llh_vals'], {'kind': '"""linear"""', 'copy': '(True)', 'bounds_error': '(True)', 'assume_sorted': '(False)'}), "(param_vals, llh_vals, kind='linear', copy=True, bounds_error=True,\n assume_sorted=False)\n", (9564, 9656), False, 'from scipy.interpolate import splev, splrep, interp1d\n'), ((15276, 15290), 'pisa.utils.fileio.from_file', 'from_file', (['obj'], {}), '(obj)\n', (15285, 15290), False, 'from pisa.utils.fileio import from_file\n'), ((15842, 15867), 'numpy.linspace', 'np.linspace', (['x0', 'x1', '(5000)'], {}), '(x0, x1, 5000)\n', (15853, 15867), True, 'import numpy as np\n'), ((16016, 16028), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16026, 16028), True, 'import matplotlib.pyplot as plt\n'), ((16094, 16106), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16104, 16106), True, 'import matplotlib.pyplot as plt\n'), ((17523, 17537), 'pisa.utils.fileio.from_file', 'from_file', (['obj'], {}), '(obj)\n', (17532, 17537), False, 'from pisa.utils.fileio import from_file\n'), ((17887, 17913), 'numpy.linspace', 'np.linspace', (['x0', 'x1', '(10000)'], {}), '(x0, x1, 10000)\n', (17898, 17913), True, 'import numpy as np\n'), ((18916, 18942), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(1000)'], {}), '(-10, 10, 1000)\n', (18927, 18942), True, 'import numpy as np\n'), ((13915, 13931), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (13919, 13931), False, 'from pisa import ureg\n'), ((6685, 6701), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (6689, 6701), False, 'from pisa import ureg\n'), ((6740, 6756), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (6744, 6756), False, 'from pisa import ureg\n'), ((7774, 7790), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (7778, 7790), False, 'from pisa import ureg\n'), ((7829, 7845), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (7833, 7845), False, 'from pisa import ureg\n'), ((8902, 8918), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (8906, 8918), False, 'from pisa import ureg\n'), ((8957, 8973), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (8961, 8973), False, 'from pisa import ureg\n'), ((9934, 9955), 'numpy.max', 'np.max', (['self.llh_vals'], {}), '(self.llh_vals)\n', (9940, 9955), True, 'import numpy as np\n'), ((10065, 10088), 'numpy.min', 'np.min', (['self.param_vals'], {}), '(self.param_vals)\n', (10071, 10088), True, 'import numpy as np\n'), ((10091, 10107), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (10095, 10107), False, 'from pisa import ureg\n'), ((10137, 10160), 'numpy.max', 'np.max', (['self.param_vals'], {}), '(self.param_vals)\n', (10143, 10160), True, 'import numpy as np\n'), ((10163, 10179), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (10167, 10179), False, 'from pisa import ureg\n'), ((11454, 11470), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (11458, 11470), False, 'from pisa import ureg\n'), ((11555, 11573), 'numpy.min', 'np.min', (['self.knots'], {}), '(self.knots)\n', (11561, 11573), True, 'import numpy as np\n'), ((11576, 11592), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (11580, 11592), False, 'from pisa import ureg\n'), ((11622, 11640), 'numpy.max', 'np.max', (['self.knots'], {}), '(self.knots)\n', (11628, 11640), True, 'import numpy as np\n'), ((11643, 11659), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (11647, 11659), False, 'from pisa import ureg\n'), ((20836, 20845), 'numpy.sin', 'np.sin', (['x'], 
{}), '(x)\n', (20842, 20845), True, 'import numpy as np\n'), ((7589, 7598), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (7595, 7598), True, 'import numpy as np\n'), ((10719, 10736), 'numpy.asarray', 'np.asarray', (['knots'], {}), '(knots)\n', (10729, 10736), True, 'import numpy as np\n'), ((12577, 12593), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (12581, 12593), False, 'from pisa import ureg\n'), ((20976, 20985), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (20982, 20985), True, 'import numpy as np\n'), ((7608, 7617), 'numpy.log', 'np.log', (['B'], {}), '(B)\n', (7614, 7617), True, 'import numpy as np\n'), ((7618, 7627), 'numpy.log', 'np.log', (['A'], {}), '(A)\n', (7624, 7627), True, 'import numpy as np\n'), ((10317, 10340), 'numpy.min', 'np.min', (['self.param_vals'], {}), '(self.param_vals)\n', (10323, 10340), True, 'import numpy as np\n'), ((10378, 10401), 'numpy.max', 'np.max', (['self.param_vals'], {}), '(self.param_vals)\n', (10384, 10401), True, 'import numpy as np\n'), ((11795, 11813), 'numpy.min', 'np.min', (['self.knots'], {}), '(self.knots)\n', (11801, 11813), True, 'import numpy as np\n'), ((11851, 11869), 'numpy.max', 'np.max', (['self.knots'], {}), '(self.knots)\n', (11857, 11869), True, 'import numpy as np\n'), ((21117, 21126), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (21123, 21126), True, 'import numpy as np\n'), ((5721, 5737), 'pisa.ureg', 'ureg', (['self.units'], {}), '(self.units)\n', (5725, 5737), False, 'from pisa import ureg\n'), ((21416, 21425), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (21422, 21425), True, 'import numpy as np\n')]
|
import sys
import unittest
import numpy as np
import crocoddyl
import pinocchio
from crocoddyl.utils import (CoMPositionCostDerived, ControlCostDerived, FramePlacementCostDerived,
FrameTranslationCostDerived, FrameVelocityCostDerived, StateCostDerived)
class CostModelAbstractTestCase(unittest.TestCase):
ROBOT_MODEL = None
ROBOT_STATE = None
COST = None
COST_DER = None
def setUp(self):
self.robot_data = self.ROBOT_MODEL.createData()
self.x = self.ROBOT_STATE.rand()
self.u = pinocchio.utils.rand(self.ROBOT_MODEL.nv)
self.data = self.COST.createData(self.robot_data)
self.data_der = self.COST_DER.createData(self.robot_data)
nq, nv = self.ROBOT_MODEL.nq, self.ROBOT_MODEL.nv
pinocchio.forwardKinematics(self.ROBOT_MODEL, self.robot_data, self.x[:nq], self.x[nq:])
pinocchio.computeForwardKinematicsDerivatives(self.ROBOT_MODEL, self.robot_data, self.x[:nq], self.x[nq:],
pinocchio.utils.zero(nv))
pinocchio.computeJointJacobians(self.ROBOT_MODEL, self.robot_data, self.x[:nq])
pinocchio.updateFramePlacements(self.ROBOT_MODEL, self.robot_data)
pinocchio.jacobianCenterOfMass(self.ROBOT_MODEL, self.robot_data, self.x[:nq], False)
def test_dimensions(self):
self.assertEqual(self.COST.state.nx, self.COST_DER.state.nx, "Wrong nx.")
self.assertEqual(self.COST.state.ndx, self.COST_DER.state.ndx, "Wrong ndx.")
self.assertEqual(self.COST.nu, self.COST_DER.nu, "Wrong nu.")
self.assertEqual(self.COST.state.nq, self.COST_DER.state.nq, "Wrong nq.")
self.assertEqual(self.COST.state.nv, self.COST_DER.state.nv, "Wrong nv.")
self.assertEqual(self.COST.activation.nr, self.COST_DER.activation.nr, "Wrong nr.")
def test_calc(self):
        # Run calc for both cost models
self.COST.calc(self.data, self.x, self.u)
self.COST_DER.calc(self.data_der, self.x, self.u)
# Checking the cost value and its residual
self.assertAlmostEqual(self.data.cost, self.data_der.cost, 10, "Wrong cost value.")
self.assertTrue(np.allclose(self.data.r, self.data_der.r, atol=1e-9), "Wrong cost residuals.")
def test_calcDiff(self):
        # Run calcDiff for both cost models
self.COST.calcDiff(self.data, self.x, self.u)
self.COST_DER.calcDiff(self.data_der, self.x, self.u)
# Checking the cost value and its residual
self.assertAlmostEqual(self.data.cost, self.data_der.cost, 10, "Wrong cost value.")
self.assertTrue(np.allclose(self.data.r, self.data_der.r, atol=1e-9), "Wrong cost residuals.")
# Checking the Jacobians and Hessians of the cost
self.assertTrue(np.allclose(self.data.Lx, self.data_der.Lx, atol=1e-9), "Wrong Lx.")
self.assertTrue(np.allclose(self.data.Lu, self.data_der.Lu, atol=1e-9), "Wrong Lu.")
self.assertTrue(np.allclose(self.data.Lxx, self.data_der.Lxx, atol=1e-9), "Wrong Lxx.")
self.assertTrue(np.allclose(self.data.Lxu, self.data_der.Lxu, atol=1e-9), "Wrong Lxu.")
self.assertTrue(np.allclose(self.data.Luu, self.data_der.Luu, atol=1e-9), "Wrong Luu.")
class CostModelSumTestCase(unittest.TestCase):
ROBOT_MODEL = None
ROBOT_STATE = None
COST = None
def setUp(self):
self.robot_data = self.ROBOT_MODEL.createData()
self.x = self.ROBOT_STATE.rand()
self.u = pinocchio.utils.rand(self.ROBOT_MODEL.nv)
self.cost_sum = crocoddyl.CostModelSum(self.ROBOT_STATE)
self.cost_sum.addCost('myCost', self.COST, 1.)
self.data = self.COST.createData(self.robot_data)
self.data_sum = self.cost_sum.createData(self.robot_data)
nq, nv = self.ROBOT_MODEL.nq, self.ROBOT_MODEL.nv
pinocchio.forwardKinematics(self.ROBOT_MODEL, self.robot_data, self.x[:nq], self.x[nq:])
pinocchio.computeForwardKinematicsDerivatives(self.ROBOT_MODEL, self.robot_data, self.x[:nq], self.x[nq:],
pinocchio.utils.zero(nv))
pinocchio.computeJointJacobians(self.ROBOT_MODEL, self.robot_data, self.x[:nq])
pinocchio.updateFramePlacements(self.ROBOT_MODEL, self.robot_data)
pinocchio.jacobianCenterOfMass(self.ROBOT_MODEL, self.robot_data, self.x[:nq], False)
def test_dimensions(self):
self.assertEqual(self.COST.state.nx, self.cost_sum.state.nx, "Wrong nx.")
self.assertEqual(self.COST.state.ndx, self.cost_sum.state.ndx, "Wrong ndx.")
self.assertEqual(self.COST.nu, self.cost_sum.nu, "Wrong nu.")
self.assertEqual(self.COST.state.nq, self.cost_sum.state.nq, "Wrong nq.")
self.assertEqual(self.COST.state.nv, self.cost_sum.state.nv, "Wrong nv.")
self.assertEqual(self.COST.activation.nr, self.cost_sum.nr, "Wrong nr.")
def test_calc(self):
        # Run calc for both cost models
self.COST.calc(self.data, self.x, self.u)
self.cost_sum.calc(self.data_sum, self.x, self.u)
# Checking the cost value and its residual
self.assertAlmostEqual(self.data.cost, self.data_sum.cost, 10, "Wrong cost value.")
self.assertTrue(np.allclose(self.data.r, self.data_sum.r, atol=1e-9), "Wrong cost residuals.")
def test_calcDiff(self):
        # Run calcDiff for both cost models
self.COST.calcDiff(self.data, self.x, self.u)
self.cost_sum.calcDiff(self.data_sum, self.x, self.u)
# Checking the cost value and its residual
self.assertAlmostEqual(self.data.cost, self.data_sum.cost, 10, "Wrong cost value.")
self.assertTrue(np.allclose(self.data.r, self.data_sum.r, atol=1e-9), "Wrong cost residuals.")
# Checking the Jacobians and Hessians of the cost
self.assertTrue(np.allclose(self.data.Lx, self.data_sum.Lx, atol=1e-9), "Wrong Lx.")
self.assertTrue(np.allclose(self.data.Lu, self.data_sum.Lu, atol=1e-9), "Wrong Lu.")
self.assertTrue(np.allclose(self.data.Lxx, self.data_sum.Lxx, atol=1e-9), "Wrong Lxx.")
self.assertTrue(np.allclose(self.data.Lxu, self.data_sum.Lxu, atol=1e-9), "Wrong Lxu.")
self.assertTrue(np.allclose(self.data.Luu, self.data_sum.Luu, atol=1e-9), "Wrong Luu.")
def test_removeCost(self):
self.cost_sum.removeCost("myCost")
self.assertEqual(len(self.cost_sum.costs), 0, "The number of cost items should be zero")
class StateCostTest(CostModelAbstractTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
COST = crocoddyl.CostModelState(ROBOT_STATE)
COST_DER = StateCostDerived(ROBOT_STATE)
class StateCostSumTest(CostModelSumTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
COST = crocoddyl.CostModelState(ROBOT_STATE)
class ControlCostTest(CostModelAbstractTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
COST = crocoddyl.CostModelControl(ROBOT_STATE)
COST_DER = ControlCostDerived(ROBOT_STATE)
class ControlCostSumTest(CostModelSumTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
COST = crocoddyl.CostModelControl(ROBOT_STATE)
class CoMPositionCostTest(CostModelAbstractTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
cref = pinocchio.utils.rand(3)
COST = crocoddyl.CostModelCoMPosition(ROBOT_STATE, cref)
COST_DER = CoMPositionCostDerived(ROBOT_STATE, cref=cref)
class CoMPositionCostSumTest(CostModelSumTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
cref = pinocchio.utils.rand(3)
COST = crocoddyl.CostModelCoMPosition(ROBOT_STATE, cref)
class FramePlacementCostTest(CostModelAbstractTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
Mref = crocoddyl.FramePlacement(ROBOT_MODEL.getFrameId('rleg5_joint'), pinocchio.SE3.Random())
COST = crocoddyl.CostModelFramePlacement(ROBOT_STATE, Mref)
COST_DER = FramePlacementCostDerived(ROBOT_STATE, Mref=Mref)
class FramePlacementCostSumTest(CostModelSumTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
Mref = crocoddyl.FramePlacement(ROBOT_MODEL.getFrameId('rleg5_joint'), pinocchio.SE3.Random())
COST = crocoddyl.CostModelFramePlacement(ROBOT_STATE, Mref)
class FrameTranslationCostTest(CostModelAbstractTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
xref = crocoddyl.FrameTranslation(ROBOT_MODEL.getFrameId('rleg5_joint'), pinocchio.utils.rand(3))
COST = crocoddyl.CostModelFrameTranslation(ROBOT_STATE, xref)
COST_DER = FrameTranslationCostDerived(ROBOT_STATE, xref=xref)
class FrameTranslationCostSumTest(CostModelSumTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
xref = crocoddyl.FrameTranslation(ROBOT_MODEL.getFrameId('rleg5_joint'), pinocchio.utils.rand(3))
COST = crocoddyl.CostModelFrameTranslation(ROBOT_STATE, xref)
class FrameVelocityCostTest(CostModelAbstractTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
vref = crocoddyl.FrameMotion(ROBOT_MODEL.getFrameId('rleg5_joint'), pinocchio.Motion.Random())
COST = crocoddyl.CostModelFrameVelocity(ROBOT_STATE, vref)
COST_DER = FrameVelocityCostDerived(ROBOT_STATE, vref=vref)
class FrameVelocityCostSumTest(CostModelSumTestCase):
ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
vref = crocoddyl.FrameMotion(ROBOT_MODEL.getFrameId('rleg5_joint'), pinocchio.Motion.Random())
COST = crocoddyl.CostModelFrameVelocity(ROBOT_STATE, vref)
if __name__ == '__main__':
test_classes_to_run = [
StateCostTest, StateCostSumTest, ControlCostTest, ControlCostSumTest, CoMPositionCostTest,
CoMPositionCostSumTest, FramePlacementCostTest, FramePlacementCostSumTest, FrameTranslationCostTest,
FrameTranslationCostSumTest, FrameVelocityCostTest, FrameVelocityCostSumTest
]
loader = unittest.TestLoader()
suites_list = []
for test_class in test_classes_to_run:
suite = loader.loadTestsFromTestCase(test_class)
suites_list.append(suite)
big_suite = unittest.TestSuite(suites_list)
runner = unittest.TextTestRunner()
results = runner.run(big_suite)
sys.exit(not results.wasSuccessful())
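# Sketch of how an additional cost model could be covered by this suite (illustrative,
# following the pattern of the classes above): subclass the abstract test case, point
# COST at the crocoddyl cost model and COST_DER at its reference implementation from
# crocoddyl.utils, and add the new class to `test_classes_to_run`, e.g.
#
#   class MyStateCostTest(CostModelAbstractTestCase):
#       ROBOT_MODEL = pinocchio.buildSampleModelHumanoidRandom()
#       ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
#       COST = crocoddyl.CostModelState(ROBOT_STATE)
#       COST_DER = StateCostDerived(ROBOT_STATE)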
|
[
"numpy.allclose",
"pinocchio.forwardKinematics",
"unittest.TestLoader",
"pinocchio.jacobianCenterOfMass",
"crocoddyl.CostModelCoMPosition",
"crocoddyl.utils.FrameVelocityCostDerived",
"pinocchio.SE3.Random",
"crocoddyl.utils.FrameTranslationCostDerived",
"crocoddyl.CostModelState",
"pinocchio.computeJointJacobians",
"crocoddyl.CostModelSum",
"pinocchio.buildSampleModelHumanoidRandom",
"pinocchio.Motion.Random",
"crocoddyl.utils.FramePlacementCostDerived",
"crocoddyl.StateMultibody",
"crocoddyl.utils.StateCostDerived",
"crocoddyl.utils.ControlCostDerived",
"crocoddyl.utils.CoMPositionCostDerived",
"unittest.TestSuite",
"crocoddyl.CostModelFramePlacement",
"pinocchio.updateFramePlacements",
"unittest.TextTestRunner",
"crocoddyl.CostModelFrameTranslation",
"crocoddyl.CostModelControl",
"pinocchio.utils.zero",
"crocoddyl.CostModelFrameVelocity",
"pinocchio.utils.rand"
] |
[((6529, 6571), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (6569, 6571), False, 'import pinocchio\n'), ((6590, 6627), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (6614, 6627), False, 'import crocoddyl\n'), ((6640, 6677), 'crocoddyl.CostModelState', 'crocoddyl.CostModelState', (['ROBOT_STATE'], {}), '(ROBOT_STATE)\n', (6664, 6677), False, 'import crocoddyl\n'), ((6693, 6722), 'crocoddyl.utils.StateCostDerived', 'StateCostDerived', (['ROBOT_STATE'], {}), '(ROBOT_STATE)\n', (6709, 6722), False, 'from crocoddyl.utils import CoMPositionCostDerived, ControlCostDerived, FramePlacementCostDerived, FrameTranslationCostDerived, FrameVelocityCostDerived, StateCostDerived\n'), ((6789, 6831), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (6829, 6831), False, 'import pinocchio\n'), ((6850, 6887), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (6874, 6887), False, 'import crocoddyl\n'), ((6900, 6937), 'crocoddyl.CostModelState', 'crocoddyl.CostModelState', (['ROBOT_STATE'], {}), '(ROBOT_STATE)\n', (6924, 6937), False, 'import crocoddyl\n'), ((7008, 7050), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (7048, 7050), False, 'import pinocchio\n'), ((7069, 7106), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (7093, 7106), False, 'import crocoddyl\n'), ((7119, 7158), 'crocoddyl.CostModelControl', 'crocoddyl.CostModelControl', (['ROBOT_STATE'], {}), '(ROBOT_STATE)\n', (7145, 7158), False, 'import crocoddyl\n'), ((7174, 7205), 'crocoddyl.utils.ControlCostDerived', 'ControlCostDerived', (['ROBOT_STATE'], {}), '(ROBOT_STATE)\n', (7192, 7205), False, 'from crocoddyl.utils import CoMPositionCostDerived, ControlCostDerived, FramePlacementCostDerived, FrameTranslationCostDerived, FrameVelocityCostDerived, StateCostDerived\n'), ((7274, 7316), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (7314, 7316), False, 'import pinocchio\n'), ((7335, 7372), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (7359, 7372), False, 'import crocoddyl\n'), ((7385, 7424), 'crocoddyl.CostModelControl', 'crocoddyl.CostModelControl', (['ROBOT_STATE'], {}), '(ROBOT_STATE)\n', (7411, 7424), False, 'import crocoddyl\n'), ((7499, 7541), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (7539, 7541), False, 'import pinocchio\n'), ((7560, 7597), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (7584, 7597), False, 'import crocoddyl\n'), ((7610, 7633), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['(3)'], {}), '(3)\n', (7630, 7633), False, 'import pinocchio\n'), ((7645, 7694), 'crocoddyl.CostModelCoMPosition', 'crocoddyl.CostModelCoMPosition', (['ROBOT_STATE', 'cref'], {}), '(ROBOT_STATE, cref)\n', (7675, 7694), False, 'import crocoddyl\n'), ((7710, 7756), 'crocoddyl.utils.CoMPositionCostDerived', 'CoMPositionCostDerived', (['ROBOT_STATE'], {'cref': 'cref'}), '(ROBOT_STATE, cref=cref)\n', (7732, 7756), False, 'from crocoddyl.utils import CoMPositionCostDerived, ControlCostDerived, FramePlacementCostDerived, FrameTranslationCostDerived, FrameVelocityCostDerived, StateCostDerived\n'), ((7829, 7871), 
'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (7869, 7871), False, 'import pinocchio\n'), ((7890, 7927), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (7914, 7927), False, 'import crocoddyl\n'), ((7940, 7963), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['(3)'], {}), '(3)\n', (7960, 7963), False, 'import pinocchio\n'), ((7975, 8024), 'crocoddyl.CostModelCoMPosition', 'crocoddyl.CostModelCoMPosition', (['ROBOT_STATE', 'cref'], {}), '(ROBOT_STATE, cref)\n', (8005, 8024), False, 'import crocoddyl\n'), ((8102, 8144), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (8142, 8144), False, 'import pinocchio\n'), ((8163, 8200), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (8187, 8200), False, 'import crocoddyl\n'), ((8312, 8364), 'crocoddyl.CostModelFramePlacement', 'crocoddyl.CostModelFramePlacement', (['ROBOT_STATE', 'Mref'], {}), '(ROBOT_STATE, Mref)\n', (8345, 8364), False, 'import crocoddyl\n'), ((8380, 8429), 'crocoddyl.utils.FramePlacementCostDerived', 'FramePlacementCostDerived', (['ROBOT_STATE'], {'Mref': 'Mref'}), '(ROBOT_STATE, Mref=Mref)\n', (8405, 8429), False, 'from crocoddyl.utils import CoMPositionCostDerived, ControlCostDerived, FramePlacementCostDerived, FrameTranslationCostDerived, FrameVelocityCostDerived, StateCostDerived\n'), ((8505, 8547), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (8545, 8547), False, 'import pinocchio\n'), ((8566, 8603), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (8590, 8603), False, 'import crocoddyl\n'), ((8715, 8767), 'crocoddyl.CostModelFramePlacement', 'crocoddyl.CostModelFramePlacement', (['ROBOT_STATE', 'Mref'], {}), '(ROBOT_STATE, Mref)\n', (8748, 8767), False, 'import crocoddyl\n'), ((8847, 8889), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (8887, 8889), False, 'import pinocchio\n'), ((8908, 8945), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (8932, 8945), False, 'import crocoddyl\n'), ((9060, 9114), 'crocoddyl.CostModelFrameTranslation', 'crocoddyl.CostModelFrameTranslation', (['ROBOT_STATE', 'xref'], {}), '(ROBOT_STATE, xref)\n', (9095, 9114), False, 'import crocoddyl\n'), ((9130, 9181), 'crocoddyl.utils.FrameTranslationCostDerived', 'FrameTranslationCostDerived', (['ROBOT_STATE'], {'xref': 'xref'}), '(ROBOT_STATE, xref=xref)\n', (9157, 9181), False, 'from crocoddyl.utils import CoMPositionCostDerived, ControlCostDerived, FramePlacementCostDerived, FrameTranslationCostDerived, FrameVelocityCostDerived, StateCostDerived\n'), ((9259, 9301), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (9299, 9301), False, 'import pinocchio\n'), ((9320, 9357), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (9344, 9357), False, 'import crocoddyl\n'), ((9472, 9526), 'crocoddyl.CostModelFrameTranslation', 'crocoddyl.CostModelFrameTranslation', (['ROBOT_STATE', 'xref'], {}), '(ROBOT_STATE, xref)\n', (9507, 9526), False, 'import crocoddyl\n'), ((9603, 9645), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (9643, 9645), False, 'import pinocchio\n'), 
((9664, 9701), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (9688, 9701), False, 'import crocoddyl\n'), ((9813, 9864), 'crocoddyl.CostModelFrameVelocity', 'crocoddyl.CostModelFrameVelocity', (['ROBOT_STATE', 'vref'], {}), '(ROBOT_STATE, vref)\n', (9845, 9864), False, 'import crocoddyl\n'), ((9880, 9928), 'crocoddyl.utils.FrameVelocityCostDerived', 'FrameVelocityCostDerived', (['ROBOT_STATE'], {'vref': 'vref'}), '(ROBOT_STATE, vref=vref)\n', (9904, 9928), False, 'from crocoddyl.utils import CoMPositionCostDerived, ControlCostDerived, FramePlacementCostDerived, FrameTranslationCostDerived, FrameVelocityCostDerived, StateCostDerived\n'), ((10003, 10045), 'pinocchio.buildSampleModelHumanoidRandom', 'pinocchio.buildSampleModelHumanoidRandom', ([], {}), '()\n', (10043, 10045), False, 'import pinocchio\n'), ((10064, 10101), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (10088, 10101), False, 'import crocoddyl\n'), ((10213, 10264), 'crocoddyl.CostModelFrameVelocity', 'crocoddyl.CostModelFrameVelocity', (['ROBOT_STATE', 'vref'], {}), '(ROBOT_STATE, vref)\n', (10245, 10264), False, 'import crocoddyl\n'), ((10634, 10655), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (10653, 10655), False, 'import unittest\n'), ((10827, 10858), 'unittest.TestSuite', 'unittest.TestSuite', (['suites_list'], {}), '(suites_list)\n', (10845, 10858), False, 'import unittest\n'), ((10872, 10897), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (10895, 10897), False, 'import unittest\n'), ((556, 597), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['self.ROBOT_MODEL.nv'], {}), '(self.ROBOT_MODEL.nv)\n', (576, 597), False, 'import pinocchio\n'), ((790, 882), 'pinocchio.forwardKinematics', 'pinocchio.forwardKinematics', (['self.ROBOT_MODEL', 'self.robot_data', 'self.x[:nq]', 'self.x[nq:]'], {}), '(self.ROBOT_MODEL, self.robot_data, self.x[:nq],\n self.x[nq:])\n', (817, 882), False, 'import pinocchio\n'), ((1082, 1161), 'pinocchio.computeJointJacobians', 'pinocchio.computeJointJacobians', (['self.ROBOT_MODEL', 'self.robot_data', 'self.x[:nq]'], {}), '(self.ROBOT_MODEL, self.robot_data, self.x[:nq])\n', (1113, 1161), False, 'import pinocchio\n'), ((1170, 1236), 'pinocchio.updateFramePlacements', 'pinocchio.updateFramePlacements', (['self.ROBOT_MODEL', 'self.robot_data'], {}), '(self.ROBOT_MODEL, self.robot_data)\n', (1201, 1236), False, 'import pinocchio\n'), ((1245, 1335), 'pinocchio.jacobianCenterOfMass', 'pinocchio.jacobianCenterOfMass', (['self.ROBOT_MODEL', 'self.robot_data', 'self.x[:nq]', '(False)'], {}), '(self.ROBOT_MODEL, self.robot_data, self.x[:\n nq], False)\n', (1275, 1335), False, 'import pinocchio\n'), ((3491, 3532), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['self.ROBOT_MODEL.nv'], {}), '(self.ROBOT_MODEL.nv)\n', (3511, 3532), False, 'import pinocchio\n'), ((3558, 3598), 'crocoddyl.CostModelSum', 'crocoddyl.CostModelSum', (['self.ROBOT_STATE'], {}), '(self.ROBOT_STATE)\n', (3580, 3598), False, 'import crocoddyl\n'), ((3846, 3938), 'pinocchio.forwardKinematics', 'pinocchio.forwardKinematics', (['self.ROBOT_MODEL', 'self.robot_data', 'self.x[:nq]', 'self.x[nq:]'], {}), '(self.ROBOT_MODEL, self.robot_data, self.x[:nq],\n self.x[nq:])\n', (3873, 3938), False, 'import pinocchio\n'), ((4138, 4217), 'pinocchio.computeJointJacobians', 'pinocchio.computeJointJacobians', (['self.ROBOT_MODEL', 'self.robot_data', 'self.x[:nq]'], {}), '(self.ROBOT_MODEL, self.robot_data, 
self.x[:nq])\n', (4169, 4217), False, 'import pinocchio\n'), ((4226, 4292), 'pinocchio.updateFramePlacements', 'pinocchio.updateFramePlacements', (['self.ROBOT_MODEL', 'self.robot_data'], {}), '(self.ROBOT_MODEL, self.robot_data)\n', (4257, 4292), False, 'import pinocchio\n'), ((4301, 4391), 'pinocchio.jacobianCenterOfMass', 'pinocchio.jacobianCenterOfMass', (['self.ROBOT_MODEL', 'self.robot_data', 'self.x[:nq]', '(False)'], {}), '(self.ROBOT_MODEL, self.robot_data, self.x[:\n nq], False)\n', (4331, 4391), False, 'import pinocchio\n'), ((8277, 8299), 'pinocchio.SE3.Random', 'pinocchio.SE3.Random', ([], {}), '()\n', (8297, 8299), False, 'import pinocchio\n'), ((8680, 8702), 'pinocchio.SE3.Random', 'pinocchio.SE3.Random', ([], {}), '()\n', (8700, 8702), False, 'import pinocchio\n'), ((9024, 9047), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['(3)'], {}), '(3)\n', (9044, 9047), False, 'import pinocchio\n'), ((9436, 9459), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['(3)'], {}), '(3)\n', (9456, 9459), False, 'import pinocchio\n'), ((9775, 9800), 'pinocchio.Motion.Random', 'pinocchio.Motion.Random', ([], {}), '()\n', (9798, 9800), False, 'import pinocchio\n'), ((10175, 10200), 'pinocchio.Motion.Random', 'pinocchio.Motion.Random', ([], {}), '()\n', (10198, 10200), False, 'import pinocchio\n'), ((1048, 1072), 'pinocchio.utils.zero', 'pinocchio.utils.zero', (['nv'], {}), '(nv)\n', (1068, 1072), False, 'import pinocchio\n'), ((2199, 2252), 'numpy.allclose', 'np.allclose', (['self.data.r', 'self.data_der.r'], {'atol': '(1e-09)'}), '(self.data.r, self.data_der.r, atol=1e-09)\n', (2210, 2252), True, 'import numpy as np\n'), ((2633, 2686), 'numpy.allclose', 'np.allclose', (['self.data.r', 'self.data_der.r'], {'atol': '(1e-09)'}), '(self.data.r, self.data_der.r, atol=1e-09)\n', (2644, 2686), True, 'import numpy as np\n'), ((2794, 2849), 'numpy.allclose', 'np.allclose', (['self.data.Lx', 'self.data_der.Lx'], {'atol': '(1e-09)'}), '(self.data.Lx, self.data_der.Lx, atol=1e-09)\n', (2805, 2849), True, 'import numpy as np\n'), ((2887, 2942), 'numpy.allclose', 'np.allclose', (['self.data.Lu', 'self.data_der.Lu'], {'atol': '(1e-09)'}), '(self.data.Lu, self.data_der.Lu, atol=1e-09)\n', (2898, 2942), True, 'import numpy as np\n'), ((2980, 3037), 'numpy.allclose', 'np.allclose', (['self.data.Lxx', 'self.data_der.Lxx'], {'atol': '(1e-09)'}), '(self.data.Lxx, self.data_der.Lxx, atol=1e-09)\n', (2991, 3037), True, 'import numpy as np\n'), ((3076, 3133), 'numpy.allclose', 'np.allclose', (['self.data.Lxu', 'self.data_der.Lxu'], {'atol': '(1e-09)'}), '(self.data.Lxu, self.data_der.Lxu, atol=1e-09)\n', (3087, 3133), True, 'import numpy as np\n'), ((3172, 3229), 'numpy.allclose', 'np.allclose', (['self.data.Luu', 'self.data_der.Luu'], {'atol': '(1e-09)'}), '(self.data.Luu, self.data_der.Luu, atol=1e-09)\n', (3183, 3229), True, 'import numpy as np\n'), ((4104, 4128), 'pinocchio.utils.zero', 'pinocchio.utils.zero', (['nv'], {}), '(nv)\n', (4124, 4128), False, 'import pinocchio\n'), ((5244, 5297), 'numpy.allclose', 'np.allclose', (['self.data.r', 'self.data_sum.r'], {'atol': '(1e-09)'}), '(self.data.r, self.data_sum.r, atol=1e-09)\n', (5255, 5297), True, 'import numpy as np\n'), ((5678, 5731), 'numpy.allclose', 'np.allclose', (['self.data.r', 'self.data_sum.r'], {'atol': '(1e-09)'}), '(self.data.r, self.data_sum.r, atol=1e-09)\n', (5689, 5731), True, 'import numpy as np\n'), ((5839, 5894), 'numpy.allclose', 'np.allclose', (['self.data.Lx', 'self.data_sum.Lx'], {'atol': '(1e-09)'}), '(self.data.Lx, 
self.data_sum.Lx, atol=1e-09)\n', (5850, 5894), True, 'import numpy as np\n'), ((5932, 5987), 'numpy.allclose', 'np.allclose', (['self.data.Lu', 'self.data_sum.Lu'], {'atol': '(1e-09)'}), '(self.data.Lu, self.data_sum.Lu, atol=1e-09)\n', (5943, 5987), True, 'import numpy as np\n'), ((6025, 6082), 'numpy.allclose', 'np.allclose', (['self.data.Lxx', 'self.data_sum.Lxx'], {'atol': '(1e-09)'}), '(self.data.Lxx, self.data_sum.Lxx, atol=1e-09)\n', (6036, 6082), True, 'import numpy as np\n'), ((6121, 6178), 'numpy.allclose', 'np.allclose', (['self.data.Lxu', 'self.data_sum.Lxu'], {'atol': '(1e-09)'}), '(self.data.Lxu, self.data_sum.Lxu, atol=1e-09)\n', (6132, 6178), True, 'import numpy as np\n'), ((6217, 6274), 'numpy.allclose', 'np.allclose', (['self.data.Luu', 'self.data_sum.Luu'], {'atol': '(1e-09)'}), '(self.data.Luu, self.data_sum.Luu, atol=1e-09)\n', (6228, 6274), True, 'import numpy as np\n')]
|
#
# Copyright (c) 2017, UT-BATTELLE, LLC
# All rights reserved.
#
# This software is released under the BSD license detailed
# in the LICENSE file in the top level a-prime directory
#
import numpy
from get_season_months_index import get_season_months_index
def get_days_in_season_months(begin_month, end_month):
    """Return an array with the number of days in each month of the season
    spanning begin_month through end_month (leap years are ignored)."""
    days_in_month = numpy.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
index_months, n_months_season = get_season_months_index(begin_month, end_month)
days_season_months = days_in_month[index_months]
return days_season_months
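# Illustrative usage (assumption: get_season_months_index returns zero-based, inclusive
# month indices for the requested begin_month..end_month season). Note that the
# hard-coded table above ignores leap years (February is always 28 days).
if __name__ == '__main__':
    # June through August -> expected array([30, 31, 31])
    print(get_days_in_season_months(6, 8))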
|
[
"get_season_months_index.get_season_months_index",
"numpy.array"
] |
[((334, 395), 'numpy.array', 'numpy.array', (['[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]'], {}), '([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\n', (345, 395), False, 'import numpy\n'), ((433, 480), 'get_season_months_index.get_season_months_index', 'get_season_months_index', (['begin_month', 'end_month'], {}), '(begin_month, end_month)\n', (456, 480), False, 'from get_season_months_index import get_season_months_index\n')]
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import time
from datetime import datetime
import warnings
import numpy as np
import pandas as pd
from numpy import dot, exp
from numpy.linalg import norm, inv
from scipy.linalg import solve as spsolve
from scipy.integrate import trapz
import scipy.stats as stats
from lifelines.fitters import BaseFitter
from lifelines.statistics import chisq_test
from lifelines.utils import (survival_table_from_events, inv_normal_cdf, normalize,
significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times,
pass_for_numeric_dtypes_or_raise, check_low_var, coalesce,
check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning,
StepSizer, ConvergenceError, string_justify)
class CoxPHFitter(BaseFitter):
"""
This class implements fitting Cox's proportional hazard model:
h(t|x) = h_0(t)*exp(x'*beta)
Parameters:
alpha: the level in the confidence intervals.
tie_method: specify how the fitter should deal with ties. Currently only
'Efron' is available.
      penalizer: Attach an L2 penalizer to the size of the coefficients during regression. This improves
        stability of the estimates and controls for high correlation between covariates.
        For example, this shrinks the absolute value of beta_i. Using a penalizer is recommended, even if only a small value.
The penalty is 1/2 * penalizer * ||beta||^2.
strata: specify a list of columns to use in stratification. This is useful if a
        categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
"""
def __init__(self, alpha=0.95, tie_method='Efron', penalizer=0.0, strata=None):
if not (0 < alpha <= 1.):
raise ValueError('alpha parameter must be between 0 and 1.')
if penalizer < 0:
raise ValueError("penalizer parameter must be >= 0.")
if tie_method != 'Efron':
raise NotImplementedError("Only Efron is available atm.")
self.alpha = alpha
self.tie_method = tie_method
self.penalizer = penalizer
self.strata = strata
def fit(self, df, duration_col, event_col=None,
show_progress=False, initial_beta=None,
strata=None, step_size=None, weights_col=None,
cluster_col=None, robust=False):
"""
        Fit the Cox Proportional Hazard model to a dataset. Tied survival times
are handled using Efron's tie-method.
Parameters:
df: a Pandas dataframe with necessary columns `duration_col` and
`event_col`, plus other covariates. `duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
                the 'death' event was observed: 1 if observed, 0 else (censored).
duration_col: the column in dataframe that contains the subjects'
lifetimes.
event_col: the column in dataframe that contains the subjects' death
observation. If left as None, assume all individuals are non-censored.
weights_col: an optional column in the dataframe that denotes the weight per subject.
                This column is removed from the covariates and used only as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: since the fitter is iterative, show convergence
diagnostics.
initial_beta: initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: specify a list of columns to use in stratification. This is useful if a
                categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: set an initial step size for the fitting algorithm.
robust: Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
                ties, so if there is a high number of ties, results may differ significantly. See
"The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
            cluster_col: specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
Returns:
self, with additional properties: hazards_, confidence_intervals_, baseline_survival_, etc.
"""
df = df.copy()
# Sort on time
df = df.sort_values(by=duration_col)
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + ' UTC'
self.duration_col = duration_col
self.event_col = event_col
self.robust = robust
self.cluster_col = cluster_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
self.strata = coalesce(strata, self.strata)
if self.strata is not None:
original_index = df.index.copy()
df = df.set_index(self.strata)
# Extract time and event
T = df[duration_col]
del df[duration_col]
if event_col is None:
E = pd.Series(np.ones(df.shape[0]), index=df.index)
else:
E = df[event_col]
del df[event_col]
if weights_col:
weights = df.pop(weights_col)
if (weights.astype(int) != weights).any() and not self.robust:
warnings.warn("""It appears your weights are not integers, possibly propensity or sampling scores then?
It's important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to
estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
""", RuntimeWarning)
if (weights <= 0).any():
raise ValueError("values in weights_col must be positive.")
else:
weights = pd.Series(np.ones((self._n_examples,)), index=df.index)
if self.cluster_col:
self._clusters = df.pop(self.cluster_col)
self._check_values(df, T, E)
df = df.astype(float)
# save fitting data for later
self.durations = T.copy()
self.event_observed = E.copy()
if self.strata is not None:
self.durations.index = original_index
self.event_observed.index = original_index
self.event_observed = self.event_observed.astype(bool)
self._norm_mean = df.mean(0)
self._norm_std = df.std(0)
E = E.astype(bool)
hazards_ = self._newton_rhaphson(normalize(df, self._norm_mean, self._norm_std), T, E,
weights=weights,
initial_beta=initial_beta,
show_progress=show_progress,
step_size=step_size)
self.hazards_ = pd.DataFrame(hazards_.T, columns=df.columns, index=['coef']) / self._norm_std
self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
self.standard_errors_ = self._compute_standard_errors(normalize(df, self._norm_mean, self._norm_std), T, E, weights)
self.confidence_intervals_ = self._compute_confidence_intervals()
self.baseline_hazard_ = self._compute_baseline_hazards(df, T, E, weights)
self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
self.baseline_survival_ = self._compute_baseline_survival()
self._predicted_partial_hazards_ = self.predict_partial_hazard(df).values
self._train_log_partial_hazard = self.predict_log_partial_hazard(self._norm_mean.to_frame().T)
return self
def _newton_rhaphson(self, X, T, E, weights=None, initial_beta=None, step_size=None,
precision=10e-6, show_progress=True, max_steps=50):
"""
        Newton-Raphson algorithm for fitting the CPH model.
Note that data is assumed to be sorted on T!
Parameters:
X: (n,d) Pandas DataFrame of observations.
T: (n) Pandas Series representing observed durations.
E: (n) Pandas Series representing death events.
weights: (n) an iterable representing weights per observation.
initial_beta: (1,d) numpy array of initial starting point for
NR algorithm. Default 0.
step_size: float > 0.001 to determine a starting step size in NR algorithm.
precision: the convergence halts if the norm of delta between
                successive positions is less than `precision`.
show_progress: since the fitter is iterative, show convergence
diagnostics.
            max_steps: the maximum number of iterations of the Newton-Raphson algorithm.
Returns:
beta: (1,d) numpy array.
"""
self.path = []
assert precision <= 1., "precision must be less than or equal to 1."
n, d = X.shape
# make sure betas are correct size.
if initial_beta is not None:
assert initial_beta.shape == (d, 1)
beta = initial_beta
else:
beta = np.zeros((d, 1))
step_sizer = StepSizer(step_size)
step_size = step_sizer.next()
# Method of choice is just efron right now
if self.tie_method == 'Efron':
get_gradients = self._get_efron_values
else:
raise NotImplementedError("Only Efron is available.")
i = 0
converging = True
ll, previous_ll = 0, 0
start = time.time()
while converging:
self.path.append(beta.copy())
i += 1
if self.strata is None:
h, g, ll = get_gradients(X.values, beta, T.values, E.values, weights.values)
else:
g = np.zeros_like(beta).T
h = np.zeros((beta.shape[0], beta.shape[0]))
ll = 0
for strata in np.unique(X.index):
stratified_X, stratified_T, stratified_E, stratified_W = X.loc[[strata]], T.loc[[strata]], E.loc[[strata]], weights.loc[[strata]]
_h, _g, _ll = get_gradients(stratified_X.values, beta, stratified_T.values, stratified_E.values, stratified_W.values)
g += _g
h += _h
ll += _ll
if self.penalizer > 0:
# add the gradient and hessian of the l2 term
g -= self.penalizer * beta.T
h.flat[::d + 1] -= self.penalizer
# reusing a piece to make g * inv(h) * g.T faster later
try:
inv_h_dot_g_T = spsolve(-h, g.T, sym_pos=True)
except ValueError as e:
if 'infs or NaNs' in str(e):
raise ConvergenceError("""hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""")
else:
# something else?
raise e
delta = step_size * inv_h_dot_g_T
if np.any(np.isnan(delta)):
raise ConvergenceError("""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""")
# Save these as pending result
hessian, gradient = h, g
norm_delta = norm(delta)
# reusing an above piece to make g * inv(h) * g.T faster.
newton_decrement = g.dot(inv_h_dot_g_T)/2
if show_progress:
print("Iteration %d: norm_delta = %.5f, step_size = %.5f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f" % (i, norm_delta, step_size, ll, newton_decrement, time.time() - start))
# convergence criteria
if norm_delta < precision:
converging, completed = False, True
elif previous_ll != 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
# this is what R uses by default
converging, completed = False, True
elif newton_decrement < precision:
converging, completed = False, True
elif i >= max_steps:
# 50 iterations steps with N-R is a lot.
# Expected convergence is ~10 steps
converging, completed = False, False
elif step_size <= 0.00001:
converging, completed = False, False
elif abs(ll) < 0.0001 and norm_delta > 1.0:
warnings.warn("The log-likelihood is getting suspciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.idre.ucla.edu/other/mult-pkg/faq/general/faqwhat-is-complete-or-quasi-complete-separation-in-logisticprobit-regression-and-how-do-we-deal-with-them/ ", ConvergenceWarning)
converging, completed = False, False
step_size = step_sizer.update(norm_delta).next()
beta += delta
previous_ll = ll
self._hessian_ = hessian
self._score_ = gradient
self._log_likelihood = ll
if show_progress and completed:
print("Convergence completed after %d iterations." % (i))
if not completed:
warnings.warn("Newton-Rhapson failed to converge sufficiently in %d steps." % max_steps, ConvergenceWarning)
return beta
def _get_efron_values(self, X, beta, T, E, weights):
"""
Calculates the first and second order vector differentials, with respect to beta.
Note that X, T, E are assumed to be sorted on T!
        A good explanation of the Efron approximation: consider three of five subjects who fail at the same time.
        Since it is not known a priori who fails first, one-third of
        (φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after the first failure. Similarly, two-thirds
        of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
From https://cran.r-project.org/web/packages/survival/survival.pdf:
"Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
weights option, and in this case the weighted fit is arguably the correct one."
Parameters:
X: (n,d) numpy array of observations.
beta: (1, d) numpy array of coefficients.
T: (n) numpy array representing observed durations.
E: (n) numpy array representing death events.
weights: (n) an array representing weights per observation.
Returns:
hessian: (d, d) numpy array,
gradient: (1, d) numpy array
log_likelihood: double
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((1, d))
log_lik = 0
# Init risk and tie sums to zero
x_tie_sum = np.zeros((1, d))
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# Init number of ties and weights
weight_count = 0.0
tie_count = 0
scores = weights[:,None] * exp(dot(X, beta))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i:i + 1]
score = scores[i:i+1]
w = weights[i]
# Calculate phi values
phi_i = score
phi_x_i = phi_i * xi
phi_x_x_i = dot(xi.T, phi_x_i)
# Calculate sums of Risk set
risk_phi += phi_i
risk_phi_x += phi_x_i
risk_phi_x_x += phi_x_x_i
# Calculate sums of Ties, if this is an event
if ei:
x_tie_sum += w * xi
tie_phi += phi_i
tie_phi_x += phi_x_i
tie_phi_x_x += phi_x_x_i
# Keep track of count
tie_count += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tie_count == 0:
# Only censored with current time, move on
continue
            # There was at least one event and no more ties remain. Time to sum.
partial_gradient = np.zeros((1, d))
weighted_average = weight_count / tie_count
for l in range(tie_count):
"""
                A good explanation of the Efron approximation: consider three of five subjects who fail at the same time.
                Since it is not known a priori who fails first, one-third of
                (φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after the first failure. Similarly, two-thirds
                of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
"""
numer = (risk_phi_x - l * tie_phi_x / tie_count)
denom = (risk_phi - l * tie_phi / tie_count)
# Gradient
partial_gradient += weighted_average * numer / denom
# Hessian
a1 = (risk_phi_x_x - l * tie_phi_x_x / tie_count) / denom
# In case numer and denom both are really small numbers,
# make sure to do division before multiplications
a2 = dot(numer.T / denom, numer / denom)
hessian -= weighted_average * (a1 - a2)
log_lik -= weighted_average * np.log(denom[0][0])
# Values outside tie sum
gradient += x_tie_sum - partial_gradient
log_lik += dot(x_tie_sum, beta)[0][0]
# reset tie values
tie_count = 0
weight_count = 0.0
x_tie_sum = np.zeros((1, d))
tie_phi = 0
tie_phi_x = np.zeros((1, d))
tie_phi_x_x = np.zeros((d, d))
return hessian, gradient, log_lik
def _compute_baseline_cumulative_hazard(self):
return self.baseline_hazard_.cumsum()
@staticmethod
def _check_values(df, T, E):
pass_for_numeric_dtypes_or_raise(df)
check_nans_or_infs(T)
check_nans_or_infs(E)
check_nans_or_infs(df)
check_low_var(df)
check_complete_separation(df, E, T)
def _compute_confidence_intervals(self):
alpha2 = inv_normal_cdf((1. + self.alpha) / 2.)
se = self.standard_errors_
hazards = self.hazards_.values
return pd.DataFrame(np.r_[hazards - alpha2 * se,
hazards + alpha2 * se],
index=['lower-bound', 'upper-bound'],
columns=self.hazards_.columns)
def _compute_sandwich_estimator(self, X, T, E, weights):
_, d = X.shape
if self.strata is not None and self.cluster_col is not None:
# TODO
raise NotImplementedError("Providing clusters and strata is not implemented yet")
if self.strata is not None:
score_residuals = np.empty((0, d))
for strata in np.unique(X.index):
# TODO: use pandas .groupby
stratified_X, stratified_T, stratified_E, stratified_W = X.loc[[strata]], T.loc[[strata]], E.loc[[strata]], weights.loc[[strata]]
score_residuals = np.append(score_residuals,
self._compute_residuals_within_strata(stratified_X.values, stratified_T.values, stratified_E.values, stratified_W.values) * stratified_W[:, None],
axis=0)
else:
score_residuals = self._compute_residuals_within_strata(X.values, T.values, E.values, weights.values) * weights[:, None]
if self.cluster_col:
score_residuals_ = np.empty((0, d))
for cluster in np.unique(self._clusters):
ix = self._clusters == cluster
weights_ = weights.values[ix]
score_residuals_ = np.append(score_residuals_,
(score_residuals[ix, :] * weights_[:, None]).sum(0).reshape(1, d),
axis=0)
score_residuals = score_residuals_
naive_var = inv(self._hessian_)
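        # Robust (sandwich) variance sketch: with U the matrix of per-subject score
        # residuals and H the Hessian inverted above, the two lines below compute
        # inv(H) * (U.T @ U) * inv(H), rescaled back to the original covariate scale by
        # the outer product of the column standard deviations.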
delta_betas = score_residuals.dot(naive_var)
sandwich_estimator = delta_betas.T.dot(delta_betas) / np.outer(self._norm_std, self._norm_std)
return sandwich_estimator
def _compute_residuals_within_strata(self, X, T, E, weights):
# https://www.stat.tamu.edu/~carroll/ftp/gk001.pdf
# lin1989
# https://www.ics.uci.edu/~dgillen/STAT255/Handouts/lecture10.pdf
# TODO: doesn't handle ties.
n, d = X.shape
# we already unnormalized the betas in `fit`, so we need normalize them again since X is
# normalized.
beta = self.hazards_.values[0] * self._norm_std
E = E.astype(int)
score_residuals = np.zeros((n, d))
phi_s = exp(dot(X, beta))
# compute these within strata
# need to store these histories, as we access them often
# this is a reverse cumulative sum. See original code in https://github.com/CamDavidsonPilon/lifelines/pull/496/files#diff-81ee0759dbae0770e1a02cf17f4cfbb1R431
risk_phi_x_history = (X * (weights * phi_s)[:, None])[::-1].cumsum(0)[::-1]
risk_phi_history = (weights * phi_s) [::-1].cumsum() [::-1][:, None]
# Iterate forwards
for i in range(0, n):
xi = X[i:i + 1]
phi_i = phi_s[i]
score = - phi_i * (
(E[:i+1] * weights[:i+1] / risk_phi_history[:i+1].T).T # this is constant-ish, and could be cached
* (xi - risk_phi_x_history[:i+1] / risk_phi_history[:i+1])
).sum(0)
if E[i]:
score = score + (xi - risk_phi_x_history[i] / risk_phi_history[i])
score_residuals[i, :] = score
return score_residuals
def _compute_standard_errors(self, df, T, E, weights):
if self.robust or self.cluster_col:
se = np.sqrt(self._compute_sandwich_estimator(df, T, E, weights).diagonal()) # / self._norm_std
else:
se = np.sqrt(self.variance_matrix_.diagonal())
return pd.DataFrame(se[None, :],
index=['se'], columns=self.hazards_.columns)
def _compute_z_values(self):
return (self.hazards_.loc['coef'] /
self.standard_errors_.loc['se'])
def _compute_p_values(self):
U = self._compute_z_values() ** 2
return stats.chi2.sf(U, 1)
@property
def summary(self):
"""Summary statistics describing the fit.
Set alpha property in the object before calling.
Returns
-------
df : pd.DataFrame
Contains columns coef, exp(coef), se(coef), z, p, lower, upper"""
df = pd.DataFrame(index=self.hazards_.columns)
df['coef'] = self.hazards_.loc['coef'].values
df['exp(coef)'] = exp(self.hazards_.loc['coef'].values)
df['se(coef)'] = self.standard_errors_.loc['se'].values
df['z'] = self._compute_z_values()
df['p'] = self._compute_p_values()
df['lower %.2f' % self.alpha] = self.confidence_intervals_.loc['lower-bound'].values
df['upper %.2f' % self.alpha] = self.confidence_intervals_.loc['upper-bound'].values
return df
def print_summary(self):
"""
Print summary statistics describing the fit.
"""
# Print information about data first
justify = string_justify(18)
print(self)
print("{} = {}".format(justify('duration col'), self.duration_col))
print("{} = {}".format(justify('event col'), self.event_col))
if self.weights_col:
print("{} = {}".format(justify('weights col'), self.weights_col))
if self.cluster_col:
print("{} = {}".format(justify('cluster col'), self.cluster_col))
if self.robust or self.cluster_col:
print("{} = {}".format(justify('robust variance'), True))
if self.strata:
print('{} = {}'.format(justify('strata'), self.strata))
print('{} = {}'.format(justify('number of subjects'), self._n_examples))
print('{} = {}'.format(justify('number of events'), self.event_observed.sum()))
print('{} = {:.3f}'.format(justify('log-likelihood'), self._log_likelihood))
print('{} = {}'.format(justify("time fit was run"), self._time_fit_was_called), end='\n\n')
print('---')
df = self.summary
# Significance codes last
df[''] = [significance_code(p) for p in df['p']]
print(df.to_string(float_format=lambda f: '{:4.4f}'.format(f)))
# Significance code explanation
print('---')
print(significance_codes_as_text(), end='\n\n')
print("Concordance = {:.3f}".format(self.score_))
print("Likelihood ratio test = {:.3f} on {} df, p={:.5f}".format(*self._compute_likelihood_ratio_test()))
return
def _compute_likelihood_ratio_test(self):
"""
This function computes the likelihood ratio test for the Cox model. We
compare the existing model (with all the covariates) to the trivial model
of no covariates.
        Conveniently, we can actually use the class itself to do most of the work.
"""
trivial_dataset = pd.DataFrame({'E': self.event_observed, 'T': self.durations})
cp_null = CoxPHFitter()
cp_null.fit(trivial_dataset, 'T', 'E', show_progress=False)
ll_null = cp_null._log_likelihood
ll_alt = self._log_likelihood
test_stat = 2*ll_alt - 2*ll_null
degrees_freedom = self.hazards_.shape[1]
_, p_value = chisq_test(test_stat, degrees_freedom=degrees_freedom, alpha=0.0)
return test_stat, degrees_freedom, p_value
def predict_partial_hazard(self, X):
"""
        X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, the columns
            can be in any order. If a numpy array, the columns must be in the
            same order as the training data.
Returns the partial hazard for the individuals, partial since the
baseline hazard is not included. Equal to \exp{\beta (X - mean{X_train})}
"""
return exp(self.predict_log_partial_hazard(X))
def predict_log_partial_hazard(self, X):
"""
        X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, the columns
            can be in any order. If a numpy array, the columns must be in the
            same order as the training data.
        This is equivalent to R's linear.predictors.
        Returns the log of the partial hazard for the individuals, partial since the
            baseline hazard is not included. Equal to \beta (X - mean{X_train})
"""
hazard_names = self.hazards_.columns
if isinstance(X, pd.DataFrame):
order = hazard_names
X = X[order]
pass_for_numeric_dtypes_or_raise(X)
elif isinstance(X, pd.Series) and ((X.shape[0] == len(hazard_names) + 2) or (X.shape[0] == len(hazard_names))):
X = X.to_frame().T
order = hazard_names
X = X[order]
pass_for_numeric_dtypes_or_raise(X)
elif isinstance(X, pd.Series):
            assert len(hazard_names) == 1, 'Series not the correct argument'
X = pd.DataFrame(X)
pass_for_numeric_dtypes_or_raise(X)
X = X.astype(float)
index = _get_index(X)
X = normalize(X, self._norm_mean.values, 1)
return pd.DataFrame(np.dot(X, self.hazards_.T), index=index)
def predict_log_hazard_relative_to_mean(self, X):
"""
X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns the log hazard relative to the hazard of the mean covariates. This is the behaviour
        of R's predict.coxph. Equal to \beta X - \beta mean{X_train}
"""
return self.predict_log_partial_hazard(X) - self._train_log_partial_hazard.squeeze()
def predict_cumulative_hazard(self, X, times=None):
"""
X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
Returns the cumulative hazard of individuals.
"""
if self.strata:
cumulative_hazard_ = pd.DataFrame()
for stratum, stratified_X in X.groupby(self.strata):
try:
c_0 = self.baseline_cumulative_hazard_[[stratum]]
except KeyError:
                    raise StatError("""The stratum %s was not found in the original training data. For example, try
                    the following on the original dataset, df: `df.groupby(%s).size()`. You will likely see that %s is not present in the output.
                    """ % (stratum, self.strata, stratum))
col = _get_index(stratified_X)
v = self.predict_partial_hazard(stratified_X)
cumulative_hazard_ = cumulative_hazard_.merge(pd.DataFrame(np.dot(c_0, v.T), index=c_0.index, columns=col), how='outer', right_index=True, left_index=True)
else:
c_0 = self.baseline_cumulative_hazard_
v = self.predict_partial_hazard(X)
col = _get_index(v)
cumulative_hazard_ = pd.DataFrame(np.dot(c_0, v.T), columns=col, index=c_0.index)
if times is not None:
# non-linear interpolations can push the survival curves above 1 and below 0.
return cumulative_hazard_.reindex(cumulative_hazard_.index.union(times)).interpolate("index").loc[times]
else:
return cumulative_hazard_
def predict_survival_function(self, X, times=None):
"""
X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: an iterable of increasing times to predict the survival function at. Default
is the set of all durations (observed and unobserved)
Returns the estimated survival functions for the individuals
"""
return exp(-self.predict_cumulative_hazard(X, times=times))
def predict_percentile(self, X, p=0.5):
"""
X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns the median lifetimes for the individuals, by default. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
http://stats.stackexchange.com/questions/102986/percentile-loss-functions
"""
subjects = _get_index(X)
return qth_survival_times(p, self.predict_survival_function(X)[subjects]).T
def predict_median(self, X):
"""
X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
"""
return self.predict_percentile(X, 0.5)
def predict_expectation(self, X):
"""
X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
        Compute the expected lifetime, E[T], using covariates X. The expectation is computed
        using the fact that E[T] = int_0^inf P(T > t) dt = int_0^inf S(t) dt.
        To compute the integral, we use the trapezoidal rule to approximate it. However, if the
        survival function, S(t), doesn't converge to 0, then the expectation is really infinity.
"""
subjects = _get_index(X)
v = self.predict_survival_function(X)[subjects]
return pd.DataFrame(trapz(v.values.T, v.index), index=subjects)
def _compute_baseline_hazard(self, data, durations, event_observed, weights, name):
# https://stats.stackexchange.com/questions/46532/cox-baseline-hazard
ind_hazards = self.predict_partial_hazard(data) * weights[:, None]
ind_hazards['event_at'] = durations.values
ind_hazards_summed_over_durations = ind_hazards.groupby('event_at')[0].sum().sort_index(ascending=False).cumsum()
ind_hazards_summed_over_durations.name = 'hazards'
event_table = survival_table_from_events(durations, event_observed, weights=weights)
event_table = event_table.join(ind_hazards_summed_over_durations)
baseline_hazard = pd.DataFrame(event_table['observed'] / event_table['hazards'], columns=[name]).fillna(0)
return baseline_hazard
def _compute_baseline_hazards(self, df, T, E, weights):
if self.strata:
index = self.durations.unique()
baseline_hazards_ = pd.DataFrame(index=index)
for stratum in df.index.unique():
baseline_hazards_ = baseline_hazards_.merge(
self._compute_baseline_hazard(data=df.loc[[stratum]], durations=T.loc[[stratum]], event_observed=E.loc[[stratum]], weights=weights.loc[[stratum]], name=stratum),
left_index=True,
right_index=True,
how='left')
return baseline_hazards_.fillna(0)
else:
return self._compute_baseline_hazard(data=df, durations=T, event_observed=E, weights=weights, name='baseline hazard')
def _compute_baseline_survival(self):
"""
Importantly, this agrees with what the KaplanMeierFitter produces. Ex:
from lifelines.datasets import load_rossi
from lifelines import CoxPHFitter, KaplanMeierFitter
rossi = load_rossi()
kmf = KaplanMeierFitter()
kmf.fit(rossi['week'], rossi['arrest'])
rossi2 = rossi[['week', 'arrest']].copy()
rossi2['var1'] = np.random.randn(432)
cph = CoxPHFitter()
cph.fit(rossi2, 'week', 'arrest')
ax = cph.baseline_survival_.plot()
kmf.plot(ax=ax)
"""
survival_df = exp(-self.baseline_cumulative_hazard_)
if self.strata is None:
survival_df.columns = ['baseline survival']
return survival_df
def plot(self, standardized=False, columns=None, **kwargs):
"""
Produces a visual representation of the fitted coefficients, including their standard errors and magnitudes.
Parameters:
standardized: standardize each estimated coefficient and confidence interval
endpoints by the standard error of the estimate.
columns : list-like, default None
Returns:
            ax: the matplotlib axis that can be edited.
"""
from matplotlib import pyplot as plt
ax = kwargs.get('ax', None) or plt.figure().add_subplot(111)
if columns is not None:
yaxis_locations = range(len(columns))
summary = self.summary.loc[columns]
lower_bound = self.confidence_intervals_[columns].loc['lower-bound'].copy()
upper_bound = self.confidence_intervals_[columns].loc['upper-bound'].copy()
hazards = self.hazards_[columns].values[0].copy()
else:
yaxis_locations = range(len(self.hazards_.columns))
summary = self.summary
lower_bound = self.confidence_intervals_.loc['lower-bound'].copy()
upper_bound = self.confidence_intervals_.loc['upper-bound'].copy()
hazards = self.hazards_.values[0].copy()
if standardized:
se = summary['se(coef)']
lower_bound /= se
upper_bound /= se
hazards /= se
order = np.argsort(hazards)
ax.scatter(upper_bound.values[order], yaxis_locations, marker='|', c='k')
ax.scatter(lower_bound.values[order], yaxis_locations, marker='|', c='k')
ax.scatter(hazards[order], yaxis_locations, marker='o', c='k')
ax.hlines(yaxis_locations, lower_bound.values[order], upper_bound.values[order], color='k', lw=1)
tick_labels = [c + significance_code(p).strip() for (c, p) in summary['p'][order].iteritems()]
plt.yticks(yaxis_locations, tick_labels)
plt.xlabel("standardized coef" if standardized else "coef")
return ax
def plot_covariate_groups(self, covariate, groups, **kwargs):
"""
Produces a visual representation comparing the baseline survival curve of the model versus
what happens when a covariate is varied over values in a group. This is useful to compare
subjects' survival as we vary a single covariate, all else being held equal. The baseline survival
curve is equal to the predicted survival curve at all average values in the original dataset.
Parameters:
covariate: a string of the covariate in the original dataset that we wish to vary.
groups: an iterable of the values we wish the covariate to take on.
Returns:
            ax: the matplotlib axis that can be edited.
"""
from matplotlib import pyplot as plt
if covariate not in self.hazards_.columns:
raise KeyError('covariate `%s` is not present in the original dataset' % covariate)
ax = kwargs.get('ax', None) or plt.figure().add_subplot(111)
x_bar = self._norm_mean.to_frame().T
X = pd.concat([x_bar] * len(groups))
X.index = ['%s=%s' % (covariate, g) for g in groups]
X[covariate] = groups
self.predict_survival_function(X).plot(ax=ax)
self.baseline_survival_.plot(ax=ax, ls='--')
return ax
@property
def score_(self):
if hasattr(self, '_concordance_score_'):
return self._concordance_score_
else:
self._concordance_score_ = concordance_index(self.durations,
-self._predicted_partial_hazards_,
self.event_observed)
del self._predicted_partial_hazards_
return self._concordance_score_
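# A minimal numeric sketch of the robust "sandwich" variance idea used in
# _compute_sandwich_estimator above: with per-subject score residuals S and
# observed information (Hessian) A, delta_betas = S @ inv(A), and the robust
# covariance is delta_betas.T @ delta_betas = inv(A) @ (S.T @ S) @ inv(A).
# The numbers below are placeholders, not real data.
if __name__ == '__main__':
    import numpy as np
    from numpy.linalg import inv

    A = np.array([[4.0, 1.0], [1.0, 3.0]])                 # assumed information matrix
    S = np.array([[0.5, -0.2], [-0.3, 0.4], [0.1, 0.1]])   # assumed score residuals
    delta_betas = S.dot(inv(A))
    robust_cov = delta_betas.T.dot(delta_betas)            # inv(A) @ S.T @ S @ inv(A)
    print(np.sqrt(np.diag(robust_cov)))                      # robust standard errors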
|
[
"lifelines.utils.significance_codes_as_text",
"scipy.linalg.solve",
"lifelines.statistics.chisq_test",
"numpy.empty",
"numpy.ones",
"numpy.isnan",
"numpy.argsort",
"datetime.datetime.utcnow",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"numpy.exp",
"lifelines.utils.normalize",
"numpy.unique",
"pandas.DataFrame",
"lifelines.utils.ConvergenceError",
"numpy.zeros_like",
"matplotlib.pyplot.yticks",
"lifelines.utils.string_justify",
"lifelines.utils.check_nans_or_infs",
"scipy.stats.chi2.sf",
"lifelines.utils.StatError",
"lifelines.utils.concordance_index",
"lifelines.utils.survival_table_from_events",
"numpy.linalg.inv",
"lifelines.utils.inv_normal_cdf",
"lifelines.utils.coalesce",
"lifelines.utils.check_low_var",
"numpy.dot",
"lifelines.utils.significance_code",
"lifelines.utils.StepSizer",
"lifelines.utils.pass_for_numeric_dtypes_or_raise",
"numpy.outer",
"numpy.log",
"numpy.zeros",
"lifelines.utils._get_index",
"time.time",
"lifelines.utils.check_complete_separation",
"scipy.integrate.trapz",
"warnings.warn",
"matplotlib.pyplot.xlabel"
] |
[((5481, 5510), 'lifelines.utils.coalesce', 'coalesce', (['strata', 'self.strata'], {}), '(strata, self.strata)\n', (5489, 5510), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((9996, 10016), 'lifelines.utils.StepSizer', 'StepSizer', (['step_size'], {}), '(step_size)\n', (10005, 10016), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((10365, 10376), 'time.time', 'time.time', ([], {}), '()\n', (10374, 10376), False, 'import time\n'), ((16033, 16049), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (16041, 16049), True, 'import numpy as np\n'), ((16069, 16085), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (16077, 16085), True, 'import numpy as np\n'), ((16168, 16184), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (16176, 16184), True, 'import numpy as np\n'), ((19508, 19544), 'lifelines.utils.pass_for_numeric_dtypes_or_raise', 'pass_for_numeric_dtypes_or_raise', (['df'], {}), '(df)\n', (19540, 19544), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((19553, 19574), 'lifelines.utils.check_nans_or_infs', 'check_nans_or_infs', (['T'], {}), '(T)\n', (19571, 19574), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((19583, 19604), 'lifelines.utils.check_nans_or_infs', 'check_nans_or_infs', (['E'], {}), '(E)\n', (19601, 19604), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((19613, 19635), 'lifelines.utils.check_nans_or_infs', 'check_nans_or_infs', (['df'], {}), '(df)\n', (19631, 19635), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((19644, 19661), 'lifelines.utils.check_low_var', 'check_low_var', (['df'], {}), '(df)\n', (19657, 
19661), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((19670, 19705), 'lifelines.utils.check_complete_separation', 'check_complete_separation', (['df', 'E', 'T'], {}), '(df, E, T)\n', (19695, 19705), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((19769, 19809), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', (['((1.0 + self.alpha) / 2.0)'], {}), '((1.0 + self.alpha) / 2.0)\n', (19783, 19809), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((19897, 20036), 'pandas.DataFrame', 'pd.DataFrame', (['np.r_[hazards - alpha2 * se, hazards + alpha2 * se]'], {'index': "['lower-bound', 'upper-bound']", 'columns': 'self.hazards_.columns'}), "(np.r_[hazards - alpha2 * se, hazards + alpha2 * se], index=[\n 'lower-bound', 'upper-bound'], columns=self.hazards_.columns)\n", (19909, 20036), True, 'import pandas as pd\n'), ((21686, 21705), 'numpy.linalg.inv', 'inv', (['self._hessian_'], {}), '(self._hessian_)\n', (21689, 21705), False, 'from numpy.linalg import norm, inv\n'), ((22404, 22420), 'numpy.zeros', 'np.zeros', (['(n, d)'], {}), '((n, d))\n', (22412, 22420), True, 'import numpy as np\n'), ((23746, 23816), 'pandas.DataFrame', 'pd.DataFrame', (['se[None, :]'], {'index': "['se']", 'columns': 'self.hazards_.columns'}), "(se[None, :], index=['se'], columns=self.hazards_.columns)\n", (23758, 23816), True, 'import pandas as pd\n'), ((24063, 24082), 'scipy.stats.chi2.sf', 'stats.chi2.sf', (['U', '(1)'], {}), '(U, 1)\n', (24076, 24082), True, 'import scipy.stats as stats\n'), ((24379, 24420), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.hazards_.columns'}), '(index=self.hazards_.columns)\n', (24391, 24420), True, 'import pandas as pd\n'), ((24501, 24538), 'numpy.exp', 'exp', (["self.hazards_.loc['coef'].values"], {}), "(self.hazards_.loc['coef'].values)\n", (24504, 24538), False, 'from numpy import dot, exp\n'), ((25065, 25083), 'lifelines.utils.string_justify', 'string_justify', (['(18)'], {}), '(18)\n', (25079, 25083), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((26911, 26972), 'pandas.DataFrame', 'pd.DataFrame', (["{'E': self.event_observed, 'T': self.durations}"], {}), "({'E': self.event_observed, 'T': self.durations})\n", (26923, 26972), True, 'import pandas as pd\n'), ((27267, 27332), 
'lifelines.statistics.chisq_test', 'chisq_test', (['test_stat'], {'degrees_freedom': 'degrees_freedom', 'alpha': '(0.0)'}), '(test_stat, degrees_freedom=degrees_freedom, alpha=0.0)\n', (27277, 27332), False, 'from lifelines.statistics import chisq_test\n'), ((29403, 29416), 'lifelines.utils._get_index', '_get_index', (['X'], {}), '(X)\n', (29413, 29416), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((29430, 29469), 'lifelines.utils.normalize', 'normalize', (['X', 'self._norm_mean.values', '(1)'], {}), '(X, self._norm_mean.values, 1)\n', (29439, 29469), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((33108, 33121), 'lifelines.utils._get_index', '_get_index', (['X'], {}), '(X)\n', (33118, 33121), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((34332, 34345), 'lifelines.utils._get_index', '_get_index', (['X'], {}), '(X)\n', (34342, 34345), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((34971, 35041), 'lifelines.utils.survival_table_from_events', 'survival_table_from_events', (['durations', 'event_observed'], {'weights': 'weights'}), '(durations, event_observed, weights=weights)\n', (34997, 35041), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((36666, 36704), 'numpy.exp', 'exp', (['(-self.baseline_cumulative_hazard_)'], {}), '(-self.baseline_cumulative_hazard_)\n', (36669, 36704), False, 'from numpy import dot, exp\n'), ((38301, 38320), 'numpy.argsort', 'np.argsort', (['hazards'], {}), '(hazards)\n', (38311, 38320), True, 'import numpy as np\n'), ((38774, 38814), 'matplotlib.pyplot.yticks', 'plt.yticks', (['yaxis_locations', 'tick_labels'], {}), '(yaxis_locations, tick_labels)\n', (38784, 38814), True, 'from matplotlib import pyplot as plt\n'), ((38823, 38882), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('standardized coef' if standardized else 'coef')"], {}), "('standardized coef' if standardized else 'coef')\n", (38833, 38882), True, 'from matplotlib import pyplot as 
plt\n'), ((7300, 7346), 'lifelines.utils.normalize', 'normalize', (['df', 'self._norm_mean', 'self._norm_std'], {}), '(df, self._norm_mean, self._norm_std)\n', (7309, 7346), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((7637, 7697), 'pandas.DataFrame', 'pd.DataFrame', (['hazards_.T'], {'columns': 'df.columns', 'index': "['coef']"}), "(hazards_.T, columns=df.columns, index=['coef'])\n", (7649, 7697), True, 'import pandas as pd\n'), ((7771, 7811), 'numpy.outer', 'np.outer', (['self._norm_std', 'self._norm_std'], {}), '(self._norm_std, self._norm_std)\n', (7779, 7811), True, 'import numpy as np\n'), ((7874, 7920), 'lifelines.utils.normalize', 'normalize', (['df', 'self._norm_mean', 'self._norm_std'], {}), '(df, self._norm_mean, self._norm_std)\n', (7883, 7920), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((9957, 9973), 'numpy.zeros', 'np.zeros', (['(d, 1)'], {}), '((d, 1))\n', (9965, 9973), True, 'import numpy as np\n'), ((12448, 12459), 'numpy.linalg.norm', 'norm', (['delta'], {}), '(delta)\n', (12452, 12459), False, 'from numpy.linalg import norm, inv\n'), ((14414, 14526), 'warnings.warn', 'warnings.warn', (["('Newton-Rhapson failed to converge sufficiently in %d steps.' % max_steps)", 'ConvergenceWarning'], {}), "('Newton-Rhapson failed to converge sufficiently in %d steps.' 
%\n max_steps, ConvergenceWarning)\n", (14427, 14526), False, 'import warnings\n'), ((16250, 16266), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (16258, 16266), True, 'import numpy as np\n'), ((16268, 16284), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (16276, 16284), True, 'import numpy as np\n'), ((16321, 16337), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (16329, 16337), True, 'import numpy as np\n'), ((16339, 16355), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (16347, 16355), True, 'import numpy as np\n'), ((16906, 16924), 'numpy.dot', 'dot', (['xi.T', 'phi_x_i'], {}), '(xi.T, phi_x_i)\n', (16909, 16924), False, 'from numpy import dot, exp\n'), ((17756, 17772), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (17764, 17772), True, 'import numpy as np\n'), ((19183, 19199), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (19191, 19199), True, 'import numpy as np\n'), ((19248, 19264), 'numpy.zeros', 'np.zeros', (['(1, d)'], {}), '((1, d))\n', (19256, 19264), True, 'import numpy as np\n'), ((19291, 19307), 'numpy.zeros', 'np.zeros', (['(d, d)'], {}), '((d, d))\n', (19299, 19307), True, 'import numpy as np\n'), ((20459, 20475), 'numpy.empty', 'np.empty', (['(0, d)'], {}), '((0, d))\n', (20467, 20475), True, 'import numpy as np\n'), ((20502, 20520), 'numpy.unique', 'np.unique', (['X.index'], {}), '(X.index)\n', (20511, 20520), True, 'import numpy as np\n'), ((21227, 21243), 'numpy.empty', 'np.empty', (['(0, d)'], {}), '((0, d))\n', (21235, 21243), True, 'import numpy as np\n'), ((21271, 21296), 'numpy.unique', 'np.unique', (['self._clusters'], {}), '(self._clusters)\n', (21280, 21296), True, 'import numpy as np\n'), ((21821, 21861), 'numpy.outer', 'np.outer', (['self._norm_std', 'self._norm_std'], {}), '(self._norm_std, self._norm_std)\n', (21829, 21861), True, 'import numpy as np\n'), ((22442, 22454), 'numpy.dot', 'dot', (['X', 'beta'], {}), '(X, beta)\n', (22445, 22454), False, 'from numpy import dot, exp\n'), ((26129, 26149), 'lifelines.utils.significance_code', 'significance_code', (['p'], {}), '(p)\n', (26146, 26149), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((26315, 26343), 'lifelines.utils.significance_codes_as_text', 'significance_codes_as_text', ([], {}), '()\n', (26341, 26343), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((28869, 28904), 'lifelines.utils.pass_for_numeric_dtypes_or_raise', 'pass_for_numeric_dtypes_or_raise', (['X'], {}), '(X)\n', (28901, 28904), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((29498, 29524), 
'numpy.dot', 'np.dot', (['X', 'self.hazards_.T'], {}), '(X, self.hazards_.T)\n', (29504, 29524), True, 'import numpy as np\n'), ((30712, 30726), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (30724, 30726), True, 'import pandas as pd\n'), ((31600, 31613), 'lifelines.utils._get_index', '_get_index', (['v'], {}), '(v)\n', (31610, 31613), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((34430, 34456), 'scipy.integrate.trapz', 'trapz', (['v.values.T', 'v.index'], {}), '(v.values.T, v.index)\n', (34435, 34456), False, 'from scipy.integrate import trapz\n'), ((35425, 35450), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index'}), '(index=index)\n', (35437, 35450), True, 'import pandas as pd\n'), ((40417, 40511), 'lifelines.utils.concordance_index', 'concordance_index', (['self.durations', '(-self._predicted_partial_hazards_)', 'self.event_observed'], {}), '(self.durations, -self._predicted_partial_hazards_, self.\n event_observed)\n', (40434, 40511), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((5783, 5803), 'numpy.ones', 'np.ones', (['df.shape[0]'], {}), '(df.shape[0])\n', (5790, 5803), True, 'import numpy as np\n'), ((6053, 6493), 'warnings.warn', 'warnings.warn', (['"""It appears your weights are not integers, possibly propensity or sampling scores then?\nIt\'s important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to\nestimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis\\"\n"""', 'RuntimeWarning'], {}), '(\n """It appears your weights are not integers, possibly propensity or sampling scores then?\nIt\'s important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to\nestimate the variances. 
See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis\\"\n"""\n , RuntimeWarning)\n', (6066, 6493), False, 'import warnings\n'), ((6643, 6671), 'numpy.ones', 'np.ones', (['(self._n_examples,)'], {}), '((self._n_examples,))\n', (6650, 6671), True, 'import numpy as np\n'), ((7749, 7768), 'numpy.linalg.inv', 'inv', (['self._hessian_'], {}), '(self._hessian_)\n', (7752, 7768), False, 'from numpy.linalg import norm, inv\n'), ((10674, 10714), 'numpy.zeros', 'np.zeros', (['(beta.shape[0], beta.shape[0])'], {}), '((beta.shape[0], beta.shape[0]))\n', (10682, 10714), True, 'import numpy as np\n'), ((10768, 10786), 'numpy.unique', 'np.unique', (['X.index'], {}), '(X.index)\n', (10777, 10786), True, 'import numpy as np\n'), ((11473, 11503), 'scipy.linalg.solve', 'spsolve', (['(-h)', 'g.T'], {'sym_pos': '(True)'}), '(-h, g.T, sym_pos=True)\n', (11480, 11503), True, 'from scipy.linalg import solve as spsolve\n'), ((12046, 12061), 'numpy.isnan', 'np.isnan', (['delta'], {}), '(delta)\n', (12054, 12061), True, 'import numpy as np\n'), ((12086, 12351), 'lifelines.utils.ConvergenceError', 'ConvergenceError', (['"""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""'], {}), '(\n """delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""\n )\n', (12102, 12351), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((16487, 16499), 'numpy.dot', 'dot', (['X', 'beta'], {}), '(X, beta)\n', (16490, 16499), False, 'from numpy import dot, exp\n'), ((18768, 18803), 'numpy.dot', 'dot', (['(numer.T / denom)', '(numer / denom)'], {}), '(numer.T / denom, numer / denom)\n', (18771, 18803), False, 'from numpy import dot, exp\n'), ((29126, 29161), 'lifelines.utils.pass_for_numeric_dtypes_or_raise', 'pass_for_numeric_dtypes_or_raise', (['X'], {}), '(X)\n', (29158, 29161), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((31211, 31235), 'lifelines.utils._get_index', '_get_index', (['stratified_X'], {}), '(stratified_X)\n', (31221, 31235), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((31660, 31676), 'numpy.dot', 'np.dot', (['c_0', 'v.T'], {}), '(c_0, v.T)\n', (31666, 31676), True, 'import numpy as np\n'), ((35142, 35220), 'pandas.DataFrame', 
'pd.DataFrame', (["(event_table['observed'] / event_table['hazards'])"], {'columns': '[name]'}), "(event_table['observed'] / event_table['hazards'], columns=[name])\n", (35154, 35220), True, 'import pandas as pd\n'), ((5180, 5197), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5195, 5197), False, 'from datetime import datetime\n'), ((10632, 10651), 'numpy.zeros_like', 'np.zeros_like', (['beta'], {}), '(beta)\n', (10645, 10651), True, 'import numpy as np\n'), ((18908, 18927), 'numpy.log', 'np.log', (['denom[0][0]'], {}), '(denom[0][0])\n', (18914, 18927), True, 'import numpy as np\n'), ((19043, 19063), 'numpy.dot', 'dot', (['x_tie_sum', 'beta'], {}), '(x_tie_sum, beta)\n', (19046, 19063), False, 'from numpy import dot, exp\n'), ((29294, 29309), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (29306, 29309), True, 'import pandas as pd\n'), ((29322, 29357), 'lifelines.utils.pass_for_numeric_dtypes_or_raise', 'pass_for_numeric_dtypes_or_raise', (['X'], {}), '(X)\n', (29354, 29357), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((37412, 37424), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37422, 37424), True, 'from matplotlib import pyplot as plt\n'), ((39897, 39909), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (39907, 39909), True, 'from matplotlib import pyplot as plt\n'), ((11611, 11897), 'lifelines.utils.ConvergenceError', 'ConvergenceError', (['"""hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""'], {}), '(\n """hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""\n )\n', (11627, 11897), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((30942, 31198), 'lifelines.utils.StatError', 'StatError', (['("""The stratum %s was not found in the original training data. For example, try\nthe following on the original dataset, df: `df.groupby(%s).size()`. Expected is that %s is not present in the output.\n"""\n % (stratum, self.strata, stratum))'], {}), '(\n """The stratum %s was not found in the original training data. For example, try\nthe following on the original dataset, df: `df.groupby(%s).size()`. 
Expected is that %s is not present in the output.\n"""\n % (stratum, self.strata, stratum))\n', (30951, 31198), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((31373, 31389), 'numpy.dot', 'np.dot', (['c_0', 'v.T'], {}), '(c_0, v.T)\n', (31379, 31389), True, 'import numpy as np\n'), ((38690, 38710), 'lifelines.utils.significance_code', 'significance_code', (['p'], {}), '(p)\n', (38707, 38710), False, 'from lifelines.utils import survival_table_from_events, inv_normal_cdf, normalize, significance_code, significance_codes_as_text, concordance_index, _get_index, qth_survival_times, pass_for_numeric_dtypes_or_raise, check_low_var, coalesce, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StepSizer, ConvergenceError, string_justify\n'), ((12805, 12816), 'time.time', 'time.time', ([], {}), '()\n', (12814, 12816), False, 'import time\n'), ((13599, 14001), 'warnings.warn', 'warnings.warn', (['"""The log-likelihood is getting suspciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. See https://stats.idre.ucla.edu/other/mult-pkg/faq/general/faqwhat-is-complete-or-quasi-complete-separation-in-logisticprobit-regression-and-how-do-we-deal-with-them/ """', 'ConvergenceWarning'], {}), "(\n 'The log-likelihood is getting suspciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. See https://stats.idre.ucla.edu/other/mult-pkg/faq/general/faqwhat-is-complete-or-quasi-complete-separation-in-logisticprobit-regression-and-how-do-we-deal-with-them/ '\n , ConvergenceWarning)\n", (13612, 14001), False, 'import warnings\n')]
|
from torch.utils.data import Dataset
from scipy import ndimage
from .augmentation import augmentation
import skimage
import imageio
import numpy as np
import h5py
import os
import random
class NeuroDataset(Dataset):
def __init__(self, data_path, phase='train', transform=False, target_channels="3"):
"""Custom PyTorch Dataset for nuclei dataset
Parameters
----------
data_path: str
path to the nuclei dataset hdf5 file
phase: str, optional
            phase this dataset is used for (train, val, test)
"""
self.data_path = data_path
self.phase = phase
self.transform = transform
if "," in target_channels:
            self.target_channels = [int(c) for c in target_channels.split(',')]
else:
self.target_channels = [int(target_channels)]
self.target_dim = len(self.target_channels)
with h5py.File(self.data_path,"r") as h:
self.data_names = list(h.keys())
self.dim = 1
def __len__(self):
return len(self.data_names)
def __getitem__(self, idx):
with h5py.File(self.data_path,"r") as h:
data = h[self.data_names[idx]][:]
x = data[0]
x = np.expand_dims(x, axis=0)
y = data[self.target_channels]
if self.transform:
x, y = augmentation(x, y)
return x, y
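# A hedged usage sketch: how this Dataset might be wired into a PyTorch
# DataLoader. The hdf5 path and batch size below are placeholder assumptions,
# not values taken from the original project.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    train_set = NeuroDataset('data/nuclei.h5', phase='train',
                             transform=False, target_channels='3')
    loader = DataLoader(train_set, batch_size=4, shuffle=True)
    x_batch, y_batch = next(iter(loader))
    # x_batch: (4, 1, H, W) inputs, y_batch: (4, 1, H, W) for a single target channel
    print(x_batch.shape, y_batch.shape)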
|
[
"h5py.File",
"numpy.expand_dims"
] |
[((1287, 1312), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1301, 1312), True, 'import numpy as np\n'), ((951, 981), 'h5py.File', 'h5py.File', (['self.data_path', '"""r"""'], {}), "(self.data_path, 'r')\n", (960, 981), False, 'import h5py\n'), ((1172, 1202), 'h5py.File', 'h5py.File', (['self.data_path', '"""r"""'], {}), "(self.data_path, 'r')\n", (1181, 1202), False, 'import h5py\n')]
|
'''Code obtained from https://github.com/gitshanks/fer2013'''
# load json and create model
from __future__ import division
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import numpy as np
json_file = open('web_app/keras_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("web_app/keras_model_weights.h5")
print("Loaded model from disk")
truey=[]
predy=[]
x = np.load('data/keras_modXtest.npy')
y = np.load('data/keras_modytest.npy')
yhat= loaded_model.predict(x)
yh = yhat.tolist()
yt = y.tolist()
count = 0
for i in range(len(y)):
yy = max(yh[i])
yyt = max(yt[i])
predy.append(yh[i].index(yy))
truey.append(yt[i].index(yyt))
if(yh[i].index(yy)== yt[i].index(yyt)):
count+=1
acc = (count/len(y))*100
#saving values for confusion matrix and analysis
np.save('data/truey', truey)
np.save('data/predy', predy)
print("Predicted and true label values saved")
print("Accuracy on test set :"+str(acc)+"%")
|
[
"numpy.load",
"keras.models.model_from_json",
"numpy.save"
] |
[((394, 428), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (409, 428), False, 'from keras.models import model_from_json\n'), ((574, 608), 'numpy.load', 'np.load', (['"""data/keras_modXtest.npy"""'], {}), "('data/keras_modXtest.npy')\n", (581, 608), True, 'import numpy as np\n'), ((613, 647), 'numpy.load', 'np.load', (['"""data/keras_modytest.npy"""'], {}), "('data/keras_modytest.npy')\n", (620, 647), True, 'import numpy as np\n'), ((996, 1024), 'numpy.save', 'np.save', (['"""data/truey"""', 'truey'], {}), "('data/truey', truey)\n", (1003, 1024), True, 'import numpy as np\n'), ((1025, 1053), 'numpy.save', 'np.save', (['"""data/predy"""', 'predy'], {}), "('data/predy', predy)\n", (1032, 1053), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import sys
sys.path.append("../")
import plotlib
import numpy
import pylab
import networkx
import pickle
import sys
G,pos=pickle.load(open("graph.pickle","rb"))
a_arr, m_hist, cor_hist=pickle.load(open("results.pickle","rb"))
e=numpy.loadtxt("eigenval.csv", delimiter=",")
v=numpy.loadtxt("eigenvec.csv", delimiter=",")
group_id=numpy.loadtxt("group_id.csv", delimiter=",")
sort_ind=numpy.argsort(group_id)
A=numpy.loadtxt("adjacency.csv",delimiter=",")
Dnorm=numpy.diag(numpy.sum(A,axis=1)**-1)
prob=Dnorm@A
P=v.shape[0]
for dim in range(1,5):
x=v[:,1:dim+1]
x=x/numpy.linalg.norm(x,axis=1,keepdims=True)
r=numpy.zeros(P)
for i in range(P):
r[i]=numpy.sum(prob[i,:]*0.5*(1-numpy.sum(x*x[i:i+1,:],axis=1)))
plotlib.plot_color_network_positive("subgoal_laplacian"+str(dim)+".svg",G,pos,r)
for a_ind in range(len(a_arr)):
a=numpy.around(a_arr[a_ind],decimals=1)
cor=cor_hist[a_ind]
r=numpy.zeros(P)
for i in range(P):
r[i]=numpy.sum(prob[i,:]*0.5*(1-cor[i,:]))
plotlib.plot_color_network_positive("subgoal_network"+str(a_ind).zfill(2)+".svg",G,pos,r,title=r"$\alpha$="+str(a))
|
[
"sys.path.append",
"numpy.sum",
"numpy.zeros",
"numpy.argsort",
"numpy.around",
"numpy.linalg.norm",
"numpy.loadtxt"
] |
[((35, 57), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (50, 57), False, 'import sys\n'), ((254, 298), 'numpy.loadtxt', 'numpy.loadtxt', (['"""eigenval.csv"""'], {'delimiter': '""","""'}), "('eigenval.csv', delimiter=',')\n", (267, 298), False, 'import numpy\n'), ((301, 345), 'numpy.loadtxt', 'numpy.loadtxt', (['"""eigenvec.csv"""'], {'delimiter': '""","""'}), "('eigenvec.csv', delimiter=',')\n", (314, 345), False, 'import numpy\n'), ((355, 399), 'numpy.loadtxt', 'numpy.loadtxt', (['"""group_id.csv"""'], {'delimiter': '""","""'}), "('group_id.csv', delimiter=',')\n", (368, 399), False, 'import numpy\n'), ((410, 433), 'numpy.argsort', 'numpy.argsort', (['group_id'], {}), '(group_id)\n', (423, 433), False, 'import numpy\n'), ((437, 482), 'numpy.loadtxt', 'numpy.loadtxt', (['"""adjacency.csv"""'], {'delimiter': '""","""'}), "('adjacency.csv', delimiter=',')\n", (450, 482), False, 'import numpy\n'), ((649, 663), 'numpy.zeros', 'numpy.zeros', (['P'], {}), '(P)\n', (660, 663), False, 'import numpy\n'), ((884, 922), 'numpy.around', 'numpy.around', (['a_arr[a_ind]'], {'decimals': '(1)'}), '(a_arr[a_ind], decimals=1)\n', (896, 922), False, 'import numpy\n'), ((952, 966), 'numpy.zeros', 'numpy.zeros', (['P'], {}), '(P)\n', (963, 966), False, 'import numpy\n'), ((499, 519), 'numpy.sum', 'numpy.sum', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (508, 519), False, 'import numpy\n'), ((601, 644), 'numpy.linalg.norm', 'numpy.linalg.norm', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (618, 644), False, 'import numpy\n'), ((1003, 1048), 'numpy.sum', 'numpy.sum', (['(prob[i, :] * 0.5 * (1 - cor[i, :]))'], {}), '(prob[i, :] * 0.5 * (1 - cor[i, :]))\n', (1012, 1048), False, 'import numpy\n'), ((727, 763), 'numpy.sum', 'numpy.sum', (['(x * x[i:i + 1, :])'], {'axis': '(1)'}), '(x * x[i:i + 1, :], axis=1)\n', (736, 763), False, 'import numpy\n')]
|
from .helpers import dualLP, optimalLP, optimalLPConstant
import numpy as np
def computeCostMinPoA(n, B, f, options=None):
''' Authors: <NAME>, <NAME> and <NAME>
Copyright(c) 2020 <NAME>, <NAME>, <NAME>.
All rights reserved. See LICENSE file in the project root for full license information.
Description
-----------
Computes the price-of-anarchy of atomic congestion games
with congestion functions obtained as linear combinations of
{b_1(x),...,b_m(x)}, and n players
Parameters
----------
n : int
Number of players.
B : (m,n) ndarray
Basis congestion functions defined for 'N = {1, 2, ..., n}'.
f : (m,n) ndarray
Player cost functions defined for 'N = {1, 2, ..., n}'.
options : dict, optional
Optimization options.
Returns
-------
PoA : float
Price-of-anarchy.
'''
if options is None:
try:
from scipy.optimize import linprog
options = { 'solver' : linprog,
'method' : 'revised simplex' }
except ImportError:
msg = 'No optimization options were specified, and SciPy is not installed.'
raise RuntimeError(msg)
Btemp = np.pad(B, pad_width=((0,0),(1,1)), mode='constant').T
ftemp = np.pad(f, pad_width=((0,0),(1,1)), mode='constant').T
x, _, exitFlag, output = dualLP( n, Btemp, ftemp, True, options)
if exitFlag:
raise RuntimeError(output)
PoA = 1./x[1]
return PoA
def computeWelfareMaxPoA(n, B, f, options=None):
''' Authors: <NAME>, <NAME> and <NAME>
Copyright(c) 2020 <NAME>, <NAME>, <NAME>.
All rights reserved. See LICENSE file in the project root for full license information.
Description
-----------
Computes the price-of-anarchy of atomic congestion games
with welfare functions obtained as linear combinations of
{b_1(x),...,b_m(x)}, utility functions obtained as linear
combinations of {f_1(x),...,f_m(x)}, and n players.
Parameters
----------
n : int
Number of players.
B : (m,n) ndarray
Basis welfare functions defined for 'N = {1, 2, ..., n}'.
f : (m,n) ndarray
Player utility functions defined for 'N = {1, 2, ..., n}'.
options : dict, optional
Optimization options.
Returns
-------
PoA : float
        Price-of-anarchy.
'''
if options is None:
try:
from scipy.optimize import linprog
options = { 'solver' : linprog,
'method' : 'revised simplex' }
except ImportError:
msg = 'No optimization options were specified, and SciPy is not installed.'
raise RuntimeError(msg)
Btemp = np.pad(B, pad_width=((0,0),(1,1)), mode='constant').T
ftemp = np.pad(f, pad_width=((0,0),(1,1)), mode='constant').T
x, _, exitFlag, output = dualLP( n, Btemp, ftemp, False, options)
if exitFlag:
raise RuntimeError(output)
PoA = x[1]
return PoA
def optimizeCostMinPoA(n, B, options=None):
''' Authors: <NAME>, <NAME> and <NAME>
Copyright(c) 2020 <NAME>, <NAME>, <NAME>.
All rights reserved. See LICENSE file in the project root for full license information.
Description
-----------
Optimizes the price-of-anarchy of atomic congestion games
with congestion functions obtained as linear combination of
basis {b_1(x),...,b_m(x)}, and n players.
Parameters
----------
n : int
Number of players.
B : (m,n) ndarray
Basis cost functions defined for 'N = {1, 2, ..., n}'.
options : dict, optional
Optimization options.
Returns
-------
OptPoA : float
        Price-of-anarchy of the optimal mechanism.
Optf : (m,n) ndarray
Functions used to generate optimal mechanism.
'''
if options is None:
try:
from scipy.optimize import linprog
options = { 'solver' : linprog,
'method' : 'revised simplex' }
except ImportError:
msg = 'No optimization options were specified, and SciPy is not installed.'
raise RuntimeError(msg)
m = np.shape( B )[0]
OptPoA = 0.
    Optf = np.zeros( (m,n), dtype=float )
for currentBasis in np.arange(m):
w = B[currentBasis,:]
x, _, exitFlag, output = optimalLP(n, np.pad(w, pad_width=1, mode='constant'), True, options)
if exitFlag:
raise RuntimeError(output)
Optf[currentBasis,:] = x[0:n]
currentPoA = 1./x[n]
OptPoA = max(OptPoA, currentPoA)
return [ OptPoA, Optf ]
def optimizeCostMinPoAConstant(n, B, options=None):
''' Authors: <NAME>, <NAME> and <NAME>
Copyright(c) 2020 <NAME>, <NAME>, <NAME>.
All rights reserved. See LICENSE file in the project root for full license information.
Description
-----------
Optimizes the price-of-anarchy (using *constant* tolls) of atomic
congestion games with congestion functions obtained as linear combination
of basis {b_1(x),...,b_m(x)}, and n players.
Parameters
----------
n : int
Number of players.
B : (m,n) ndarray
Basis congestion functions defined for 'N = {1, 2, ..., n}'.
options : dict, optional
Optimization options.
Returns
-------
OptPoA : float
Price-of-anarchy of optimal constant mechanism.
OptTau : (m,) ndarray
Values used to generate optimal constant mechanism.
'''
if options is None:
try:
from scipy.optimize import linprog
options = { 'solver' : linprog,
'method' : 'revised simplex' }
except ImportError:
msg = 'No optimization options were specified, and SciPy is not installed.'
raise RuntimeError(msg)
m = np.shape( B )[0]
Btemp = np.pad(B, pad_width=((0,0),(1,1)), mode='constant').T
x, _, exitFlag, output = optimalLPConstant(n, Btemp, True, options)
if exitFlag:
raise RuntimeError(output)
OptPoA = 1./x[m+1]
OptTau = x[0:m]/x[m]
return [ OptPoA, OptTau ]
def optimizeWelfareMaxPoA(n, B, options=None):
''' Authors: <NAME>, <NAME> and <NAME>
Copyright(c) 2020 <NAME>, <NAME>, <NAME>.
All rights reserved. See LICENSE file in the project root for full license information.
Description
-----------
Optimizes the price-of-anarchy of atomic congestion games
with welfare functions obtained as linear combination of basis
{b_1(x),...,b_m(x)}, and n players.
Parameters
----------
n : int
Number of players.
B : (m,n) ndarray
Resource welfare function defined for 'N = {1, 2, ..., n}'.
options : dict, optional
Choice of solver and options.
Returns
-------
OptPoA : float
Optimal price-of-anarchy.
Optf : (m,n) ndarray
Functions used to generate optimal mechanism.
'''
if options is None:
try:
from scipy.optimize import linprog
options = { 'solver' : linprog,
'method' : 'revised simplex' }
except ImportError:
msg = 'No optimization options were specified, and SciPy is not installed.'
raise RuntimeError(msg)
m = np.shape( B )[0]
OptPoA = 0.
    Optf = np.zeros( (m,n), dtype=float )
for currentBasis in np.arange(m):
w = B[currentBasis,:]
x, _, exitFlag, output = optimalLP(n, np.pad(w, pad_width=1, mode='constant'), False, options)
if exitFlag:
raise RuntimeError(output)
Optf[currentBasis,:] = x[0:n]
currentPoA = x[n]
OptPoA = max(OptPoA, currentPoA)
return [ OptPoA, Optf ]
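# A hedged usage sketch (not from the original project): price-of-anarchy for
# n = 2 players with affine basis functions b_1(x) = 1 and b_2(x) = x,
# evaluated on N = {1, 2}, using the basis itself as the player cost functions.
# Requires SciPy for the default linprog-based options.
if __name__ == '__main__':
    n_players = 2
    basis = np.array([[1., 1.],    # b_1(1), b_1(2)
                      [1., 2.]])   # b_2(1), b_2(2)
    print('PoA:', computeCostMinPoA(n_players, basis, basis))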
|
[
"numpy.pad",
"numpy.shape",
"numpy.zeros",
"numpy.arange"
] |
[((4569, 4601), 'numpy.zeros', 'np.zeros', (['(m, n)'], {'dtype': 'np.float'}), '((m, n), dtype=np.float)\n', (4577, 4601), True, 'import numpy as np\n'), ((4628, 4640), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (4637, 4640), True, 'import numpy as np\n'), ((7957, 7989), 'numpy.zeros', 'np.zeros', (['(m, n)'], {'dtype': 'np.float'}), '((m, n), dtype=np.float)\n', (7965, 7989), True, 'import numpy as np\n'), ((8016, 8028), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (8025, 8028), True, 'import numpy as np\n'), ((1322, 1376), 'numpy.pad', 'np.pad', (['B'], {'pad_width': '((0, 0), (1, 1))', 'mode': '"""constant"""'}), "(B, pad_width=((0, 0), (1, 1)), mode='constant')\n", (1328, 1376), True, 'import numpy as np\n'), ((1388, 1442), 'numpy.pad', 'np.pad', (['f'], {'pad_width': '((0, 0), (1, 1))', 'mode': '"""constant"""'}), "(f, pad_width=((0, 0), (1, 1)), mode='constant')\n", (1394, 1442), True, 'import numpy as np\n'), ((2956, 3010), 'numpy.pad', 'np.pad', (['B'], {'pad_width': '((0, 0), (1, 1))', 'mode': '"""constant"""'}), "(B, pad_width=((0, 0), (1, 1)), mode='constant')\n", (2962, 3010), True, 'import numpy as np\n'), ((3022, 3076), 'numpy.pad', 'np.pad', (['f'], {'pad_width': '((0, 0), (1, 1))', 'mode': '"""constant"""'}), "(f, pad_width=((0, 0), (1, 1)), mode='constant')\n", (3028, 3076), True, 'import numpy as np\n'), ((4525, 4536), 'numpy.shape', 'np.shape', (['B'], {}), '(B)\n', (4533, 4536), True, 'import numpy as np\n'), ((6329, 6340), 'numpy.shape', 'np.shape', (['B'], {}), '(B)\n', (6337, 6340), True, 'import numpy as np\n'), ((6358, 6412), 'numpy.pad', 'np.pad', (['B'], {'pad_width': '((0, 0), (1, 1))', 'mode': '"""constant"""'}), "(B, pad_width=((0, 0), (1, 1)), mode='constant')\n", (6364, 6412), True, 'import numpy as np\n'), ((7913, 7924), 'numpy.shape', 'np.shape', (['B'], {}), '(B)\n', (7921, 7924), True, 'import numpy as np\n'), ((4727, 4766), 'numpy.pad', 'np.pad', (['w'], {'pad_width': '(1)', 'mode': '"""constant"""'}), "(w, pad_width=1, mode='constant')\n", (4733, 4766), True, 'import numpy as np\n'), ((8115, 8154), 'numpy.pad', 'np.pad', (['w'], {'pad_width': '(1)', 'mode': '"""constant"""'}), "(w, pad_width=1, mode='constant')\n", (8121, 8154), True, 'import numpy as np\n')]
|
import folium
import numpy as np
from folium.plugins import HeatMap, MarkerCluster
import pandas as pd
from math import sin, cos, acos, asin, atan2, radians, degrees
def plot_circle(lat, lon, radius, map=None, **kwargs):
"""
Plot a circle on a map (creating a new folium map instance if necessary).
Parameters
----------
lat: float
latitude of circle to plot (degrees)
lon: float
longitude of circle to plot (degrees)
radius: arraylike, float
List of distances specifying the radius of circle(s) to plot (m)
map: folium.Map
existing map object
Returns
-------
Folium map object
Examples
--------
>>> import folium
>>> armageddon.plot_circle(52.79, -2.95, 1e3, map=None)
"""
# If the radius is int or float, change radius to list
if isinstance(radius, (int, float)):
radius = [radius]
# If a map is not given, create a map
if not map:
map = folium.Map(location=[lat, lon], control_scale=True)
# Decide colors which are used for showing damage zone circles.
# zone1: purple, zone2: red, zone3: orange, zone4: yellow
colors = ['#9370DB', '#DC143C', '#FF8000', '#FFFF00']
    # Plot colored circles, starting from zone 1.
    # To do so, sort the radius list so that zone number = color index + 1.
for i, rad in enumerate(sorted(radius, reverse=True)):
folium.Circle([lat, lon], rad, fill=True,
fillOpacity=1., color=colors[i],
**kwargs).add_to(map)
return map
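# Illustrative usage (hedged sketch, arbitrary radii): passing a list of radii
# draws nested damage-zone circles, largest first, so the smaller zones remain
# visible on top:
#   plot_circle(52.79, -2.95, [5e3, 2e3, 1e3])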
def latlon_to_xyz(lat, lon):
"""Change lattitude and longitude into the rectangular coordinate system.
The equatorial plane is the xy plane,
and the axis of rotation is the z axis.
Parameters
----------
lat: float
latitude(degree)
lon: float
longitude(degree)
rlat: float
latitude(rad)
rlon: float
longitude(rad)
Returns
---------
float
Points on the rectangular coordinate system.
"""
# Change degrees to radians
rlat, rlon = radians(lat), radians(lon)
return cos(rlat) * cos(rlon), cos(rlat) * sin(rlon), sin(rlat)
def xyz_to_latlon(x, y, z):
"""Change coodinate from xyz coordinate system to
latitude & longitude coordinates.
Parameter
----------
x: float
x coordinate of Equatorial plane
y: float
y coodinate of Equatorial plane
z: float
z coodinate of Arctic direction
Returns
---------
float
Points on the earth surface(degree)
"""
rlat = asin(z)
coslat = cos(rlat)
return degrees(rlat), degrees(atan2(y / coslat, x / coslat))
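# Quick sanity check (illustrative; values approximate): the two conversions
# above are inverses of each other on the unit sphere, e.g.
#   latlon_to_xyz(0.0, 0.0)                     # -> (1.0, 0.0, 0.0)
#   xyz_to_latlon(*latlon_to_xyz(51.5, -0.13))  # -> roughly (51.5, -0.13)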
def halfway_on_sphere(lat, lon, elat, elon, z):
"""
    Calculate a point on the great circle route of the asteroid. If z = 0.5,
    the return value is the lat & lon of the halfway point.
    Parameters
---------
lat: float
latitude of zero point
lon: float
longitude of zero point
elat: float
latitude of entry point
elon: float
longitude of entry point
z: float
        fraction along the great circle from the zero point (z=0) to the entry point (z=1).
Return
--------
list
latitude and longitude of interval point.
"""
    # Change latitude & longitude to xyz coordinates
xyz0, xyz1 = latlon_to_xyz(lat, lon), latlon_to_xyz(elat, elon)
    # Calculate the central angle between the entry point and the zero point.
theta = acos(sum(x * y for x, y in zip(xyz0, xyz1)))
v0 = sin(theta * (1 - z)) / sin(theta)
v1 = sin(theta * z) / sin(theta)
# Calculate latitude a& longitude of interval point.
interval_lat, interval_lon = xyz_to_latlon(
*(x * v0 + y * v1 for x, y in zip(xyz0, xyz1)))
return [interval_lat, interval_lon]
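# Illustrative example: the midpoint (z=0.5) of the great circle between
# (0, 0) and (0, 90) lies at roughly (0.0, 45.0):
#   halfway_on_sphere(0.0, 0.0, 0.0, 90.0, z=0.5)  # -> approximately [0.0, 45.0]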
def plot_line(lat, lon, elat, elon, map=None, n=100):
"""
    Plot a black line connecting the entry point and the zero point.
Parameters
----------
lat: float
latitude of circle to plot (degrees)
lon: float
longitude of circle to plot (degrees)
elat: float
latitude of entry point (degrees)
elon: float
longitude of entry point(degrees)
n: int
        number of route divisions. Default value is 100.
map: folium.Map
existing map object
Returns
-------
Folium map object
Examples
--------
>>> plot_line(52.79, -2.95, 53.48, -2.24 , map=None)
"""
# If a map is not given, create a map.
if not map:
map = folium.Map(location=[lat, lon], control_scale=True)
Harf = []
    # Calculate interval points between the entry and zero points.
for i in range(n):
Intervals = halfway_on_sphere(lat, lon, elat, elon, z=i / (n + 1))
Harf.append(Intervals)
    # Make a list of plotting points : [entry point, halfway points, zero point]
points = [[lat, lon], *Harf, [elat, elon]]
# Plotting a line on map.
folium.PolyLine(points,
color="black", weight=2.5, opacity=1).add_to(map)
return map
def get_lat_long_of_postcodes(postcodes, sector=False):
"""
    Return locations (latitude, longitude) of a list of postcode units or sectors.
Parameters
----------
postcodes : list of lists
list of postcode units or postcode sectors
sector : bool, optional
        if True, return locations for postcode sectors,
        otherwise locations for postcode units
Returns
-------
list of lists
Contains the latitude,longitude of input postcode units or sectors
Examples
--------
>>> get_lat_log_of_postcode([['SW7 2AZ','SW7 2BT','SW7 2BU','SW7 2DD']])
>>> get_lat_log_of_postcode([['SW7 2']], True)
"""
# Get postcodes from csv
postcodes_pd = pd.read_csv('./armageddon/resources/full_postcodes.csv')
# Modify postcodes to no spaces for processing
postcodes = [[x2.replace(" ", "_") for x2 in x1] for x1 in postcodes]
postcodes_pd['Postcode'] = postcodes_pd['Postcode'].str.replace(" ", "_")
    # If the sector flag is True, take the average of the unit locations per sector
if sector:
postcodes_pd = postcodes_pd.groupby(
postcodes_pd['Postcode'].str.slice(stop=5),
as_index=True).mean().reset_index()
# Select postcodes
select_postcodes = postcodes_pd[
postcodes_pd['Postcode'].isin(postcodes[0])][['Latitude', 'Longitude']]
return select_postcodes.values.tolist()
def heat_map_layer(locations, weights, map=None, radius=25):
"""
    Return a heat map layer for a folium map from
a list of locations and a list of weights
Parameters
----------
locations : list of lists
        list of latitude and longitude coordinates
corresponding to postcode units or postcode sectors
weights : list of lists, array-like
list of weights to be plotted at locations
Returns
-------
    Folium map
Examples
--------
>>> locations = get_lat_long_of_postcodes(postcodes, sector=False)
>>> weights = [['10000', '20000', '30000', '40000']]
>>> heat_map_layer(locations, weights, map = None, radius = 25)
"""
# Calculate an average of latitude and longitude of given locations
Avr_location = np.average(locations, axis=0)
# If a map is not given, create a map
if not map:
map = folium.Map(location=Avr_location, control_scale=True)
# Creating copy of locations
combo = locations.copy()
# Appending weight to the third column of combo.
for i, a in enumerate(combo):
a.append(float(weights[0][i]))
    # Initialize Folium HeatMap instance; each row of combo is a
    # [lat, lon, weight] triple.
heat_map = HeatMap(combo, name=None, min_opacity=0.5,
max_zoom=18, radius=radius, blur=15, gradient=None,
overlay=False, control=True, show=True)
heat_map.add_to(map)
return map
def plot_marker(lat, lon, popup=None, map=None, **kwargs):
"""
Plot a point on a map (creating a new folium map instance if necessary).
Parameters
----------
lat: float
latitude of point to plot (degrees)
lon: float
longitude of point to plot (degrees)
popup: str
will plot a string label at point
map: folium.Map
existing map object
Returns
-------
Folium map object
Examples
--------
>>> import folium
>>> armageddon.plot_point(52.79, -2.95, 1e3, map=None)
"""
if popup is not None:
if isinstance(popup, (str)) is False:
popup = None
if not map:
map = folium.Map(location=[lat, lon], control_scale=True)
folium.map.Marker(location=[lat, lon], popup=popup,
tooltip=None, icon=None,
draggable=False, **kwargs).add_to(map)
return map
def plot_multiple_markers(locations, popups=None, map=None):
"""
    Return a cluster of markers for a folium map from
a list of locations
Parameters
----------
locations : list of lists
        list of latitude and longitude coordinates
corresponding to postcode units or postcode sectors
    popups: list of str
will plot a string label at points
map: folium.Map
existing map object
Returns
-------
    Folium map
Examples
--------
>>> locations = get_lat_long_of_postcodes(postcodes, sector=False)
>>> plot_multiple_markers(locations, popups= None, map = None)
"""
Avr_location = np.average(locations, axis=0)
if not map:
map = folium.Map(location=Avr_location, control_scale=True)
    # Add the marker cluster to the map; return the map object itself.
    MarkerCluster(locations=locations, popups=popups,
                  icons=None, name='Location Markers',
                  overlay=True, control=True,
                  show=True, icon_create_function=None,
                  options=None).add_to(map)
return map
|
[
"folium.map.Marker",
"numpy.average",
"math.asin",
"math.atan2",
"pandas.read_csv",
"math.radians",
"folium.plugins.HeatMap",
"math.sin",
"folium.Circle",
"math.cos",
"folium.Map",
"folium.plugins.MarkerCluster",
"folium.PolyLine",
"math.degrees"
] |
[((2603, 2610), 'math.asin', 'asin', (['z'], {}), '(z)\n', (2607, 2610), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((2624, 2633), 'math.cos', 'cos', (['rlat'], {}), '(rlat)\n', (2627, 2633), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((5734, 5790), 'pandas.read_csv', 'pd.read_csv', (['"""./armageddon/resources/full_postcodes.csv"""'], {}), "('./armageddon/resources/full_postcodes.csv')\n", (5745, 5790), True, 'import pandas as pd\n'), ((7200, 7229), 'numpy.average', 'np.average', (['locations'], {'axis': '(0)'}), '(locations, axis=0)\n', (7210, 7229), True, 'import numpy as np\n'), ((7605, 7744), 'folium.plugins.HeatMap', 'HeatMap', (['combo'], {'name': 'None', 'min_opacity': '(0.5)', 'max_zoom': '(18)', 'radius': 'radius', 'blur': '(15)', 'gradient': 'None', 'overlay': '(False)', 'control': '(True)', 'show': '(True)'}), '(combo, name=None, min_opacity=0.5, max_zoom=18, radius=radius, blur\n =15, gradient=None, overlay=False, control=True, show=True)\n', (7612, 7744), False, 'from folium.plugins import HeatMap, MarkerCluster\n'), ((9429, 9458), 'numpy.average', 'np.average', (['locations'], {'axis': '(0)'}), '(locations, axis=0)\n', (9439, 9458), True, 'import numpy as np\n'), ((974, 1025), 'folium.Map', 'folium.Map', ([], {'location': '[lat, lon]', 'control_scale': '(True)'}), '(location=[lat, lon], control_scale=True)\n', (984, 1025), False, 'import folium\n'), ((2096, 2108), 'math.radians', 'radians', (['lat'], {}), '(lat)\n', (2103, 2108), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((2110, 2122), 'math.radians', 'radians', (['lon'], {}), '(lon)\n', (2117, 2122), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((2181, 2190), 'math.sin', 'sin', (['rlat'], {}), '(rlat)\n', (2184, 2190), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((2645, 2658), 'math.degrees', 'degrees', (['rlat'], {}), '(rlat)\n', (2652, 2658), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((3507, 3527), 'math.sin', 'sin', (['(theta * (1 - z))'], {}), '(theta * (1 - z))\n', (3510, 3527), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((3530, 3540), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (3533, 3540), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((3550, 3564), 'math.sin', 'sin', (['(theta * z)'], {}), '(theta * z)\n', (3553, 3564), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((3567, 3577), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (3570, 3577), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((4499, 4550), 'folium.Map', 'folium.Map', ([], {'location': '[lat, lon]', 'control_scale': '(True)'}), '(location=[lat, lon], control_scale=True)\n', (4509, 4550), False, 'import folium\n'), ((7303, 7356), 'folium.Map', 'folium.Map', ([], {'location': 'Avr_location', 'control_scale': '(True)'}), '(location=Avr_location, control_scale=True)\n', (7313, 7356), False, 'import folium\n'), ((8523, 8574), 'folium.Map', 'folium.Map', ([], {'location': '[lat, lon]', 'control_scale': '(True)'}), '(location=[lat, lon], control_scale=True)\n', (8533, 8574), False, 'import folium\n'), ((9490, 9543), 'folium.Map', 'folium.Map', ([], {'location': 'Avr_location', 'control_scale': '(True)'}), '(location=Avr_location, control_scale=True)\n', (9500, 9543), False, 'import folium\n'), ((2135, 2144), 'math.cos', 'cos', (['rlat'], {}), 
'(rlat)\n', (2138, 2144), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((2147, 2156), 'math.cos', 'cos', (['rlon'], {}), '(rlon)\n', (2150, 2156), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((2158, 2167), 'math.cos', 'cos', (['rlat'], {}), '(rlat)\n', (2161, 2167), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((2170, 2179), 'math.sin', 'sin', (['rlon'], {}), '(rlon)\n', (2173, 2179), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((2668, 2697), 'math.atan2', 'atan2', (['(y / coslat)', '(x / coslat)'], {}), '(y / coslat, x / coslat)\n', (2673, 2697), False, 'from math import sin, cos, acos, asin, atan2, radians, degrees\n'), ((4922, 4983), 'folium.PolyLine', 'folium.PolyLine', (['points'], {'color': '"""black"""', 'weight': '(2.5)', 'opacity': '(1)'}), "(points, color='black', weight=2.5, opacity=1)\n", (4937, 4983), False, 'import folium\n'), ((8580, 8687), 'folium.map.Marker', 'folium.map.Marker', ([], {'location': '[lat, lon]', 'popup': 'popup', 'tooltip': 'None', 'icon': 'None', 'draggable': '(False)'}), '(location=[lat, lon], popup=popup, tooltip=None, icon=None,\n draggable=False, **kwargs)\n', (8597, 8687), False, 'import folium\n'), ((9555, 9730), 'folium.plugins.MarkerCluster', 'MarkerCluster', ([], {'locations': 'locations', 'popups': 'popups', 'icons': 'None', 'name': '"""Location Markers"""', 'overlay': '(True)', 'control': '(True)', 'show': '(True)', 'icon_create_function': 'None', 'options': 'None'}), "(locations=locations, popups=popups, icons=None, name=\n 'Location Markers', overlay=True, control=True, show=True,\n icon_create_function=None, options=None)\n", (9568, 9730), False, 'from folium.plugins import HeatMap, MarkerCluster\n'), ((1408, 1497), 'folium.Circle', 'folium.Circle', (['[lat, lon]', 'rad'], {'fill': '(True)', 'fillOpacity': '(1.0)', 'color': 'colors[i]'}), '([lat, lon], rad, fill=True, fillOpacity=1.0, color=colors[i],\n **kwargs)\n', (1421, 1497), False, 'import folium\n')]
|
import numpy as np
from math import trunc  # used by interp_step below
from scipy import interpolate as interp  # assumed provider of interp.CubicSpline used below
from matplotlib import pyplot as plt
from .interval import Interval
class Pbox(object):
def __init__(self, left=None, right=None, steps=200, shape=None, mean_left=None, mean_right=None, var_left=None, var_right=None, interpolation='linear'):
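        # left/right hold the bounding arrays of the p-box (one x value per
        # probability level), each discretised to `steps` points; inputs of a
        # different length are interpolated below, and the moment bounds are
        # then tightened in _computemoments/_checkmoments.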
if (left is not None) and (right is None):
right = left
if left is None and right is None:
left = -np.inf
right = np.inf
if isinstance(left, Interval):
left = np.array([left.left()])
if isinstance(right, Interval):
right = np.array([right.right()])
if len(left) != steps:
left = interpolate(left, interpolation=interpolation, left=False, steps=steps)
if len(right) != steps:
right = interpolate(right, interpolation=interpolation, left=True, steps=steps)
self.left = left
self.right = right
self.steps = steps
self.n = self.steps
self.shape = shape
self.mean_left = -np.inf
self.mean_right = np.inf
self.var_left = 0
self.var_right = np.inf
self._computemoments()
if shape is not None: self.shape = shape
if mean_left is not None: self.mean_left = np.max([mean_left, self.mean_left])
if mean_right is not None: self.mean_right = np.min([mean_right, self.mean_right])
if var_left is not None: self.var_left = np.max([var_left, self.var_left])
if var_right is not None: self.var_right = np.min([var_right, self.var_right])
self._checkmoments()
def __repr__(self):
if self.mean_left == self.mean_right:
mean_text = f'{round(self.mean_left, 4)}'
else:
mean_text = f'[{round(self.mean_left, 4)}, {round(self.mean_right, 4)}]'
if self.var_left == self.var_right:
var_text = f'{round(self.var_left, 4)}'
else:
var_text = f'[{round(self.var_left, 4)}, {round(self.var_right, 4)}]'
        range_text = f'[{round(np.min([self.left, self.right]), 4)}, {round(np.max([self.left, self.right]), 4)}]'
if self.shape is None:
shape_text = ' '
else:
shape_text = f' {self.shape}' # space to start; see below lacking space
return f'Pbox: ~{shape_text}(range={range_text}, mean={mean_text}, var={var_text})'
def __iter__(self):
for val in np.array([self.left,self.right]).flatten():
yield val
def __neg__(self):
if self.shape in ['uniform','normal','cauchy','triangular','skew-normal']:
s = self.shape
else:
s = ''
return Pbox(
left = -np.flip(self.right),
right = -np.flip(self.left),
shape = s,
mean_left = -self.mean_right,
mean_right = -self.mean_left,
var_left = self.var_left,
var_right = self.var_right
)
def __lt__(self,other):
return self.lt(other, method = 'f')
def __rlt__(self,other):
return self.ge(other, method = 'f')
def __le__(self,other):
return self.le(other, method = 'f')
def __rle__(self,other):
return self.gt(other, method = 'f')
def __gt__(self,other):
return self.gt(other, method = 'f')
def __rgt__(self,other):
return self.le(other, method = 'f')
def __ge__(self,other):
return self.ge(other, method = 'f')
def __rge__(self,other):
return self.lt(other, method = 'f')
def __and__(self, other):
return self.logicaland(other, method = 'f')
def __rand__(self,other):
return self.logicaland(other, method = 'f')
def __or__(self, other):
return self.logicalor(other, method = 'f')
def __ror__(self,other):
return self.logicalor(other, method = 'f')
def __add__(self, other):
return self.add(other, method = 'f')
def __radd__(self,other):
return self.add(other, method = 'f')
def __sub__(self,other):
return self.sub(other, method = 'f')
def __rsub__(self,other):
self = - self
return self.add(other, method = 'f')
def __mul__(self,other):
return self.mul(other, method = 'f')
def __rmul__(self,other):
return self.mul(other, method = 'f')
def __truediv__(self, other):
return self.div(other, method = 'f')
def __rtruediv__(self,other):
try:
return other * self.recip()
except:
return NotImplemented
### Local functions ###
def _computemoments(self): # should we compute mean if it is a Cauchy, var if it's a t distribution?
self.mean_left = np.max([self.mean_left, np.mean(self.left)])
self.mean_right = np.min([self.mean_right, np.mean(self.right)])
if not (np.any(self.left <= -np.inf) or np.any(np.inf <= self.right)):
V, JJ = 0, 0
j = np.array(range(self.n))
for J in np.array(range(self.n)) - 1:
ud = [*self.left[j < J], *self.right[J <= j]]
v = sideVariance(ud)
if V < v:
JJ = J
V = v
self.var_right = V
def _checkmoments(self):
a = Interval(self.mean_left, self.mean_right) #mean(x)
b = dwMean(self)
self.mean_left = np.max([left(a), left(b)])
self.mean_right = np.min([right(a), right(b)])
if self.mean_right < self.mean_left:
# use the observed mean
self.mean_left = left(b)
self.mean_right = right(b)
a = Interval(self.var_left, self.var_right) #var(x)
b = dwVariance(self)
self.var_left = np.max([left(a), left(b)])
self.var_right = np.min([right(a),right(b)])
if self.var_right < self.var_left:
# use the observed variance
self.var_left = left(b)
self.var_right = right(b)
### Public functions ###
# "%<%" <- lt <- function(x,y) prob.pbox(frechetconv.pbox(x,negate(y),'+'),0);
# "%>%" <- gt <- function(x,y) xprob.pbox(frechetconv.pbox(y,negate(x),'+'),0)
# "%<=%" <- lte <- function(x,y) xprob.pbox(frechetconv.pbox(x,negate(y),'+'),0);
# "%>=%" <- gte <- function(x,y) prob.pbox(frechetconv.pbox(y,negate(x),'+'),0)
# "%&%" <- function(x,y) and.pbox(x,y);
# "%|%" <- function(x,y) or.pbox(x,y)
def lt(self, other, method = 'f'):
b = self.add(-other, method)
return(b.get_probability(0)) # return (self.add(-other, method)).get_probability(0)
def le(self, other, method = 'f'):
b = self.add(-other, method)
return(b.get_probability(0)) # how is the "or equal to" affecting the calculation?
def gt(self, other, method = 'f'):
self = - self
b = self.add(other, method)
return(b.get_probability(0)) # maybe 1-prob ?
def ge(self, other, method = 'f'):
self = - self
b = self.add(other, method)
return(b.get_probability(0))
#pmin.pbox <- function (..., na.rm = FALSE) {
# elts <- makepbox(...)
# m <- elts[[1]]
# for (each in elts[-1]) m <- frechetconv.pbox(m, each, 'pmin')
# m
# }
#
#pmax.pbox <- function (..., na.rm = FALSE) {
# elts <- makepbox(...)
# m <- elts[[1]]
# for (each in elts[-1]) m <- frechetconv.pbox(m, each, 'pmax')
# m
# }
#
#pminI.pbox <- function (..., na.rm = FALSE) {
# elts <- makepbox(...)
# m <- elts[[1]]
# for (each in elts[-1]) m <- conv.pbox(m, each, 'pmin')
# m
# }
#
#pmaxI.pbox <- function (..., na.rm = FALSE) {
# elts <- makepbox(...)
# m <- elts[[1]]
# for (each in elts[-1]) m <- conv.pbox(m, each, 'pmax')
# m
# }
def logicaland(self, other, method = 'f'): # conjunction
if method=='i': return(self.mul(other,method)) # independence a * b
# else if method=='p': return(self.min(other,method)) # perfect min(a, b)
# else if method=='o': return(max(self.add(other,method)-1, 0)) # opposite max(a + b – 1, 0)
# else if method=='+': return(self.min(other,method)) # positive env(a * b, min(a, b))
# else if method=='-': return(self.min(other,method)) # negative env(max(a + b – 1, 0), a * b)
# otherwise method=='f' :
return(env(max(0, self.add(other,method) - 1), self.min(other,method)))
def logicalor(self, other, method = 'f'): # disjunction
if method=='i': return(1 - (1-self) * (1-other)) # independent 1 – (1 – a) * (1 – b)
# else if method=='p': return(self.max(other,method)) # perfect max(a, b)
# else if method=='o': return(min(self.add(other,method),1)) # opposite min(1, a + b)
# else if method=='+': return(env(,min(self.add(other,method),1)) # positive env(max(a, b), 1 – (1 – a) * (1 – b))
# else if method=='-': return() # negative env(1 – (1 – a) * (1 – b), min(1, a + b))
# otherwise method=='f' :
return(env(self.max(other,method), min(self.add(other,method),1)))
def env(self, other):
if other.__class__.__name__ == 'Interval':
other = Pbox(other, steps = self.steps)
if other.__class__.__name__ == 'Pbox':
if self.steps != other.steps:
raise ArithmeticError("Both Pboxes must have the same number of steps")
nleft = np.minimum(self.left, other.left)
nright = np.maximum(self.right, other.right)
return Pbox(
left = nleft,
right = nright,
steps = self.steps
)
return NotImplemented
def add(self, other, method = 'f'):
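        # Dependence assumption selected by `method` (as suggested by the
        # branches below and the comments in logicaland/logicalor):
        # 'f' Frechet (no assumption), 'p' perfect, 'o' opposite,
        # 'i' independence (all pairwise combinations).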
if method not in ['f','p','o','i']:
raise ArithmeticError("Calculation method unkown")
if other.__class__.__name__ == 'Interval':
other = Pbox(other, steps = self.steps)
if other.__class__.__name__ == 'Pbox':
if self.steps != other.steps:
raise ArithmeticError("Both Pboxes must have the same number of steps")
if method == 'f':
nleft = np.empty(self.steps)
nright = np.empty(self.steps)
for i in range(0,self.steps):
j = np.array(range(i, self.steps))
k = np.array(range(self.steps - 1, i-1, -1))
nleft[i] = np.min(self.right[j] + other.right[k])
jj = np.array(range(0, i + 1))
kk = np.array(range(i, -1 , -1))
nright[i] = np.max(self.left[jj] + other.left[kk])
elif method == 'p':
nleft = self.left + other.left
nright = self.right + other.right
elif method == 'o':
nleft = self.left + np.flip(other.left)
nright = self.right + np.flip(other.right)
elif method == 'i':
nleft = []
nright = []
for i in self.left:
for j in other.left:
nleft.append(i+j)
for ii in self.right:
for jj in other.right:
nright.append(ii+jj)
nleft.sort()
nright.sort()
return Pbox(
left = nleft,
right = nright,
steps = self.steps
)
else:
try:
# Try adding constant
if self.shape in ['uniform','normal','cauchy','triangular','skew-normal']:
s = self.shape
else:
s = ''
return Pbox(
left = self.left + other,
right = self.right + other,
shape = s,
mean_left = self.mean_left + other,
mean_right = self.mean_right + other,
var_left = self.var_left,
var_right = self.var_right,
steps = self.steps
)
except:
return NotImplemented
def sub(self, other, method = 'f'):
if method == 'o':
method = 'p'
elif method == 'p':
method = 'o'
return self.add(-other, method)
def mul(self, other, method = 'f'):
if method not in ['f','p','o','i']:
raise ArithmeticError("Calculation method unkown")
if other.__class__.__name__ == 'Interval':
other = Pbox(other, steps = self.steps)
if other.__class__.__name__ == 'Pbox':
if self.steps != other.steps:
raise ArithmeticError("Both Pboxes must have the same number of steps")
if method == 'f':
nleft = np.empty(self.steps)
nright = np.empty(self.steps)
for i in range(0,self.steps):
j = np.array(range(i, self.steps))
k = np.array(range(self.steps - 1, i-1, -1))
nleft[i] = np.min(self.right[j] * other.right[k])
jj = np.array(range(0, i + 1))
kk = np.array(range(i, -1 , -1))
nright[i] = np.max(self.left[jj] * other.left[kk])
elif method == 'p':
nleft = self.left * other.left
nright = self.right * other.right
elif method == 'o':
nleft = self.left * np.flip(other.left)
nright = self.right * np.flip(other.right)
elif method == 'i':
nleft = []
nright = []
for i in self.left:
for j in other.left:
nleft.append(i*j)
for ii in self.right:
for jj in other.right:
nright.append(ii*jj)
nleft.sort()
nright.sort()
return Pbox(
left = nleft,
right = nright,
steps = self.steps
)
else:
try:
                # Try multiplying by a constant
if self.shape in ['uniform','normal','cauchy','triangular','skew-normal']:
s = self.shape
else:
s = ''
return Pbox(
left = self.left * other,
right = self.right * other,
shape = s,
mean_left = self.mean_left * other,
mean_right = self.mean_right * other,
var_left = self.var_left,
var_right = self.var_right,
steps = self.steps
)
except:
return NotImplemented
def div(self, other, method = 'f'):
if method == 'o':
method = 'p'
elif method == 'p':
method = 'o'
return self.mul(1/other, method)
def recip(self):
return Pbox(
left = 1 / np.flip(self.right),
right = 1 / np.flip(self.left),
steps = self.steps
)
def show(self,now = True,**kwargs):
# If you want to know why numpy is the WORST thing about Python
# see the get_x code
left, right = self.get_x()
y = self.get_y()
plt.plot(left,y,**kwargs)
plt.plot(right,y,**kwargs)
if now:
plt.show()
else:
return plt
def get_interval(self, *args):
if len(args) == 1:
if args[0] == 1:
# asking for whole pbox bounds
return Interval(min(self.left),max(self.right))
p1 = (1-args[0])/2
p2 = 1-p1
elif len(args) == 2:
p1 = args[0]
p2 = args[1]
else:
raise Exception('Too many inputs')
y = np.append(np.insert(np.linspace(0,1,self.steps),0,0),1)
y1 = 0
while y[y1] < p1:
y1 += 1
y2 = len(y)-1
while y[y2] > p2:
y2 -= 1
x1 = self.left[y1]
x2 = self.right[y2]
return Interval(x1,x2)
def get_probability(self, val):
p = np.append(np.insert(np.linspace(0,1,self.steps),0,0),1)
i = 0
while i < self.steps and self.left[i] < val:
i += 1
ub = p[i]
j = 0
while j < self.steps and self.right[j] < val:
j += 1
lb = p[j]
return Interval(lb,ub)
def support(self):
return np.linspace(0,1,self.steps)
def get_x(self):
# returns the x values for plotting
left = np.append(np.insert(self.left,0,min(self.left)),max(self.right))
right = np.append(np.insert(self.right,0,min(self.left)),max(self.right))
return left, right
def get_y(self):
# returns y values for plotting
return np.append(np.insert(np.linspace(0,1,self.steps),0,0),1)
# Public functions
# Functions
def env_int(*args):
left = min([min(i) if is_iterable(i) else i for i in args])
right = max([max(i) if is_iterable(i) else i for i in args])
return Interval(left, right)
def left(imp):
if isinstance(imp, Interval) or isinstance(imp, pbox.Pbox):
return imp.left()
elif is_iterable(imp):
return min(imp)
else:
return imp
def right(imp):
if isinstance(imp, Interval) or isinstance(imp, pbox.Pbox):
return imp.right()
elif is_iterable(imp):
return max(imp)
else:
return imp
def left_list(implist, verbose=False):
if not is_iterable(implist):
return np.array(implist)
return np.array([left(imp) for imp in implist])
def right_list(implist, verbose=False):
if not is_iterable(implist):
return np.array(implist)
return np.array([right(imp) for imp in implist])
def qleftquantiles(pp, x, p): # if first p is not zero, the left tail will be -Inf
return [max(left_list(x)[right_list(p) <= P]) for P in pp]
def qrightquantiles(pp, x, p): # if last p is not one, the right tail will be Inf
return [min(right_list(x)[P <= left_list(p)]) for P in pp]
def quantiles(x, p, steps=200):
left = qleftquantiles(ii(steps=steps), x, p)
right = qrightquantiles(jj(steps=steps), x, p)
return pbox.Pbox(left=left, right=right) # quantiles are in x and the associated cumulative probabilities are in p
def interp_step(u, steps=200):
u = np.sort(u)
seq = np.linspace(start=0, stop=len(u) - 0.00001, num=steps, endpoint=True)
seq = np.array([trunc(seq_val) for seq_val in seq])
return u[seq]
def interp_cubicspline(vals, steps=200):
vals = np.sort(vals) # sort
vals_steps = np.array(range(len(vals))) + 1
vals_steps = vals_steps / len(vals_steps)
steps = np.array(range(steps)) + 1
steps = steps / len(steps)
interped = interp.CubicSpline(vals_steps, vals)
return interped(steps)
def interp_left(u, steps=200):
p = np.array(range(len(u))) / (len(u) - 1)
pp, x = ii(steps=steps), u
return qleftquantiles(pp, x, p)
def interp_right(d, steps=200):
p = np.array(range(len(d))) / (len(d) - 1)
pp, x = jj(steps=steps), d
return qrightquantiles(pp, x, p)
def interp_outer(x, left, steps=200):
if (left) :
return interp_left(x, steps=steps)
else:
return interp_right(x, steps=steps)
def interp_linear(V, steps=200):
m = len(V) - 1
if m == 0: return np.repeat(V, steps)
if steps == 1: return np.array([min(V), max(V)])
d = 1 / m
n = round(d * steps * 200)
if n == 0:
c = V
else:
c = []
for i in range(m):
v = V[i]
w = V[i + 1]
c.extend(np.linspace(start=v, stop=w, num=n))
u = [c[round((len(c) - 1) * (k + 0) / (steps - 1))] for k in range(steps)]
return np.array(u)
def interpolate(u, interpolation='linear', left=True, steps=200):
if interpolation == 'outer':
return interp_outer(u, left, steps=steps)
elif interpolation == 'spline':
return interp_cubicspline(u, steps=steps)
elif interpolation == 'step':
return interp_step(u, steps=steps)
else:
return interp_linear(u, steps=steps)
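# Illustrative call (hedged; exact values depend on rounding inside
# interp_linear): interpolate([0, 1], steps=5) resamples the two endpoints
# onto five points, roughly [0, 0.25, 0.5, 0.75, 1].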
def sideVariance(w, mu=None):
if not isinstance(w, np.ndarray): w = np.array(w)
if mu is None: mu = np.mean(w)
return max(0, np.mean((w - mu) ** 2))
def dwMean(pbox):
return Interval(np.mean(pbox.right), np.mean(pbox.left))
def dwVariance(pbox):
if np.any(np.isinf(pbox.left)) or np.any(np.isinf(pbox.right)):
return Interval(0, np.inf)
if np.all(pbox.right[0] == pbox.right) and np.all(pbox.left[0] == pbox.left):
return Interval(0, (pbox.right[0] - pbox.left[0]) ** (2 / 4))
vr = sideVariance(pbox.left, np.mean(pbox.left))
w = np.copy(pbox.left)
n = len(pbox.left)
for i in reversed(range(n)):
w[i] = pbox.right[i]
v = sideVariance(w, np.mean(w))
if np.isnan(vr) or np.isnan(v):
vr = np.inf
elif vr < v:
vr = v
if pbox.left[n - 1] <= pbox.right[0]:
vl = 0.0
else:
x = pbox.right
vl = sideVariance(w, np.mean(w))
for i in reversed(range(n)):
w[i] = pbox.left[i]
here = w[i]
if 1 < i:
for j in reversed(range(i-1)):
if w[i] < w[j]:
w[j] = here
v = sideVariance(w, np.mean(w))
if np.isnan(vl) or np.isnan(v):
vl = 0
elif v < vl:
vl = v
return Interval(vl, vr)
def straddles(x):
return (left(x) <= 0) and (0 <= right(x)) # includes zero
def straddlingzero(x):
return (left(x) < 0) and (0 < right(x)) # neglects zero as an endpoint
def env(x,y):
return x.env(y)
def pnt(a):
if type(a) == pba.pbox.Pbox:
return (a.mean_left + a.mean_right) / 2
elif type(a) == list:
return [pnt(b) for b in a]
else:
return (a)
def rng(a):
if type(a) == pba.pbox.Pbox:
# return pba.Interval(a.mean_left, a.mean_right)
# return pba.Interval(a.mean_left-np.sqrt(a.var_right),
# a.mean_right+np.sqrt(a.var_right))
return a.get_interval(0.025, 0.975)
elif type(a) == list:
        return [rng(b) for b in a]
else:
return (a)
def pltem(ax, t, y, simple=True):
if simple:
y = [rng(v) for v in y]
y1 = [v.left() for v in y]
y2 = [v.right() for v in y]
ax.plot(t, y1)
ax.plot(t, y2)
else:
pass
|
[
"numpy.minimum",
"numpy.maximum",
"matplotlib.pyplot.show",
"numpy.copy",
"matplotlib.pyplot.plot",
"numpy.flip",
"numpy.empty",
"numpy.isinf",
"numpy.isnan",
"numpy.any",
"numpy.sort",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.min",
"numpy.linspace",
"numpy.all",
"numpy.repeat"
] |
[((18611, 18621), 'numpy.sort', 'np.sort', (['u'], {}), '(u)\n', (18618, 18621), True, 'import numpy as np\n'), ((18830, 18843), 'numpy.sort', 'np.sort', (['vals'], {}), '(vals)\n', (18837, 18843), True, 'import numpy as np\n'), ((20015, 20026), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (20023, 20026), True, 'import numpy as np\n'), ((20978, 20996), 'numpy.copy', 'np.copy', (['pbox.left'], {}), '(pbox.left)\n', (20985, 20996), True, 'import numpy as np\n'), ((9387, 9420), 'numpy.minimum', 'np.minimum', (['self.left', 'other.left'], {}), '(self.left, other.left)\n', (9397, 9420), True, 'import numpy as np\n'), ((9438, 9473), 'numpy.maximum', 'np.maximum', (['self.right', 'other.right'], {}), '(self.right, other.right)\n', (9448, 9473), True, 'import numpy as np\n'), ((15480, 15507), 'matplotlib.pyplot.plot', 'plt.plot', (['left', 'y'], {}), '(left, y, **kwargs)\n', (15488, 15507), True, 'from matplotlib import pyplot as plt\n'), ((15514, 15542), 'matplotlib.pyplot.plot', 'plt.plot', (['right', 'y'], {}), '(right, y, **kwargs)\n', (15522, 15542), True, 'from matplotlib import pyplot as plt\n'), ((16700, 16729), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.steps'], {}), '(0, 1, self.steps)\n', (16711, 16729), True, 'import numpy as np\n'), ((17792, 17809), 'numpy.array', 'np.array', (['implist'], {}), '(implist)\n', (17800, 17809), True, 'import numpy as np\n'), ((17952, 17969), 'numpy.array', 'np.array', (['implist'], {}), '(implist)\n', (17960, 17969), True, 'import numpy as np\n'), ((19618, 19637), 'numpy.repeat', 'np.repeat', (['V', 'steps'], {}), '(V, steps)\n', (19627, 19637), True, 'import numpy as np\n'), ((20468, 20479), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (20476, 20479), True, 'import numpy as np\n'), ((20504, 20514), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (20511, 20514), True, 'import numpy as np\n'), ((20533, 20555), 'numpy.mean', 'np.mean', (['((w - mu) ** 2)'], {}), '((w - mu) ** 2)\n', (20540, 20555), True, 'import numpy as np\n'), ((20596, 20615), 'numpy.mean', 'np.mean', (['pbox.right'], {}), '(pbox.right)\n', (20603, 20615), True, 'import numpy as np\n'), ((20617, 20635), 'numpy.mean', 'np.mean', (['pbox.left'], {}), '(pbox.left)\n', (20624, 20635), True, 'import numpy as np\n'), ((20771, 20806), 'numpy.all', 'np.all', (['(pbox.right[0] == pbox.right)'], {}), '(pbox.right[0] == pbox.right)\n', (20777, 20806), True, 'import numpy as np\n'), ((20811, 20844), 'numpy.all', 'np.all', (['(pbox.left[0] == pbox.left)'], {}), '(pbox.left[0] == pbox.left)\n', (20817, 20844), True, 'import numpy as np\n'), ((20950, 20968), 'numpy.mean', 'np.mean', (['pbox.left'], {}), '(pbox.left)\n', (20957, 20968), True, 'import numpy as np\n'), ((1253, 1288), 'numpy.max', 'np.max', (['[mean_left, self.mean_left]'], {}), '([mean_left, self.mean_left])\n', (1259, 1288), True, 'import numpy as np\n'), ((1342, 1379), 'numpy.min', 'np.min', (['[mean_right, self.mean_right]'], {}), '([mean_right, self.mean_right])\n', (1348, 1379), True, 'import numpy as np\n'), ((1429, 1462), 'numpy.max', 'np.max', (['[var_left, self.var_left]'], {}), '([var_left, self.var_left])\n', (1435, 1462), True, 'import numpy as np\n'), ((1514, 1549), 'numpy.min', 'np.min', (['[var_right, self.var_right]'], {}), '([var_right, self.var_right])\n', (1520, 1549), True, 'import numpy as np\n'), ((15569, 15579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15577, 15579), True, 'from matplotlib import pyplot as plt\n'), ((20674, 20693), 'numpy.isinf', 'np.isinf', (['pbox.left'], {}), 
'(pbox.left)\n', (20682, 20693), True, 'import numpy as np\n'), ((20705, 20725), 'numpy.isinf', 'np.isinf', (['pbox.right'], {}), '(pbox.right)\n', (20713, 20725), True, 'import numpy as np\n'), ((21111, 21121), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (21118, 21121), True, 'import numpy as np\n'), ((21135, 21147), 'numpy.isnan', 'np.isnan', (['vr'], {}), '(vr)\n', (21143, 21147), True, 'import numpy as np\n'), ((21151, 21162), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (21159, 21162), True, 'import numpy as np\n'), ((21350, 21360), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (21357, 21360), True, 'import numpy as np\n'), ((2405, 2438), 'numpy.array', 'np.array', (['[self.left, self.right]'], {}), '([self.left, self.right])\n', (2413, 2438), True, 'import numpy as np\n'), ((4740, 4758), 'numpy.mean', 'np.mean', (['self.left'], {}), '(self.left)\n', (4747, 4758), True, 'import numpy as np\n'), ((4812, 4831), 'numpy.mean', 'np.mean', (['self.right'], {}), '(self.right)\n', (4819, 4831), True, 'import numpy as np\n'), ((4851, 4879), 'numpy.any', 'np.any', (['(self.left <= -np.inf)'], {}), '(self.left <= -np.inf)\n', (4857, 4879), True, 'import numpy as np\n'), ((4883, 4911), 'numpy.any', 'np.any', (['(np.inf <= self.right)'], {}), '(np.inf <= self.right)\n', (4889, 4911), True, 'import numpy as np\n'), ((10141, 10161), 'numpy.empty', 'np.empty', (['self.steps'], {}), '(self.steps)\n', (10149, 10161), True, 'import numpy as np\n'), ((10187, 10207), 'numpy.empty', 'np.empty', (['self.steps'], {}), '(self.steps)\n', (10195, 10207), True, 'import numpy as np\n'), ((12861, 12881), 'numpy.empty', 'np.empty', (['self.steps'], {}), '(self.steps)\n', (12869, 12881), True, 'import numpy as np\n'), ((12907, 12927), 'numpy.empty', 'np.empty', (['self.steps'], {}), '(self.steps)\n', (12915, 12927), True, 'import numpy as np\n'), ((16053, 16082), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.steps'], {}), '(0, 1, self.steps)\n', (16064, 16082), True, 'import numpy as np\n'), ((16377, 16406), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.steps'], {}), '(0, 1, self.steps)\n', (16388, 16406), True, 'import numpy as np\n'), ((17080, 17109), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.steps'], {}), '(0, 1, self.steps)\n', (17091, 17109), True, 'import numpy as np\n'), ((19886, 19921), 'numpy.linspace', 'np.linspace', ([], {'start': 'v', 'stop': 'w', 'num': 'n'}), '(start=v, stop=w, num=n)\n', (19897, 19921), True, 'import numpy as np\n'), ((21631, 21641), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (21638, 21641), True, 'import numpy as np\n'), ((21659, 21671), 'numpy.isnan', 'np.isnan', (['vl'], {}), '(vl)\n', (21667, 21671), True, 'import numpy as np\n'), ((21675, 21686), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (21683, 21686), True, 'import numpy as np\n'), ((2680, 2699), 'numpy.flip', 'np.flip', (['self.right'], {}), '(self.right)\n', (2687, 2699), True, 'import numpy as np\n'), ((2722, 2740), 'numpy.flip', 'np.flip', (['self.left'], {}), '(self.left)\n', (2729, 2740), True, 'import numpy as np\n'), ((10407, 10445), 'numpy.min', 'np.min', (['(self.right[j] + other.right[k])'], {}), '(self.right[j] + other.right[k])\n', (10413, 10445), True, 'import numpy as np\n'), ((10584, 10622), 'numpy.max', 'np.max', (['(self.left[jj] + other.left[kk])'], {}), '(self.left[jj] + other.left[kk])\n', (10590, 10622), True, 'import numpy as np\n'), ((13127, 13165), 'numpy.min', 'np.min', (['(self.right[j] * other.right[k])'], {}), '(self.right[j] * other.right[k])\n', 
(13133, 13165), True, 'import numpy as np\n'), ((13304, 13342), 'numpy.max', 'np.max', (['(self.left[jj] * other.left[kk])'], {}), '(self.left[jj] * other.left[kk])\n', (13310, 13342), True, 'import numpy as np\n'), ((15160, 15179), 'numpy.flip', 'np.flip', (['self.right'], {}), '(self.right)\n', (15167, 15179), True, 'import numpy as np\n'), ((15205, 15223), 'numpy.flip', 'np.flip', (['self.left'], {}), '(self.left)\n', (15212, 15223), True, 'import numpy as np\n'), ((2028, 2059), 'numpy.min', 'np.min', (['[self.left, self.right]'], {}), '([self.left, self.right])\n', (2034, 2059), True, 'import numpy as np\n'), ((2071, 2102), 'numpy.max', 'np.max', (['[self.left, self.right]'], {}), '([self.left, self.right])\n', (2077, 2102), True, 'import numpy as np\n'), ((10826, 10845), 'numpy.flip', 'np.flip', (['other.left'], {}), '(other.left)\n', (10833, 10845), True, 'import numpy as np\n'), ((10884, 10904), 'numpy.flip', 'np.flip', (['other.right'], {}), '(other.right)\n', (10891, 10904), True, 'import numpy as np\n'), ((13546, 13565), 'numpy.flip', 'np.flip', (['other.left'], {}), '(other.left)\n', (13553, 13565), True, 'import numpy as np\n'), ((13604, 13624), 'numpy.flip', 'np.flip', (['other.right'], {}), '(other.right)\n', (13611, 13624), True, 'import numpy as np\n')]
|
""" test automol.graph
"""
import numpy
import automol
from automol import graph
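# Each graph below is a pair of dictionaries (format inferred from
# test__from_data further down):
#   atoms: key -> (element symbol, implicit hydrogen count, stereo parity)
#   bonds: frozenset({key1, key2}) -> (bond order, stereo parity)
# The _CGR/_RGR/_SGR suffixes distinguish connectivity-only graphs from
# graphs that additionally carry bond orders or stereo parities.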
C8H13O_CGR = (
{0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, None), 7: ('C', 1, None), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, None), frozenset({5, 7}): (1, None)})
C8H13O_RGR = (
{0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, None), 7: ('C', 1, None), 8: ('O', 0, None)},
{frozenset({1, 4}): (2, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (2, None), frozenset({5, 7}): (1, None)})
C8H13O_SGR = (
{0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, False), 7: ('C', 1, False), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, False), frozenset({5, 7}): (1, None)})
C3H3_CGR = (
{0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
frozenset({2, 0}): (1, None)})
C3H3_RGRS = (
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
frozenset({2, 0}): (1, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (2, None),
frozenset({2, 0}): (1, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
frozenset({2, 0}): (2, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (2, None), frozenset({1, 2}): (1, None),
frozenset({2, 0}): (1, None)}),
)
C2_CGR = ({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (1, None)})
C2_RGRS = (
({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (1, None)}),
({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (2, None)}),
({0: ('C', 0, None), 1: ('C', 0, None)},
{frozenset({0, 1}): (3, None)}),
)
CH2FH2H_CGR_IMP = (
{0: ('F', 0, None), 1: ('C', 2, None), 2: ('H', 1, None),
3: ('H', 0, None)},
{frozenset({0, 1}): (1, None)})
CH2FH2H_CGR_EXP = (
{0: ('F', 0, None), 1: ('C', 0, None), 2: ('H', 0, None),
3: ('H', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
6: ('H', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None), frozenset({2, 6}): (1, None)})
C2H2CL2F2_CGR = (
{0: ('C', 1, None), 1: ('C', 1, None), 2: ('F', 0, None),
3: ('Cl', 0, None), 4: ('F', 0, None), 5: ('Cl', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None)})
C2H2CL2F2_SGRS = (
({0: ('C', 1, False), 1: ('C', 1, False), 2: ('F', 0, None),
3: ('Cl', 0, None), 4: ('F', 0, None), 5: ('Cl', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None)}),
({0: ('C', 1, False), 1: ('C', 1, True), 2: ('F', 0, None),
3: ('Cl', 0, None), 4: ('F', 0, None), 5: ('Cl', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None)}),
({0: ('C', 1, True), 1: ('C', 1, False), 2: ('F', 0, None),
3: ('Cl', 0, None), 4: ('F', 0, None), 5: ('Cl', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None)}),
({0: ('C', 1, True), 1: ('C', 1, True), 2: ('F', 0, None),
3: ('Cl', 0, None), 4: ('F', 0, None), 5: ('Cl', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
frozenset({1, 5}): (1, None)})
)
C3H3CL2F3_CGR = (
{0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)})
C3H3CL2F3_SGRS = (
({0: ('C', 1, None), 1: ('C', 1, False), 2: ('C', 1, False),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
({0: ('C', 1, None), 1: ('C', 1, True), 2: ('C', 1, True),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
({0: ('C', 1, False), 1: ('C', 1, False), 2: ('C', 1, True),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
({0: ('C', 1, False), 1: ('C', 1, True), 2: ('C', 1, False),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
({0: ('C', 1, True), 1: ('C', 1, False), 2: ('C', 1, True),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
({0: ('C', 1, True), 1: ('C', 1, True), 2: ('C', 1, False),
3: ('Cl', 0, None), 4: ('Cl', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('F', 0, None)},
{frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 5}): (1, None), frozenset({2, 4}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 6}): (1, None),
frozenset({2, 7}): (1, None)}),
)
C3H5N3_CGR = (
{0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
{frozenset({1, 4}): (1, None), frozenset({1, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({0, 2}): (1, None),
frozenset({2, 5}): (1, None)})
C3H5N3_SGRS = (
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
{frozenset({1, 4}): (1, False), frozenset({1, 2}): (1, None),
frozenset({0, 3}): (1, False), frozenset({0, 2}): (1, None),
frozenset({2, 5}): (1, None)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
{frozenset({1, 4}): (1, True), frozenset({1, 2}): (1, None),
frozenset({0, 3}): (1, False), frozenset({0, 2}): (1, None),
frozenset({2, 5}): (1, False)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
{frozenset({1, 4}): (1, True), frozenset({1, 2}): (1, None),
frozenset({0, 3}): (1, False), frozenset({0, 2}): (1, None),
frozenset({2, 5}): (1, True)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
{frozenset({1, 4}): (1, False), frozenset({1, 2}): (1, None),
frozenset({0, 3}): (1, True), frozenset({0, 2}): (1, None),
frozenset({2, 5}): (1, False)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
{frozenset({1, 4}): (1, False), frozenset({1, 2}): (1, None),
frozenset({0, 3}): (1, True), frozenset({0, 2}): (1, None),
frozenset({2, 5}): (1, True)}),
({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 0, None),
3: ('N', 1, None), 4: ('N', 1, None), 5: ('N', 1, None)},
{frozenset({1, 4}): (1, True), frozenset({1, 2}): (1, None),
frozenset({0, 3}): (1, True), frozenset({0, 2}): (1, None),
frozenset({2, 5}): (1, None)}),
)
C8H13O_SGRS = (
({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, False), 7: ('C', 1, False), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, False), frozenset({5, 7}): (1, None)}),
({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, False), 7: ('C', 1, False), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, True), frozenset({5, 7}): (1, None)}),
({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, False), 7: ('C', 1, True), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, False), frozenset({5, 7}): (1, None)}),
({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, False), 7: ('C', 1, True), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, True), frozenset({5, 7}): (1, None)}),
({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, True), 7: ('C', 1, False), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, False), frozenset({5, 7}): (1, None)}),
({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, True), 7: ('C', 1, False), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, True), frozenset({5, 7}): (1, None)}),
({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, True), 7: ('C', 1, True), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, False), frozenset({5, 7}): (1, None)}),
({0: ('C', 3, None), 1: ('C', 2, None), 2: ('C', 3, None),
3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
6: ('C', 1, True), 7: ('C', 1, True), 8: ('O', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None),
frozenset({0, 3}): (1, None), frozenset({2, 6}): (1, None),
frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({3, 5}): (1, True), frozenset({5, 7}): (1, None)}),
)
def test__from_data():
""" test getters
"""
cgr = automol.graph.from_data(
atm_sym_dct=graph.atom_symbols(C8H13O_CGR),
bnd_keys=graph.bond_keys(C8H13O_CGR),
atm_imp_hyd_vlc_dct=(
graph.atom_implicit_hydrogen_valences(C8H13O_CGR)),
)
assert cgr == C8H13O_CGR
rgr = automol.graph.from_data(
atm_sym_dct=graph.atom_symbols(C8H13O_RGR),
bnd_keys=graph.bond_keys(C8H13O_RGR),
atm_imp_hyd_vlc_dct=(
graph.atom_implicit_hydrogen_valences(C8H13O_RGR)),
bnd_ord_dct=graph.bond_orders(C8H13O_RGR),
)
assert rgr == C8H13O_RGR
sgr = automol.graph.from_data(
atm_sym_dct=graph.atom_symbols(C8H13O_SGR),
bnd_keys=graph.bond_keys(C8H13O_SGR),
atm_imp_hyd_vlc_dct=(
graph.atom_implicit_hydrogen_valences(C8H13O_SGR)),
atm_ste_par_dct=graph.atom_stereo_parities(C8H13O_SGR),
bnd_ste_par_dct=graph.bond_stereo_parities(C8H13O_SGR)
)
assert sgr == C8H13O_SGR
def test__set_atom_implicit_hydrogen_valences():
""" test graph.set_atom_implicit_hydrogen_valences
"""
atm_keys = graph.atom_keys(C8H13O_CGR)
cgr = graph.set_atom_implicit_hydrogen_valences(
C8H13O_CGR, {atm_key: 0 for atm_key in atm_keys})
assert cgr == automol.graph.from_data(
graph.atom_symbols(C8H13O_CGR), graph.bond_keys(C8H13O_CGR))
def test__string():
""" test graph.string and graph.from_string
"""
for sgr in C8H13O_SGRS:
assert sgr == automol.graph.from_string(automol.graph.string(sgr))
def test__without_bond_orders():
""" test graph.without_bond_orders
"""
assert C8H13O_CGR == graph.without_bond_orders(C8H13O_RGR)
def test__without_stereo_parities():
""" test graph.without_stereo_parities
"""
assert C8H13O_CGR == graph.without_stereo_parities(C8H13O_SGR)
def test__electron_count():
""" test graph.electron_count
"""
assert graph.electron_count(C8H13O_CGR) == 69
def test__atom_count():
""" test graph.electron_count
"""
assert graph.atom_count(C8H13O_CGR) == 22
assert graph.atom_count(C8H13O_CGR, with_implicit=False) == 9
def test__heavy_atom_count():
""" test graph.explicit_hydrogen_count
"""
cgr = graph.explicit(C8H13O_CGR)
assert graph.heavy_atom_count(cgr) == 9
def test__atoms_neighbor_atom_keys():
""" test graph.atoms_neighbor_atom_keys
"""
assert graph.atoms_neighbor_atom_keys(C8H13O_CGR) == {
0: frozenset({3}),
1: frozenset({4}),
2: frozenset({6}),
3: frozenset({0, 5}),
4: frozenset({1, 6}),
5: frozenset({3, 7}),
6: frozenset({2, 4, 7}),
7: frozenset({8, 5, 6}),
8: frozenset({7})
}
def test__atoms_second_degree_neighbor_atom_keys():
""" test graph.atoms_neighbor_atom_keys
"""
assert graph.atoms_second_degree_neighbor_atom_keys(C8H13O_CGR) == {
0: frozenset({5}),
1: frozenset({6}),
2: frozenset({4, 7}),
3: frozenset({7}),
4: frozenset({2, 7}),
5: frozenset({0, 8, 6}),
6: frozenset({8, 1, 5}),
7: frozenset({2, 3, 4}),
8: frozenset({5, 6}),
}
def test__atoms_bond_keys():
""" test graph.atoms_neighbor_atom_keys
"""
assert graph.atoms_bond_keys(C8H13O_CGR) == {
0: frozenset({frozenset({0, 3})}),
1: frozenset({frozenset({1, 4})}),
2: frozenset({frozenset({2, 6})}),
3: frozenset({frozenset({3, 5}), frozenset({0, 3})}),
4: frozenset({frozenset({1, 4}), frozenset({4, 6})}),
5: frozenset({frozenset({3, 5}), frozenset({5, 7})}),
6: frozenset({frozenset({6, 7}), frozenset({4, 6}),
frozenset({2, 6})}),
7: frozenset({frozenset({6, 7}), frozenset({5, 7}),
frozenset({8, 7})}),
8: frozenset({frozenset({8, 7})})
}
# # bond properties
def test__bonds_neighbor_atom_keys():
""" test graph.bonds_neighbor_atom_keys
"""
assert graph.bonds_neighbor_atom_keys(C8H13O_CGR) == {
frozenset({1, 4}): frozenset({6}),
frozenset({4, 6}): frozenset({1, 2, 7}),
frozenset({2, 6}): frozenset({4, 7}),
frozenset({0, 3}): frozenset({5}),
frozenset({6, 7}): frozenset({8, 2, 4, 5}),
frozenset({8, 7}): frozenset({5, 6}),
frozenset({3, 5}): frozenset({0, 7}),
frozenset({5, 7}): frozenset({8, 3, 6})
}
# # other properties
def test__branch():
""" test graph.branch
"""
assert graph.branch(C8H13O_CGR, 6, frozenset({6, 4})) == (
{1: ('C', 2, None), 4: ('C', 1, None), 6: ('C', 1, None)},
{frozenset({1, 4}): (1, None), frozenset({4, 6}): (1, None)}
)
def test__connected_components():
""" test graph.connected_components
"""
gra1 = C3H3_CGR
gra2 = C2_CGR
gra1_natms = automol.formula.atom_count(graph.formula(C3H3_CGR))
gra2 = graph.transform_keys(gra2, lambda x: x + gra1_natms)
gra = graph.union(gra1, gra2)
cmp_gras = graph.connected_components(gra)
assert cmp_gras in [(gra1, gra2), (gra2, gra1)]
def test__subgraph():
""" test graph.subgraph
"""
assert graph.subgraph(C3H3_CGR, (1, 2)) == (
{1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({1, 2}): (1, None)})
def test__bond_induced_subgraph():
""" test graph.bond_induced_subgraph
"""
assert graph.bond_induced_subgraph(
C3H3_CGR, [frozenset({0, 1}), frozenset({1, 2})]) == (
{0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None)})
# # transformations
def test__relabel():
""" test graph.relabel
"""
assert graph.relabel(C3H3_CGR, {0: 10, 1: 11, 2: 12}) == (
{10: ('C', 1, None), 11: ('C', 1, None), 12: ('C', 1, None)},
{frozenset({10, 11}): (1, None), frozenset({11, 12}): (1, None),
frozenset({12, 10}): (1, None)})
def test__remove_atoms():
""" test graph.remove_atoms
"""
assert graph.remove_atoms(C3H3_CGR, (0,)) == (
{1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({1, 2}): (1, None)})
def test__remove_bonds():
""" test graph.remove_bonds
"""
assert graph.remove_bonds(C3H3_CGR, [frozenset({1, 2})]) == (
{0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
{frozenset({0, 1}): (1, None), frozenset({2, 0}): (1, None)})
# implicit/explicit hydrogen functions
# # atom properties
def test__atom_explicit_hydrogen_valences():
""" test graph.atom_explicit_hydrogen_valences
"""
assert graph.atom_explicit_hydrogen_valences(CH2FH2H_CGR_EXP) == {
0: 0, 1: 2, 2: 1, 3: 0, 4: 0, 5: 0, 6: 0
}
def test__atom_explicit_hydrogen_keys():
""" test graph.atom_explicit_hydrogen_keys
"""
assert graph.atom_explicit_hydrogen_keys(CH2FH2H_CGR_EXP) == {
0: frozenset(),
1: frozenset({4, 5}),
2: frozenset({6}),
3: frozenset(),
4: frozenset(),
5: frozenset(),
6: frozenset()
}
# # other properties
def test__backbone_keys():
""" test graph.backbone_keys
"""
assert graph.backbone_keys(CH2FH2H_CGR_EXP) == frozenset({0, 1, 2, 3})
def test__explicit_hydrogen_keys():
""" test graph.explicit_hydrogen_keys
"""
assert graph.explicit_hydrogen_keys(CH2FH2H_CGR_EXP) == frozenset(
{4, 5, 6})
def test__explicit():
""" test graph.explicit
"""
assert CH2FH2H_CGR_EXP == graph.explicit(CH2FH2H_CGR_IMP)
def test__implicit():
""" test graph.implicit
"""
assert CH2FH2H_CGR_IMP == graph.implicit(graph.explicit(CH2FH2H_CGR_IMP))
# # comparisons
def test__backbone_isomorphic():
""" test graph.backbone_isomorphic
"""
assert graph.backbone_isomorphic(CH2FH2H_CGR_IMP, CH2FH2H_CGR_EXP)
cgr = C8H13O_CGR
natms = len(graph.atoms(cgr))
for _ in range(10):
pmt_dct = dict(enumerate(numpy.random.permutation(natms)))
cgr_pmt = graph.relabel(cgr, pmt_dct)
assert graph.backbone_isomorphic(cgr, cgr_pmt)
def test__backbone_isomorphism():
""" test graph.backbone_isomorphism
"""
cgr = C8H13O_CGR
natms = len(graph.atoms(cgr))
for _ in range(10):
pmt_dct = dict(enumerate(numpy.random.permutation(natms)))
cgr_pmt = graph.relabel(cgr, pmt_dct)
assert graph.backbone_isomorphism(cgr, cgr_pmt) == pmt_dct
def test__backbone_unique():
""" test graph.backbone_unique
"""
assert graph.backbone_unique(C3H3_RGRS) == C3H3_RGRS[:2]
# chemistry library
def test__atom_element_valences():
""" test graph.atom_element_valences
"""
assert graph.atom_element_valences(C8H13O_CGR) == {
0: 4, 1: 4, 2: 4, 3: 4, 4: 4, 5: 4, 6: 4, 7: 4, 8: 2}
def test__atom_lone_pair_counts():
""" test graph.atom_lone_pair_counts
"""
assert graph.atom_lone_pair_counts(C8H13O_CGR) == {
0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 2}
def test__atom_bond_valences():
""" test graph.atom_bond_valences
"""
assert graph.atom_bond_valences(C8H13O_CGR) == {
0: 4, 1: 3, 2: 4, 3: 3, 4: 3, 5: 3, 6: 4, 7: 4, 8: 1}
def test__atom_unsaturated_valences():
""" test graph.atom_unsaturated_valences
"""
assert graph.atom_unsaturated_valences(C8H13O_CGR) == {
0: 0, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1, 6: 0, 7: 0, 8: 1}
def test__unsaturated_atom_keys():
""" test graph.unsaturated_atom_keys
"""
assert graph.unsaturated_atom_keys(C8H13O_CGR) == frozenset(
{1, 3, 4, 5, 8})
def test__maximum_spin_multiplicity():
""" test graph.maximum_spin_multiplicity
"""
assert graph.maximum_spin_multiplicity(C2_CGR) == 7
def test__possible_spin_multiplicities():
""" test graph.possible_spin_multiplicities
"""
assert graph.possible_spin_multiplicities(C2_CGR) == (1, 3, 5, 7)
# miscellaneous
def test__bond_symmetry_numbers():
""" test graph.bond_symmetry_numbers
"""
assert graph.bond_symmetry_numbers(C8H13O_CGR) == {
frozenset({1, 4}): 1, frozenset({4, 6}): 1, frozenset({2, 6}): 3,
frozenset({0, 3}): 3, frozenset({6, 7}): 1, frozenset({8, 7}): 1,
frozenset({3, 5}): 1, frozenset({5, 7}): 1}
# resonance graph library
# # atom properties
def test__resonance_dominant_atom_hybridizations():
""" test graph.resonance_dominant_atom_hybridizations
"""
assert graph.resonance_dominant_atom_hybridizations(C3H3_CGR) == {
0: 2, 1: 2, 2: 2}
assert graph.resonance_dominant_atom_hybridizations(C8H13O_CGR) == {
0: 3, 1: 2, 2: 3, 3: 2, 4: 2, 5: 2, 6: 3, 7: 3, 8: 3}
cgr = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('O', 0, None),
3: ('H', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
6: ('X', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({1, 2}): (1, None),
frozenset({0, 3}): (1, None), frozenset({0, 1}): (1, None),
frozenset({2, 5}): (1, None)})
print(graph.resonance_dominant_atom_hybridizations(cgr))
def test__resonance_dominant_atom_centered_cumulene_keys():
""" test graph.resonance_dominant_atom_centered_cumulene_keys
"""
cgr = ({0: ('C', 1, None), 1: ('C', 2, None), 2: ('C', 0, None),
3: ('C', 0, None), 4: ('C', 1, None), 5: ('C', 0, None),
6: ('C', 0, None)},
{frozenset({4, 6}): (1, None), frozenset({0, 2}): (1, None),
frozenset({2, 4}): (1, None), frozenset({5, 6}): (1, None),
frozenset({3, 5}): (1, None), frozenset({1, 3}): (1, None)})
assert (graph.resonance_dominant_atom_centered_cumulene_keys(cgr) ==
frozenset({(frozenset({1, 4}), 5)}))
def test__resonance_dominant_bond_centered_cumulene_keys():
""" test graph.resonance_dominant_bond_centered_cumulene_keys
"""
cgr = ({0: ('C', 1, None), 1: ('C', 2, None), 2: ('C', 0, None),
3: ('C', 0, None), 4: ('C', 1, None), 5: ('C', 0, None)},
{frozenset({4, 5}): (1, None), frozenset({0, 2}): (1, None),
frozenset({2, 4}): (1, None), frozenset({3, 5}): (1, None),
frozenset({1, 3}): (1, None)})
assert (graph.resonance_dominant_bond_centered_cumulene_keys(cgr) ==
frozenset({(frozenset({1, 4}), frozenset({3, 5}))}))
def test__resonance_dominant_radical_atom_keys():
""" test graph.resonance_dominant_radical_atom_keys
"""
assert graph.resonance_dominant_radical_atom_keys(C3H3_CGR) == frozenset(
{0, 1, 2})
assert graph.resonance_dominant_radical_atom_keys(C8H13O_CGR) == frozenset(
{8})
def test__sigma_radical_atom_keys():
""" test graph.sigma_radical_atom_keys
"""
# CCC#[C]
gra = ({0: ('C', 3, None), 1: ('C', 0, None), 2: ('C', 2, None),
3: ('C', 0, None)},
{frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
frozenset({2, 3}): (1, None)})
assert graph.sigma_radical_atom_keys(gra) == frozenset({1})
# [C]#CC(CC)(CCC#[C])CC#[C]
gra = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('C', 3, None),
3: ('C', 0, None), 4: ('C', 0, None), 5: ('C', 0, None),
6: ('C', 2, None), 7: ('C', 0, None), 8: ('C', 2, None),
9: ('C', 2, None), 10: ('C', 2, None), 11: ('C', 0, None)},
{frozenset({8, 4}): (1, None), frozenset({3, 7}): (1, None),
frozenset({2, 6}): (1, None), frozenset({0, 4}): (1, None),
frozenset({8, 10}): (1, None), frozenset({9, 11}): (1, None),
frozenset({1, 5}): (1, None), frozenset({9, 5}): (1, None),
frozenset({11, 7}): (1, None), frozenset({10, 11}): (1, None),
frozenset({11, 6}): (1, None)})
assert graph.sigma_radical_atom_keys(gra) == frozenset({0, 1, 3})
# # bond properties
def test__resonance_dominant_bond_orders():
""" test graph.resonance_dominant_bond_orders
"""
assert graph.resonance_dominant_bond_orders(C3H3_CGR) == {
frozenset({0, 1}): frozenset({1, 2}),
frozenset({0, 2}): frozenset({1, 2}),
frozenset({1, 2}): frozenset({1, 2})
}
# # transformations
def test__resonances():
""" test graph.resonances
"""
assert graph.resonances(C3H3_CGR) == C3H3_RGRS
def test__subresonances():
""" test graph.subresonances
"""
assert graph.subresonances(C2_RGRS[1]) == C2_RGRS[1:]
def test__dominant_resonances():
""" test graph.dominant_resonances
"""
assert graph.dominant_resonances(C3H3_CGR) == C3H3_RGRS[1:]
def test__dominant_resonance():
""" test graph.dominant_resonance
"""
assert graph.dominant_resonance(C3H3_CGR) == C3H3_RGRS[1]
def test__rotational_bond_keys():
""" test graph.rotational_bond_keys
"""
cgr = ({0: ('C', 2, None), 1: ('C', 2, None), 2: ('C', 1, None),
3: ('C', 1, None)},
{frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
frozenset({2, 3}): (1, None)})
cgr = automol.graph.explicit(cgr)
assert (automol.graph.rotational_bond_keys(cgr) ==
frozenset({frozenset({2, 3})}))
cgr = ({0: ('C', 3, None), 1: ('C', 3, None), 2: ('C', 2, None),
3: ('C', 2, None)},
{frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
frozenset({2, 3}): (1, None)})
assert (automol.graph.rotational_bond_keys(cgr) ==
frozenset({frozenset({0, 2}), frozenset({1, 3}),
frozenset({2, 3})}))
assert (automol.graph.rotational_bond_keys(cgr, with_h_rotors=False) ==
frozenset({frozenset({2, 3})}))
# stereo graph library
def test__stereogenic_atom_keys():
""" test graph.stereogenic_atom_keys
"""
assert graph.stereogenic_atom_keys(C8H13O_CGR) == frozenset({6, 7})
assert graph.stereogenic_atom_keys(C3H3CL2F3_CGR) == frozenset({1, 2})
cgr = ({0: ('C', 2, None), 1: ('C', 3, None), 2: ('C', 1, None),
3: ('O', 1, None)},
{frozenset({0, 2}): (1, None), frozenset({2, 3}): (1, None),
frozenset({1, 2}): (1, None)})
assert graph.stereogenic_atom_keys(cgr) == frozenset({2})
def test__stereogenic_bond_keys():
""" test graph.stereogenic_bond_keys
"""
print(graph.stereogenic_bond_keys(C8H13O_CGR))
print(graph.stereogenic_bond_keys(C3H5N3_CGR))
assert graph.stereogenic_bond_keys(C8H13O_CGR) == frozenset(
{frozenset({3, 5})})
assert graph.stereogenic_bond_keys(C3H5N3_CGR) == frozenset(
{frozenset({1, 4}), frozenset({0, 3})})
def test__stereomers():
""" test graph.stereomers
"""
assert graph.stereomers(C2H2CL2F2_CGR) == C2H2CL2F2_SGRS
assert graph.stereomers(C3H3CL2F3_CGR) == C3H3CL2F3_SGRS
assert graph.stereomers(C3H5N3_CGR) == C3H5N3_SGRS
assert graph.stereomers(C8H13O_CGR) == C8H13O_SGRS
def test__to_index_based_stereo():
""" test graph.stereomers
"""
for sgr in C2H2CL2F2_SGRS:
sgr = graph.explicit(sgr)
idx_sgr = graph.to_index_based_stereo(sgr)
assert sgr == graph.from_index_based_stereo(idx_sgr)
for sgr in C3H3CL2F3_SGRS:
sgr = graph.explicit(sgr)
idx_sgr = graph.to_index_based_stereo(sgr)
assert sgr == graph.from_index_based_stereo(idx_sgr)
for sgr in C3H5N3_SGRS:
sgr = graph.explicit(sgr)
idx_sgr = graph.to_index_based_stereo(sgr)
assert sgr == graph.from_index_based_stereo(idx_sgr)
for sgr in C8H13O_SGRS:
sgr = graph.explicit(sgr)
idx_sgr = graph.to_index_based_stereo(sgr)
assert sgr == graph.from_index_based_stereo(idx_sgr)
def test__ring_systems():
""" test graph.ring_systems
"""
ich = automol.smiles.inchi('C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4')
gra = automol.inchi.graph(ich)
rsys = sorted(graph.ring_systems(gra), key=graph.atom_count)
assert list(map(graph.atom_count, rsys)) == [7, 12, 21]
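# Note on the graph encoding used in the constants below (inferred from these
# tests rather than from library documentation): atoms are stored as
# {key: (symbol, implicit_hydrogen_count, stereo_parity)} and bonds as
# {frozenset({key1, key2}): (order, stereo_parity)}.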
# ISOBUTANE
C4H10_GRA = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('C', 0, None),
3: ('C', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
6: ('H', 0, None), 7: ('H', 0, None), 8: ('H', 0, None),
9: ('H', 0, None), 10: ('H', 0, None), 11: ('H', 0, None),
12: ('H', 0, None), 13: ('H', 0, None)},
{frozenset({0, 3}): (1, None), frozenset({0, 4}): (1, None),
frozenset({0, 5}): (1, None), frozenset({0, 6}): (1, None),
frozenset({1, 3}): (1, None), frozenset({1, 7}): (1, None),
frozenset({8, 1}): (1, None), frozenset({1, 9}): (1, None),
frozenset({2, 3}): (1, None), frozenset({2, 10}): (1, None),
frozenset({2, 11}): (1, None), frozenset({2, 12}): (1, None),
frozenset({3, 13}): (1, None)})
def test__equivalent_atoms():
""" test graph.equivalent_atoms
"""
# central carbon
assert graph.equivalent_atoms(C4H10_GRA, 3) == {3}
# central hydrogen
assert graph.equivalent_atoms(C4H10_GRA, 13) == {13}
# terminal carbons
assert graph.equivalent_atoms(C4H10_GRA, 0) == {0, 1, 2}
assert graph.equivalent_atoms(C4H10_GRA, 1) == {0, 1, 2}
assert graph.equivalent_atoms(C4H10_GRA, 2) == {0, 1, 2}
# terminal hydrogens
assert graph.equivalent_atoms(C4H10_GRA, 4) == {4, 5, 6, 7, 8, 9, 10,
11, 12}
assert graph.equivalent_atoms(C4H10_GRA, 5) == {4, 5, 6, 7, 8, 9, 10,
11, 12}
assert graph.equivalent_atoms(C4H10_GRA, 6) == {4, 5, 6, 7, 8, 9, 10,
11, 12}
assert graph.equivalent_atoms(C4H10_GRA, 11) == {4, 5, 6, 7, 8, 9, 10,
11, 12}
assert graph.equivalent_atoms(C4H10_GRA, 12) == {4, 5, 6, 7, 8, 9, 10,
11, 12}
def test__equivalent_bonds():
""" test graph.equivalent_atoms
"""
assert graph.equivalent_bonds(C4H10_GRA, (2, 3)) == {
(0, 3), (1, 3), (2, 3)}
def test__vmat__vmatrix():
""" test graph.vmat.vmatrix
"""
ich = automol.smiles.inchi('C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4')
gra = automol.inchi.graph(ich)
_, zma_keys = graph.vmat.vmatrix(gra)
assert set(zma_keys) == graph.atom_keys(gra)
# FC=CC=CF + [OH] => FC=C[CH]C(O)F
C4H5F2O_TSG = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('C', 0, None),
3: ('C', 0, None), 4: ('F', 0, None), 5: ('F', 0, None),
6: ('H', 0, None), 7: ('H', 0, None), 8: ('H', 0, None),
9: ('H', 0, None), 10: ('O', 0, None), 11: ('H', 0, None)},
{frozenset({8, 2}): (1, None), frozenset({2, 10}): (0.1, None),
frozenset({0, 6}): (1, None), frozenset({1, 7}): (1, None),
frozenset({9, 3}): (1, None), frozenset({0, 1}): (1, None),
frozenset({0, 2}): (1, True), frozenset({2, 4}): (1, None),
frozenset({3, 5}): (1, None), frozenset({10, 11}): (1, None),
frozenset({1, 3}): (1, False)})
# FC=C(C(O)F)C(O)F + [OH] => FC(O)[C](C(O)F)C(O)F
C4H5F3O2_TSG = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('C', 0, False),
3: ('C', 0, True), 4: ('F', 0, None), 5: ('F', 0, None),
6: ('F', 0, None), 7: ('O', 0, None), 8: ('O', 0, None),
9: ('H', 0, None), 10: ('H', 0, None), 11: ('H', 0, None),
12: ('H', 0, None), 13: ('H', 0, None), 14: ('O', 0, None),
15: ('H', 0, None)},
{frozenset({12, 7}): (1, None), frozenset({2, 10}): (1, None),
frozenset({1, 2}): (1, None), frozenset({0, 1}): (1, True),
frozenset({3, 6}): (1, None), frozenset({2, 7}): (1, None),
frozenset({2, 5}): (1, None), frozenset({0, 4}): (1, None),
frozenset({8, 3}): (1, None), frozenset({0, 14}): (0.1, None),
frozenset({8, 13}): (1, None), frozenset({14, 15}): (1, None),
frozenset({11, 3}): (1, None), frozenset({1, 3}): (1, None),
frozenset({0, 9}): (1, None)})
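# Reading of the TS graphs above (an inference from the data and the reaction
# comments, not a documented convention): the fractional bond order 0.1 marks
# the C-O bond being formed by the OH addition, while (1, True)/(1, False)
# entries carry bond stereo parities.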
def test__ts__nonconserved_atom_stereo_keys():
""" test graph.ts.nonconserved_atom_stereo_keys
"""
assert graph.ts.nonconserved_atom_stereo_keys(C4H5F2O_TSG) == (
(frozenset({2}), frozenset()))
assert graph.ts.nonconserved_atom_stereo_keys(C4H5F3O2_TSG) == (
(frozenset({0}), frozenset()))
def test__ts__nonconserved_bond_stereo_keys():
""" test graph.ts.nonconserved_bond_stereo_keys
"""
assert graph.ts.nonconserved_bond_stereo_keys(C4H5F2O_TSG) == (
(frozenset({frozenset({0, 1})}), frozenset({frozenset({0, 2})})))
assert graph.ts.nonconserved_bond_stereo_keys(C4H5F3O2_TSG) == (
(frozenset(), frozenset({frozenset({0, 1})})))
def test__ts__compatible_reverse_stereomers():
""" test graph.ts.stereo_expand_reverse_graphs
"""
for ste_tsg in graph.ts.stereomers(C4H5F2O_TSG):
ste_tsgs = [
s
for r in graph.ts.compatible_reverse_stereomers(ste_tsg)
for s in graph.ts.compatible_reverse_stereomers(r)]
assert any(s == ste_tsg for s in ste_tsgs)
for ste_tsg in graph.ts.stereomers(C4H5F3O2_TSG):
ste_tsgs = [
s
for r in graph.ts.compatible_reverse_stereomers(ste_tsg)
for s in graph.ts.compatible_reverse_stereomers(r)]
assert any(s == ste_tsg for s in ste_tsgs)
if __name__ == '__main__':
# test__from_data()
# test__set_atom_implicit_hydrogen_valences()
# test__without_bond_orders()
# test__without_stereo_parities()
# test__atom_explicit_hydrogen_valences()
# test__atom_explicit_hydrogen_keys()
# test__explicit()
# test__backbone_keys()
# test__explicit_hydrogen_keys()
# test__stereomers()
# test__heuristic_geometry()
# test__connected_components()
# test__unsaturated_atom_keys()
# test__bonds_neighbor_atom_keys()
# test__resonance_dominant_radical_atom_keys()
# test__remove_bonds()
# test__resonance_dominant_atom_centered_cumulene_keys()
# test__resonance_dominant_bond_centered_cumulene_keys()
# test__stereogenic_bond_keys()
# test__resonance_dominant_atom_hybridizations()
# test__rotational_bond_keys()
# test__electron_count()
# test__atom_count()
# test__heavy_atom_count()
# test__subresonances()
# test__sigma_radical_atom_keys()
# test__stereomers()
# test__to_index_based_stereo()
# test__ts__nonconserved_atom_stereo_keys()
# test__ts__nonconserved_bond_stereo_keys()
# test__ts__compatible_reverse_stereomers()
# test__vmat__vmatrix()
# test__branch()
test__equivalent_atoms()
test__equivalent_bonds()
|
[
"automol.graph.transform_keys",
"automol.graph.atom_symbols",
"automol.graph.from_index_based_stereo",
"automol.graph.stereogenic_bond_keys",
"automol.graph.atom_stereo_parities",
"automol.graph.string",
"automol.graph.subgraph",
"automol.graph.resonance_dominant_radical_atom_keys",
"automol.graph.to_index_based_stereo",
"automol.graph.bond_orders",
"automol.graph.ts.stereomers",
"automol.graph.bond_keys",
"automol.graph.equivalent_bonds",
"automol.graph.resonances",
"automol.graph.atom_element_valences",
"automol.graph.ts.nonconserved_atom_stereo_keys",
"automol.smiles.inchi",
"automol.graph.atom_keys",
"automol.graph.atoms_neighbor_atom_keys",
"automol.graph.atom_lone_pair_counts",
"automol.graph.atom_explicit_hydrogen_valences",
"automol.graph.possible_spin_multiplicities",
"automol.graph.relabel",
"automol.graph.dominant_resonances",
"automol.graph.atom_explicit_hydrogen_keys",
"automol.graph.resonance_dominant_atom_centered_cumulene_keys",
"automol.graph.remove_atoms",
"automol.graph.electron_count",
"automol.graph.connected_components",
"automol.inchi.graph",
"automol.graph.atom_unsaturated_valences",
"automol.graph.backbone_isomorphism",
"automol.graph.union",
"automol.graph.resonance_dominant_bond_orders",
"automol.graph.equivalent_atoms",
"automol.graph.dominant_resonance",
"automol.graph.stereogenic_atom_keys",
"automol.graph.set_atom_implicit_hydrogen_valences",
"automol.graph.ring_systems",
"automol.graph.stereomers",
"automol.graph.atom_implicit_hydrogen_valences",
"automol.graph.formula",
"automol.graph.backbone_keys",
"numpy.random.permutation",
"automol.graph.atoms_bond_keys",
"automol.graph.explicit",
"automol.graph.vmat.vmatrix",
"automol.graph.bonds_neighbor_atom_keys",
"automol.graph.atom_count",
"automol.graph.explicit_hydrogen_keys",
"automol.graph.atom_bond_valences",
"automol.graph.ts.compatible_reverse_stereomers",
"automol.graph.atoms",
"automol.graph.rotational_bond_keys",
"automol.graph.sigma_radical_atom_keys",
"automol.graph.without_stereo_parities",
"automol.graph.atoms_second_degree_neighbor_atom_keys",
"automol.graph.bond_symmetry_numbers",
"automol.graph.resonance_dominant_bond_centered_cumulene_keys",
"automol.graph.maximum_spin_multiplicity",
"automol.graph.backbone_unique",
"automol.graph.backbone_isomorphic",
"automol.graph.subresonances",
"automol.graph.without_bond_orders",
"automol.graph.heavy_atom_count",
"automol.graph.resonance_dominant_atom_hybridizations",
"automol.graph.unsaturated_atom_keys",
"automol.graph.bond_stereo_parities",
"automol.graph.ts.nonconserved_bond_stereo_keys"
] |
[((14521, 14548), 'automol.graph.atom_keys', 'graph.atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (14536, 14548), False, 'from automol import graph\n'), ((14559, 14656), 'automol.graph.set_atom_implicit_hydrogen_valences', 'graph.set_atom_implicit_hydrogen_valences', (['C8H13O_CGR', '{atm_key: (0) for atm_key in atm_keys}'], {}), '(C8H13O_CGR, {atm_key: (0) for\n atm_key in atm_keys})\n', (14600, 14656), False, 'from automol import graph\n'), ((15651, 15677), 'automol.graph.explicit', 'graph.explicit', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (15665, 15677), False, 'from automol import graph\n'), ((18334, 18386), 'automol.graph.transform_keys', 'graph.transform_keys', (['gra2', '(lambda x: x + gra1_natms)'], {}), '(gra2, lambda x: x + gra1_natms)\n', (18354, 18386), False, 'from automol import graph\n'), ((18398, 18421), 'automol.graph.union', 'graph.union', (['gra1', 'gra2'], {}), '(gra1, gra2)\n', (18409, 18421), False, 'from automol import graph\n'), ((18437, 18468), 'automol.graph.connected_components', 'graph.connected_components', (['gra'], {}), '(gra)\n', (18463, 18468), False, 'from automol import graph\n'), ((21207, 21266), 'automol.graph.backbone_isomorphic', 'graph.backbone_isomorphic', (['CH2FH2H_CGR_IMP', 'CH2FH2H_CGR_EXP'], {}), '(CH2FH2H_CGR_IMP, CH2FH2H_CGR_EXP)\n', (21232, 21266), False, 'from automol import graph\n'), ((28420, 28447), 'automol.graph.explicit', 'automol.graph.explicit', (['cgr'], {}), '(cgr)\n', (28442, 28447), False, 'import automol\n'), ((31128, 31187), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4"""'], {}), "('C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4')\n", (31148, 31187), False, 'import automol\n'), ((31198, 31222), 'automol.inchi.graph', 'automol.inchi.graph', (['ich'], {}), '(ich)\n', (31217, 31222), False, 'import automol\n'), ((33578, 33637), 'automol.smiles.inchi', 'automol.smiles.inchi', (['"""C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4"""'], {}), "('C12CC(C1)C2CC3C(C3)CCC4C5CCC(CC5)C4')\n", (33598, 33637), False, 'import automol\n'), ((33648, 33672), 'automol.inchi.graph', 'automol.inchi.graph', (['ich'], {}), '(ich)\n', (33667, 33672), False, 'import automol\n'), ((33691, 33714), 'automol.graph.vmat.vmatrix', 'graph.vmat.vmatrix', (['gra'], {}), '(gra)\n', (33709, 33714), False, 'from automol import graph\n'), ((36416, 36448), 'automol.graph.ts.stereomers', 'graph.ts.stereomers', (['C4H5F2O_TSG'], {}), '(C4H5F2O_TSG)\n', (36435, 36448), False, 'from automol import graph\n'), ((36689, 36722), 'automol.graph.ts.stereomers', 'graph.ts.stereomers', (['C4H5F3O2_TSG'], {}), '(C4H5F3O2_TSG)\n', (36708, 36722), False, 'from automol import graph\n'), ((15061, 15098), 'automol.graph.without_bond_orders', 'graph.without_bond_orders', (['C8H13O_RGR'], {}), '(C8H13O_RGR)\n', (15086, 15098), False, 'from automol import graph\n'), ((15214, 15255), 'automol.graph.without_stereo_parities', 'graph.without_stereo_parities', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (15243, 15255), False, 'from automol import graph\n'), ((15339, 15371), 'automol.graph.electron_count', 'graph.electron_count', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (15359, 15371), False, 'from automol import graph\n'), ((15457, 15485), 'automol.graph.atom_count', 'graph.atom_count', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (15473, 15485), False, 'from automol import graph\n'), ((15503, 15552), 'automol.graph.atom_count', 'graph.atom_count', (['C8H13O_CGR'], {'with_implicit': '(False)'}), '(C8H13O_CGR, with_implicit=False)\n', (15519, 15552), False, 'from automol 
import graph\n'), ((15689, 15716), 'automol.graph.heavy_atom_count', 'graph.heavy_atom_count', (['cgr'], {}), '(cgr)\n', (15711, 15716), False, 'from automol import graph\n'), ((15825, 15867), 'automol.graph.atoms_neighbor_atom_keys', 'graph.atoms_neighbor_atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (15855, 15867), False, 'from automol import graph\n'), ((16259, 16315), 'automol.graph.atoms_second_degree_neighbor_atom_keys', 'graph.atoms_second_degree_neighbor_atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (16303, 16315), False, 'from automol import graph\n'), ((16691, 16724), 'automol.graph.atoms_bond_keys', 'graph.atoms_bond_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (16712, 16724), False, 'from automol import graph\n'), ((17423, 17465), 'automol.graph.bonds_neighbor_atom_keys', 'graph.bonds_neighbor_atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (17453, 17465), False, 'from automol import graph\n'), ((18298, 18321), 'automol.graph.formula', 'graph.formula', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (18311, 18321), False, 'from automol import graph\n'), ((18592, 18624), 'automol.graph.subgraph', 'graph.subgraph', (['C3H3_CGR', '(1, 2)'], {}), '(C3H3_CGR, (1, 2))\n', (18606, 18624), False, 'from automol import graph\n'), ((19141, 19193), 'automol.graph.relabel', 'graph.relabel', (['C3H3_CGR', '{(0): 10, (1): 11, (2): 12}'], {}), '(C3H3_CGR, {(0): 10, (1): 11, (2): 12})\n', (19154, 19193), False, 'from automol import graph\n'), ((19457, 19491), 'automol.graph.remove_atoms', 'graph.remove_atoms', (['C3H3_CGR', '(0,)'], {}), '(C3H3_CGR, (0,))\n', (19475, 19491), False, 'from automol import graph\n'), ((20032, 20086), 'automol.graph.atom_explicit_hydrogen_valences', 'graph.atom_explicit_hydrogen_valences', (['CH2FH2H_CGR_EXP'], {}), '(CH2FH2H_CGR_EXP)\n', (20069, 20086), False, 'from automol import graph\n'), ((20256, 20306), 'automol.graph.atom_explicit_hydrogen_keys', 'graph.atom_explicit_hydrogen_keys', (['CH2FH2H_CGR_EXP'], {}), '(CH2FH2H_CGR_EXP)\n', (20289, 20306), False, 'from automol import graph\n'), ((20596, 20632), 'automol.graph.backbone_keys', 'graph.backbone_keys', (['CH2FH2H_CGR_EXP'], {}), '(CH2FH2H_CGR_EXP)\n', (20615, 20632), False, 'from automol import graph\n'), ((20759, 20804), 'automol.graph.explicit_hydrogen_keys', 'graph.explicit_hydrogen_keys', (['CH2FH2H_CGR_EXP'], {}), '(CH2FH2H_CGR_EXP)\n', (20787, 20804), False, 'from automol import graph\n'), ((20928, 20959), 'automol.graph.explicit', 'graph.explicit', (['CH2FH2H_CGR_IMP'], {}), '(CH2FH2H_CGR_IMP)\n', (20942, 20959), False, 'from automol import graph\n'), ((21305, 21321), 'automol.graph.atoms', 'graph.atoms', (['cgr'], {}), '(cgr)\n', (21316, 21321), False, 'from automol import graph\n'), ((21432, 21459), 'automol.graph.relabel', 'graph.relabel', (['cgr', 'pmt_dct'], {}), '(cgr, pmt_dct)\n', (21445, 21459), False, 'from automol import graph\n'), ((21475, 21514), 'automol.graph.backbone_isomorphic', 'graph.backbone_isomorphic', (['cgr', 'cgr_pmt'], {}), '(cgr, cgr_pmt)\n', (21500, 21514), False, 'from automol import graph\n'), ((21636, 21652), 'automol.graph.atoms', 'graph.atoms', (['cgr'], {}), '(cgr)\n', (21647, 21652), False, 'from automol import graph\n'), ((21763, 21790), 'automol.graph.relabel', 'graph.relabel', (['cgr', 'pmt_dct'], {}), '(cgr, pmt_dct)\n', (21776, 21790), False, 'from automol import graph\n'), ((21943, 21975), 'automol.graph.backbone_unique', 'graph.backbone_unique', (['C3H3_RGRS'], {}), '(C3H3_RGRS)\n', (21964, 21975), False, 'from automol import graph\n'), ((22110, 22149), 
'automol.graph.atom_element_valences', 'graph.atom_element_valences', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (22137, 22149), False, 'from automol import graph\n'), ((22314, 22353), 'automol.graph.atom_lone_pair_counts', 'graph.atom_lone_pair_counts', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (22341, 22353), False, 'from automol import graph\n'), ((22512, 22548), 'automol.graph.atom_bond_valences', 'graph.atom_bond_valences', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (22536, 22548), False, 'from automol import graph\n'), ((22721, 22764), 'automol.graph.atom_unsaturated_valences', 'graph.atom_unsaturated_valences', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (22752, 22764), False, 'from automol import graph\n'), ((22929, 22968), 'automol.graph.unsaturated_atom_keys', 'graph.unsaturated_atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (22956, 22968), False, 'from automol import graph\n'), ((23113, 23152), 'automol.graph.maximum_spin_multiplicity', 'graph.maximum_spin_multiplicity', (['C2_CGR'], {}), '(C2_CGR)\n', (23144, 23152), False, 'from automol import graph\n'), ((23269, 23311), 'automol.graph.possible_spin_multiplicities', 'graph.possible_spin_multiplicities', (['C2_CGR'], {}), '(C2_CGR)\n', (23303, 23311), False, 'from automol import graph\n'), ((23441, 23480), 'automol.graph.bond_symmetry_numbers', 'graph.bond_symmetry_numbers', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (23468, 23480), False, 'from automol import graph\n'), ((23863, 23917), 'automol.graph.resonance_dominant_atom_hybridizations', 'graph.resonance_dominant_atom_hybridizations', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (23907, 23917), False, 'from automol import graph\n'), ((23960, 24016), 'automol.graph.resonance_dominant_atom_hybridizations', 'graph.resonance_dominant_atom_hybridizations', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (24004, 24016), False, 'from automol import graph\n'), ((24452, 24501), 'automol.graph.resonance_dominant_atom_hybridizations', 'graph.resonance_dominant_atom_hybridizations', (['cgr'], {}), '(cgr)\n', (24496, 24501), False, 'from automol import graph\n'), ((25038, 25095), 'automol.graph.resonance_dominant_atom_centered_cumulene_keys', 'graph.resonance_dominant_atom_centered_cumulene_keys', (['cgr'], {}), '(cgr)\n', (25090, 25095), False, 'from automol import graph\n'), ((25622, 25679), 'automol.graph.resonance_dominant_bond_centered_cumulene_keys', 'graph.resonance_dominant_bond_centered_cumulene_keys', (['cgr'], {}), '(cgr)\n', (25674, 25679), False, 'from automol import graph\n'), ((25875, 25927), 'automol.graph.resonance_dominant_radical_atom_keys', 'graph.resonance_dominant_radical_atom_keys', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (25917, 25927), False, 'from automol import graph\n'), ((25972, 26026), 'automol.graph.resonance_dominant_radical_atom_keys', 'graph.resonance_dominant_radical_atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (26014, 26026), False, 'from automol import graph\n'), ((26385, 26419), 'automol.graph.sigma_radical_atom_keys', 'graph.sigma_radical_atom_keys', (['gra'], {}), '(gra)\n', (26414, 26419), False, 'from automol import graph\n'), ((27170, 27204), 'automol.graph.sigma_radical_atom_keys', 'graph.sigma_radical_atom_keys', (['gra'], {}), '(gra)\n', (27199, 27204), False, 'from automol import graph\n'), ((27364, 27410), 'automol.graph.resonance_dominant_bond_orders', 'graph.resonance_dominant_bond_orders', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (27400, 27410), False, 'from automol import graph\n'), ((27654, 27680), 'automol.graph.resonances', 'graph.resonances', (['C3H3_CGR'], 
{}), '(C3H3_CGR)\n', (27670, 27680), False, 'from automol import graph\n'), ((27775, 27806), 'automol.graph.subresonances', 'graph.subresonances', (['C2_RGRS[1]'], {}), '(C2_RGRS[1])\n', (27794, 27806), False, 'from automol import graph\n'), ((27915, 27950), 'automol.graph.dominant_resonances', 'graph.dominant_resonances', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (27940, 27950), False, 'from automol import graph\n'), ((28059, 28093), 'automol.graph.dominant_resonance', 'graph.dominant_resonance', (['C3H3_CGR'], {}), '(C3H3_CGR)\n', (28083, 28093), False, 'from automol import graph\n'), ((28460, 28499), 'automol.graph.rotational_bond_keys', 'automol.graph.rotational_bond_keys', (['cgr'], {}), '(cgr)\n', (28494, 28499), False, 'import automol\n'), ((28776, 28815), 'automol.graph.rotational_bond_keys', 'automol.graph.rotational_bond_keys', (['cgr'], {}), '(cgr)\n', (28810, 28815), False, 'import automol\n'), ((28936, 28996), 'automol.graph.rotational_bond_keys', 'automol.graph.rotational_bond_keys', (['cgr'], {'with_h_rotors': '(False)'}), '(cgr, with_h_rotors=False)\n', (28970, 28996), False, 'import automol\n'), ((29164, 29203), 'automol.graph.stereogenic_atom_keys', 'graph.stereogenic_atom_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (29191, 29203), False, 'from automol import graph\n'), ((29236, 29278), 'automol.graph.stereogenic_atom_keys', 'graph.stereogenic_atom_keys', (['C3H3CL2F3_CGR'], {}), '(C3H3CL2F3_CGR)\n', (29263, 29278), False, 'from automol import graph\n'), ((29528, 29560), 'automol.graph.stereogenic_atom_keys', 'graph.stereogenic_atom_keys', (['cgr'], {}), '(cgr)\n', (29555, 29560), False, 'from automol import graph\n'), ((29675, 29714), 'automol.graph.stereogenic_bond_keys', 'graph.stereogenic_bond_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (29702, 29714), False, 'from automol import graph\n'), ((29726, 29765), 'automol.graph.stereogenic_bond_keys', 'graph.stereogenic_bond_keys', (['C3H5N3_CGR'], {}), '(C3H5N3_CGR)\n', (29753, 29765), False, 'from automol import graph\n'), ((29778, 29817), 'automol.graph.stereogenic_bond_keys', 'graph.stereogenic_bond_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (29805, 29817), False, 'from automol import graph\n'), ((29872, 29911), 'automol.graph.stereogenic_bond_keys', 'graph.stereogenic_bond_keys', (['C3H5N3_CGR'], {}), '(C3H5N3_CGR)\n', (29899, 29911), False, 'from automol import graph\n'), ((30049, 30080), 'automol.graph.stereomers', 'graph.stereomers', (['C2H2CL2F2_CGR'], {}), '(C2H2CL2F2_CGR)\n', (30065, 30080), False, 'from automol import graph\n'), ((30110, 30141), 'automol.graph.stereomers', 'graph.stereomers', (['C3H3CL2F3_CGR'], {}), '(C3H3CL2F3_CGR)\n', (30126, 30141), False, 'from automol import graph\n'), ((30171, 30199), 'automol.graph.stereomers', 'graph.stereomers', (['C3H5N3_CGR'], {}), '(C3H5N3_CGR)\n', (30187, 30199), False, 'from automol import graph\n'), ((30226, 30254), 'automol.graph.stereomers', 'graph.stereomers', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (30242, 30254), False, 'from automol import graph\n'), ((30390, 30409), 'automol.graph.explicit', 'graph.explicit', (['sgr'], {}), '(sgr)\n', (30404, 30409), False, 'from automol import graph\n'), ((30428, 30460), 'automol.graph.to_index_based_stereo', 'graph.to_index_based_stereo', (['sgr'], {}), '(sgr)\n', (30455, 30460), False, 'from automol import graph\n'), ((30568, 30587), 'automol.graph.explicit', 'graph.explicit', (['sgr'], {}), '(sgr)\n', (30582, 30587), False, 'from automol import graph\n'), ((30606, 30638), 'automol.graph.to_index_based_stereo', 
'graph.to_index_based_stereo', (['sgr'], {}), '(sgr)\n', (30633, 30638), False, 'from automol import graph\n'), ((30743, 30762), 'automol.graph.explicit', 'graph.explicit', (['sgr'], {}), '(sgr)\n', (30757, 30762), False, 'from automol import graph\n'), ((30781, 30813), 'automol.graph.to_index_based_stereo', 'graph.to_index_based_stereo', (['sgr'], {}), '(sgr)\n', (30808, 30813), False, 'from automol import graph\n'), ((30918, 30937), 'automol.graph.explicit', 'graph.explicit', (['sgr'], {}), '(sgr)\n', (30932, 30937), False, 'from automol import graph\n'), ((30956, 30988), 'automol.graph.to_index_based_stereo', 'graph.to_index_based_stereo', (['sgr'], {}), '(sgr)\n', (30983, 30988), False, 'from automol import graph\n'), ((31241, 31264), 'automol.graph.ring_systems', 'graph.ring_systems', (['gra'], {}), '(gra)\n', (31259, 31264), False, 'from automol import graph\n'), ((32304, 32340), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(3)'], {}), '(C4H10_GRA, 3)\n', (32326, 32340), False, 'from automol import graph\n'), ((32382, 32419), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(13)'], {}), '(C4H10_GRA, 13)\n', (32404, 32419), False, 'from automol import graph\n'), ((32462, 32498), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(0)'], {}), '(C4H10_GRA, 0)\n', (32484, 32498), False, 'from automol import graph\n'), ((32523, 32559), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(1)'], {}), '(C4H10_GRA, 1)\n', (32545, 32559), False, 'from automol import graph\n'), ((32584, 32620), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(2)'], {}), '(C4H10_GRA, 2)\n', (32606, 32620), False, 'from automol import graph\n'), ((32670, 32706), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(4)'], {}), '(C4H10_GRA, 4)\n', (32692, 32706), False, 'from automol import graph\n'), ((32804, 32840), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(5)'], {}), '(C4H10_GRA, 5)\n', (32826, 32840), False, 'from automol import graph\n'), ((32938, 32974), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(6)'], {}), '(C4H10_GRA, 6)\n', (32960, 32974), False, 'from automol import graph\n'), ((33072, 33109), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(11)'], {}), '(C4H10_GRA, 11)\n', (33094, 33109), False, 'from automol import graph\n'), ((33208, 33245), 'automol.graph.equivalent_atoms', 'graph.equivalent_atoms', (['C4H10_GRA', '(12)'], {}), '(C4H10_GRA, 12)\n', (33230, 33245), False, 'from automol import graph\n'), ((33420, 33461), 'automol.graph.equivalent_bonds', 'graph.equivalent_bonds', (['C4H10_GRA', '(2, 3)'], {}), '(C4H10_GRA, (2, 3))\n', (33442, 33461), False, 'from automol import graph\n'), ((33743, 33763), 'automol.graph.atom_keys', 'graph.atom_keys', (['gra'], {}), '(gra)\n', (33758, 33763), False, 'from automol import graph\n'), ((35710, 35761), 'automol.graph.ts.nonconserved_atom_stereo_keys', 'graph.ts.nonconserved_atom_stereo_keys', (['C4H5F2O_TSG'], {}), '(C4H5F2O_TSG)\n', (35748, 35761), False, 'from automol import graph\n'), ((35817, 35869), 'automol.graph.ts.nonconserved_atom_stereo_keys', 'graph.ts.nonconserved_atom_stereo_keys', (['C4H5F3O2_TSG'], {}), '(C4H5F3O2_TSG)\n', (35855, 35869), False, 'from automol import graph\n'), ((36034, 36085), 'automol.graph.ts.nonconserved_bond_stereo_keys', 'graph.ts.nonconserved_bond_stereo_keys', 
(['C4H5F2O_TSG'], {}), '(C4H5F2O_TSG)\n', (36072, 36085), False, 'from automol import graph\n'), ((36176, 36228), 'automol.graph.ts.nonconserved_bond_stereo_keys', 'graph.ts.nonconserved_bond_stereo_keys', (['C4H5F3O2_TSG'], {}), '(C4H5F3O2_TSG)\n', (36214, 36228), False, 'from automol import graph\n'), ((13481, 13511), 'automol.graph.atom_symbols', 'graph.atom_symbols', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (13499, 13511), False, 'from automol import graph\n'), ((13530, 13557), 'automol.graph.bond_keys', 'graph.bond_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (13545, 13557), False, 'from automol import graph\n'), ((13601, 13650), 'automol.graph.atom_implicit_hydrogen_valences', 'graph.atom_implicit_hydrogen_valences', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (13638, 13650), False, 'from automol import graph\n'), ((13744, 13774), 'automol.graph.atom_symbols', 'graph.atom_symbols', (['C8H13O_RGR'], {}), '(C8H13O_RGR)\n', (13762, 13774), False, 'from automol import graph\n'), ((13793, 13820), 'automol.graph.bond_keys', 'graph.bond_keys', (['C8H13O_RGR'], {}), '(C8H13O_RGR)\n', (13808, 13820), False, 'from automol import graph\n'), ((13864, 13913), 'automol.graph.atom_implicit_hydrogen_valences', 'graph.atom_implicit_hydrogen_valences', (['C8H13O_RGR'], {}), '(C8H13O_RGR)\n', (13901, 13913), False, 'from automol import graph\n'), ((13936, 13965), 'automol.graph.bond_orders', 'graph.bond_orders', (['C8H13O_RGR'], {}), '(C8H13O_RGR)\n', (13953, 13965), False, 'from automol import graph\n'), ((14058, 14088), 'automol.graph.atom_symbols', 'graph.atom_symbols', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (14076, 14088), False, 'from automol import graph\n'), ((14107, 14134), 'automol.graph.bond_keys', 'graph.bond_keys', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (14122, 14134), False, 'from automol import graph\n'), ((14178, 14227), 'automol.graph.atom_implicit_hydrogen_valences', 'graph.atom_implicit_hydrogen_valences', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (14215, 14227), False, 'from automol import graph\n'), ((14254, 14292), 'automol.graph.atom_stereo_parities', 'graph.atom_stereo_parities', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (14280, 14292), False, 'from automol import graph\n'), ((14318, 14356), 'automol.graph.bond_stereo_parities', 'graph.bond_stereo_parities', (['C8H13O_SGR'], {}), '(C8H13O_SGR)\n', (14344, 14356), False, 'from automol import graph\n'), ((14712, 14742), 'automol.graph.atom_symbols', 'graph.atom_symbols', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (14730, 14742), False, 'from automol import graph\n'), ((14744, 14771), 'automol.graph.bond_keys', 'graph.bond_keys', (['C8H13O_CGR'], {}), '(C8H13O_CGR)\n', (14759, 14771), False, 'from automol import graph\n'), ((21065, 21096), 'automol.graph.explicit', 'graph.explicit', (['CH2FH2H_CGR_IMP'], {}), '(CH2FH2H_CGR_IMP)\n', (21079, 21096), False, 'from automol import graph\n'), ((21806, 21846), 'automol.graph.backbone_isomorphism', 'graph.backbone_isomorphism', (['cgr', 'cgr_pmt'], {}), '(cgr, cgr_pmt)\n', (21832, 21846), False, 'from automol import graph\n'), ((30483, 30521), 'automol.graph.from_index_based_stereo', 'graph.from_index_based_stereo', (['idx_sgr'], {}), '(idx_sgr)\n', (30512, 30521), False, 'from automol import graph\n'), ((30661, 30699), 'automol.graph.from_index_based_stereo', 'graph.from_index_based_stereo', (['idx_sgr'], {}), '(idx_sgr)\n', (30690, 30699), False, 'from automol import graph\n'), ((30836, 30874), 'automol.graph.from_index_based_stereo', 'graph.from_index_based_stereo', (['idx_sgr'], {}), 
'(idx_sgr)\n', (30865, 30874), False, 'from automol import graph\n'), ((31011, 31049), 'automol.graph.from_index_based_stereo', 'graph.from_index_based_stereo', (['idx_sgr'], {}), '(idx_sgr)\n', (31040, 31049), False, 'from automol import graph\n'), ((14927, 14952), 'automol.graph.string', 'automol.graph.string', (['sgr'], {}), '(sgr)\n', (14947, 14952), False, 'import automol\n'), ((21380, 21411), 'numpy.random.permutation', 'numpy.random.permutation', (['natms'], {}), '(natms)\n', (21404, 21411), False, 'import numpy\n'), ((21711, 21742), 'numpy.random.permutation', 'numpy.random.permutation', (['natms'], {}), '(natms)\n', (21735, 21742), False, 'import numpy\n'), ((36506, 36553), 'automol.graph.ts.compatible_reverse_stereomers', 'graph.ts.compatible_reverse_stereomers', (['ste_tsg'], {}), '(ste_tsg)\n', (36544, 36553), False, 'from automol import graph\n'), ((36575, 36616), 'automol.graph.ts.compatible_reverse_stereomers', 'graph.ts.compatible_reverse_stereomers', (['r'], {}), '(r)\n', (36613, 36616), False, 'from automol import graph\n'), ((36780, 36827), 'automol.graph.ts.compatible_reverse_stereomers', 'graph.ts.compatible_reverse_stereomers', (['ste_tsg'], {}), '(ste_tsg)\n', (36818, 36827), False, 'from automol import graph\n'), ((36849, 36890), 'automol.graph.ts.compatible_reverse_stereomers', 'graph.ts.compatible_reverse_stereomers', (['r'], {}), '(r)\n', (36887, 36890), False, 'from automol import graph\n')]
|
import botbowl
from botbowl.core import Action, Agent
import numpy as np
from copy import deepcopy
import random
import time
from botbowl.core.model import Team
PRINT = False
IGNORE_IN_GAME = [botbowl.ActionType.PLACE_PLAYER, botbowl.ActionType.END_SETUP, botbowl.ActionType.SETUP_FORMATION_SPREAD,
botbowl.ActionType.SETUP_FORMATION_LINE, botbowl.ActionType.SETUP_FORMATION_WEDGE, botbowl.ActionType.SETUP_FORMATION_ZONE]
class Node:
def __init__(self, action=None, parent=None, C=4):
self.parent = parent
self.children = []
self.action = action
self.evaluations = []
self.C = C
self.n_wins = 0
self.n_sims = 0
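    # UTC returns a UCB1-style selection score: the exploitation term
    # n_wins / n_sims plus the exploration term C * sqrt(ln(root.n_sims) / n_sims);
    # unvisited children return +inf so each child is tried at least once.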
def UTC(self, root):
if self.n_sims != 0:
return self.n_wins / self.n_sims + self.C * (np.sqrt(np.log(root.n_sims) / self.n_sims))
else:
return float('inf')
def extract_children(self, game: botbowl.Game):
for action_choice in game.get_available_actions():
for player in action_choice.players:
self.children.append(
Node(Action(action_choice.action_type, player=player), parent=self))
for position in action_choice.positions:
self.children.append(
Node(Action(action_choice.action_type, position=position), parent=self))
if len(action_choice.players) == len(action_choice.positions) == 0:
self.children.append(
Node(Action(action_choice.action_type), parent=self))
return self
class SearchBot(botbowl.Agent):
def __init__(self, name, budget=10, time_budget=5, seed=None):
super().__init__(name)
self.my_team = None
self.budget = budget
self.time_budget = time_budget
self.path = []
self.last_action = None
def new_game(self, game, team):
print("NEW GAME woop woop")
self.my_team = team
def end_game(self, game: botbowl.Game):
# game._end_game()
print("END GAME")
pass
def selection(self, node: Node) -> Node:
return node.children[np.argmax([n.UTC(node) for n in node.children])]
def rollout(self, game: botbowl.Game, node: Node):
step_before_rollout = game.get_step()
if PRINT:
print(
f'condition 1: {not game.state.game_over and len(node.children) == 0}')
while not game.state.game_over and len(node.children) == 0:
action = np.random.choice(
node.extract_children(game).children).action
# if True:
# print('---------------->', action)
if action.action_type != botbowl.ActionType.PLACE_PLAYER:
game.step(action)
win = game.get_winner()
if PRINT:
print(f'winner: {win}')
        if win is None:
            # Draw: assign a small negative score
            score = -1
elif win == self:
score = 10
else:
score = -5
game.revert(step_before_rollout) # not sure if necessary
return score
def expand(self, game: botbowl.Game, node: Node):
game.step(node.action)
self.path.append(node)
node.extract_children(game=game)
def backpropagate(self, score, node: Node):
for n in range(len(self.path)):
self.path[n].n_sims += 1
self.path[n].n_wins += score
node.n_sims += 1
node.n_wins += score
def act(self, game: botbowl.Game):
game_copy = deepcopy(game)
game_copy.enable_forward_model()
game_copy.home_agent.human = True
game_copy.away_agent.human = True
root_step = game_copy.get_step()
root_node = Node()
available_actions = [
elem.action_type for elem in game_copy.get_available_actions()]
if PRINT:
print(available_actions)
# if we only have one action, return it, no need to choose what the best action can be
# if len(available_actions) == 1:
# return Action(available_actions[0])
# handle placing ball randomly on board
if len(available_actions) == 1:
if available_actions[0] == botbowl.ActionType.PLACE_BALL:
if PRINT:
print(
f'positions: {game_copy.get_available_actions()[0].positions}')
return Action(botbowl.ActionType.PLACE_BALL, position=np.random.choice(game.get_available_actions()[0].positions))
# else:
# print(f'single action is: {available_actions[0]}')
# input()
# handle heads or tail
if botbowl.ActionType.HEADS in available_actions or botbowl.ActionType.TAILS in available_actions:
return np.random.choice([Action(botbowl.ActionType.HEADS), Action(botbowl.ActionType.TAILS)])
# handle kick or receive
if botbowl.ActionType.KICK in available_actions or botbowl.ActionType.RECEIVE in available_actions:
# return np.random.choice([Action(botbowl.ActionType.KICK), Action(botbowl.ActionType.RECEIVE)])
return Action(botbowl.ActionType.KICK) # TODO remove
# handle the action to setup the bot team
if botbowl.ActionType.PLACE_PLAYER in available_actions or botbowl.ActionType.END_SETUP in available_actions or botbowl.ActionType.SETUP_FORMATION_SPREAD in available_actions or botbowl.ActionType.SETUP_FORMATION_WEDGE in available_actions:
available_actions.remove(botbowl.ActionType.PLACE_PLAYER)
for elem in game_copy.get_players_on_pitch(team=self.my_team):
return Action(botbowl.ActionType.END_SETUP)
available_actions.remove(botbowl.ActionType.END_SETUP)
return Action(np.random.choice(available_actions))
if game.get_ball().on_ground and botbowl.ActionType.MOVE in available_actions and self.last_action == botbowl.ActionType.START_MOVE:
return Action(botbowl.ActionType.MOVE, game.get_ball().position,
player=np.random.choice(game.get_players_on_pitch(team=self.my_team)))
root_node.extract_children(game=game_copy)
start = time.time()
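        # MCTS cycle on the copied game: walk down from the root via UTC; an
        # unvisited node is rolled out and its score backpropagated along
        # self.path, while an already-visited node is expanded (stepping the
        # copied game) before selection continues one level deeper.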
for i in range(self.budget):
# while time.time() - start < self.time_budget:
# selection of node
node = self.selection(root_node)
self.path = [root_node]
while True:
if node.n_sims == 0:
score = self.rollout(game=game_copy, node=node)
self.backpropagate(score=score, node=node)
break
else:
self.expand(game=game_copy, node=node)
node = self.selection(node)
# if time.time() - start >= self.time_budget:
# break
game_copy.revert(root_step)
self.last_action = root_node.children[np.argmax(
[n.n_wins for n in root_node.children])].action
return self.last_action
# Register the bot to the framework
botbowl.register_bot('MCTS-bot-budget-10', SearchBot)
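# A hedged sketch of how the registered bot could be exercised; the config and
# team names below are assumptions based on typical botbowl setups and may need
# adjusting (e.g. arena/ruleset arguments) for a given botbowl version:
#
#     config = botbowl.load_config("bot-bowl")
#     ruleset = botbowl.load_rule_set(config.ruleset)
#     home = botbowl.load_team_by_filename("human", ruleset)
#     away = botbowl.load_team_by_filename("human", ruleset)
#     game = botbowl.Game(1, home, away,
#                         botbowl.make_bot('MCTS-bot-budget-10'),
#                         botbowl.make_bot('random'), config)
#     game.init()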
|
[
"copy.deepcopy",
"numpy.log",
"numpy.argmax",
"time.time",
"botbowl.core.Action",
"numpy.random.choice",
"botbowl.register_bot"
] |
[((7138, 7191), 'botbowl.register_bot', 'botbowl.register_bot', (['"""MCTS-bot-budget-10"""', 'SearchBot'], {}), "('MCTS-bot-budget-10', SearchBot)\n", (7158, 7191), False, 'import botbowl\n'), ((3561, 3575), 'copy.deepcopy', 'deepcopy', (['game'], {}), '(game)\n', (3569, 3575), False, 'from copy import deepcopy\n'), ((6246, 6257), 'time.time', 'time.time', ([], {}), '()\n', (6255, 6257), False, 'import time\n'), ((5180, 5211), 'botbowl.core.Action', 'Action', (['botbowl.ActionType.KICK'], {}), '(botbowl.ActionType.KICK)\n', (5186, 5211), False, 'from botbowl.core import Action, Agent\n'), ((5695, 5731), 'botbowl.core.Action', 'Action', (['botbowl.ActionType.END_SETUP'], {}), '(botbowl.ActionType.END_SETUP)\n', (5701, 5731), False, 'from botbowl.core import Action, Agent\n'), ((5825, 5860), 'numpy.random.choice', 'np.random.choice', (['available_actions'], {}), '(available_actions)\n', (5841, 5860), True, 'import numpy as np\n'), ((6996, 7045), 'numpy.argmax', 'np.argmax', (['[n.n_wins for n in root_node.children]'], {}), '([n.n_wins for n in root_node.children])\n', (7005, 7045), True, 'import numpy as np\n'), ((4841, 4873), 'botbowl.core.Action', 'Action', (['botbowl.ActionType.HEADS'], {}), '(botbowl.ActionType.HEADS)\n', (4847, 4873), False, 'from botbowl.core import Action, Agent\n'), ((4875, 4907), 'botbowl.core.Action', 'Action', (['botbowl.ActionType.TAILS'], {}), '(botbowl.ActionType.TAILS)\n', (4881, 4907), False, 'from botbowl.core import Action, Agent\n'), ((1122, 1170), 'botbowl.core.Action', 'Action', (['action_choice.action_type'], {'player': 'player'}), '(action_choice.action_type, player=player)\n', (1128, 1170), False, 'from botbowl.core import Action, Agent\n'), ((1302, 1354), 'botbowl.core.Action', 'Action', (['action_choice.action_type'], {'position': 'position'}), '(action_choice.action_type, position=position)\n', (1308, 1354), False, 'from botbowl.core import Action, Agent\n'), ((1513, 1546), 'botbowl.core.Action', 'Action', (['action_choice.action_type'], {}), '(action_choice.action_type)\n', (1519, 1546), False, 'from botbowl.core import Action, Agent\n'), ((816, 835), 'numpy.log', 'np.log', (['root.n_sims'], {}), '(root.n_sims)\n', (822, 835), True, 'import numpy as np\n')]
|
import math
import matplotlib
import numpy as np
from typing import Sequence
from PIL import Image
from io import BytesIO
from contextlib import contextmanager
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from figpptx.slide_editor import SlideTransformer, Box
def to_image(arg, **kwargs):
if isinstance(arg, matplotlib.figure.Figure):
return fig_to_image(arg, **kwargs)
elif isinstance(arg, Axes):
is_tight = kwargs.pop("is_tight", True)
return ax_to_image(arg, is_tight, **kwargs)
elif isinstance(arg, Artist):
return artists_to_image(arg)
elif isinstance(arg, Image.Image):
return arg.copy()
if isinstance(arg, Sequence):
if all(isinstance(elem, Artist) for elem in arg):
return artists_to_image(arg)
else:
raise ValueError("All elements must be ``Artist``.")
raise ValueError(f"``{arg}`` cannot be converted to image.")
def fig_to_image(fig, **kwargs):
"""Convert ``matplotlib.Figure`` to ``PIL.Image``.
Args:
kwargs (str):
Keyword parameters for ``Figure.savefig`` except ``fname``.
"""
# Ref: https://stackoverflow.com/questions/8598673/how-to-save-a-pylab-figure-into-in-memory-file-which-can-be-read-into-pil-image/8598881 # NOQA
kwargs["format"] = kwargs.get("format", "png")
kwargs["transparent"] = kwargs.get("transparent", True)
buf = BytesIO()
fig.savefig(buf, **kwargs)
buf.seek(0)
image = Image.open(buf).copy()
buf.close()
return image
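# Minimal usage sketch for ``fig_to_image`` (illustrative only; the figure built
# here is not part of this module):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([0, 1], [1, 0])
#     image = fig_to_image(fig, dpi=150)  # PIL.Image with transparent background
#     image.save("figure.png")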
def ax_to_image(ax, is_tight=True, **kwargs):
"""Convert ``matplotlib.Axes`` to ``PIL.Image``."""
kwargs["transparent"] = kwargs.get("transparent", True)
fig = ax.figure
artists = fig.get_children() # [TODO] Check ``get_axes`` is more apt?
with _store_visibility(artists):
for artist in artists:
if artist is not ax:
artist.set_visible(False)
image = fig_to_image(fig, **kwargs)
if is_tight:
image = _crop_image(image, ax)
bbox = ax.get_tightbbox(fig.canvas.get_renderer())
xmin, xmax = math.floor(bbox.xmin), math.ceil(bbox.xmax)
ymin, ymax = math.floor(bbox.ymin), math.ceil(bbox.ymax)
image = image.crop([xmin, ymin, xmax, ymax])
return image
def artists_to_image(artists, is_tight=True, **kwargs):
if isinstance(artists, Artist):
artists = [artists]
if not artists:
raise ValueError("``Empty Collection of Artists.``")
# Check whether the all belongs to the same figure.
figures = [artist.get_figure() for artist in artists]
figures = [figure for figure in figures if figure]
figures = set(figures)
if not figures:
raise ValueError("Figure does not exist.")
    elif 1 < len(figures):
        raise ValueError("All the ``Artists`` must belong to the same Figure.")
figure = list(figures)[0]
target_pairs = sum([_get_artist_pairs(artist) for artist in artists], [])
target_ids = {id(pair[0]) for pair in target_pairs}
pairs = _get_artist_pairs(figure)
leaf_artists = [artist for artist, has_child in pairs if not has_child]
with _store_visibility(leaf_artists):
for artist in leaf_artists:
if id(artist) not in target_ids:
artist.set_visible(False)
image = fig_to_image(figure, **kwargs)
if is_tight:
image = _crop_image(image, artists)
return image
def _get_artist_pairs(root):
result = list()
def _inner(artist):
children = artist.get_children()
has_child = True if children else False
for child in children:
_inner(child)
pair = (artist, has_child)
result.append(pair)
_inner(root)
return result
def _get_bbox(image):
"""
(2020-01-12)
``Image.getbbox()`` does not seem to work intendedly. (Really?)
So, substitution is implemented.
"""
assert image.mode == "RGBA"
width, height = image.size
array = np.array(image)
alpha = array[:, :, -1]
ys, xs = np.where(alpha != 0)
xmin, xmax = np.min(xs) - 1, np.max(xs) + 1
ymin, ymax = np.min(ys) - 1, np.max(ys) + 1
xmin = np.clip(xmin, 0, width)
xmax = np.clip(xmax, 0, width)
ymin = np.clip(ymin, 0, height)
ymax = np.clip(ymax, 0, height)
return xmin, ymin, xmax, ymax
def _crop_image(fig_image, artist):
"""Crop the ``fig_image`` so that only ROI of ``target`` remains."""
width, height = fig_image.size
from figpptx import artist_misc
transformer = SlideTransformer(0, 0, size=(width, height), offset=(0, 0))
if isinstance(artist, Axes):
fig = artist_misc.to_figure(artist)
renderer = fig.canvas.get_renderer()
bbox = artist.get_tightbbox(renderer)
vertices = transformer.transform(bbox)
box = Box.from_vertices(vertices)
elif isinstance(artist, Artist):
box = transformer.get_box(artist)
elif isinstance(artist, Sequence):
boxes = [transformer.get_box(elem) for elem in artist]
box = Box.union(boxes)
else:
raise ValueError("Argument Error.", artist)
xmin, xmax = math.floor(box.left), math.ceil(box.left + box.width)
ymin, ymax = math.floor(box.top), math.ceil(box.top + box.height)
xmin, xmax = max(0, xmin), min(xmax, width - 1)
ymin, ymax = max(0, ymin), min(ymax, height - 1)
image = fig_image.crop([xmin, ymin, xmax + 1, ymax + 1])
return image
@contextmanager
def _store_visibility(artists):
stored = dict()
for artist in artists:
stored[id(artist)] = artist.get_visible()
def _restore():
for artist in artists:
artist.set_visible(stored[id(artist)])
try:
yield
except Exception as e:
_restore()
raise e
else:
_restore()
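# Intended use of ``_store_visibility`` (illustrative): hide artists temporarily
# and have their original visibility restored even if rendering raises.
#
#     with _store_visibility(fig.get_children()):
#         for artist in fig.get_children():
#             artist.set_visible(False)
#         image = fig_to_image(fig)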
if __name__ == "__main__":
pass
|
[
"io.BytesIO",
"math.ceil",
"figpptx.slide_editor.Box.from_vertices",
"math.floor",
"numpy.clip",
"figpptx.artist_misc.to_figure",
"PIL.Image.open",
"numpy.min",
"numpy.where",
"numpy.array",
"numpy.max",
"figpptx.slide_editor.Box.union",
"figpptx.slide_editor.SlideTransformer"
] |
[((1432, 1441), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1439, 1441), False, 'from io import BytesIO\n'), ((4024, 4039), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4032, 4039), True, 'import numpy as np\n'), ((4081, 4101), 'numpy.where', 'np.where', (['(alpha != 0)'], {}), '(alpha != 0)\n', (4089, 4101), True, 'import numpy as np\n'), ((4209, 4232), 'numpy.clip', 'np.clip', (['xmin', '(0)', 'width'], {}), '(xmin, 0, width)\n', (4216, 4232), True, 'import numpy as np\n'), ((4244, 4267), 'numpy.clip', 'np.clip', (['xmax', '(0)', 'width'], {}), '(xmax, 0, width)\n', (4251, 4267), True, 'import numpy as np\n'), ((4279, 4303), 'numpy.clip', 'np.clip', (['ymin', '(0)', 'height'], {}), '(ymin, 0, height)\n', (4286, 4303), True, 'import numpy as np\n'), ((4315, 4339), 'numpy.clip', 'np.clip', (['ymax', '(0)', 'height'], {}), '(ymax, 0, height)\n', (4322, 4339), True, 'import numpy as np\n'), ((4576, 4635), 'figpptx.slide_editor.SlideTransformer', 'SlideTransformer', (['(0)', '(0)'], {'size': '(width, height)', 'offset': '(0, 0)'}), '(0, 0, size=(width, height), offset=(0, 0))\n', (4592, 4635), False, 'from figpptx.slide_editor import SlideTransformer, Box\n'), ((4683, 4712), 'figpptx.artist_misc.to_figure', 'artist_misc.to_figure', (['artist'], {}), '(artist)\n', (4704, 4712), False, 'from figpptx import artist_misc\n'), ((4865, 4892), 'figpptx.slide_editor.Box.from_vertices', 'Box.from_vertices', (['vertices'], {}), '(vertices)\n', (4882, 4892), False, 'from figpptx.slide_editor import SlideTransformer, Box\n'), ((5185, 5205), 'math.floor', 'math.floor', (['box.left'], {}), '(box.left)\n', (5195, 5205), False, 'import math\n'), ((5207, 5238), 'math.ceil', 'math.ceil', (['(box.left + box.width)'], {}), '(box.left + box.width)\n', (5216, 5238), False, 'import math\n'), ((5256, 5275), 'math.floor', 'math.floor', (['box.top'], {}), '(box.top)\n', (5266, 5275), False, 'import math\n'), ((5277, 5308), 'math.ceil', 'math.ceil', (['(box.top + box.height)'], {}), '(box.top + box.height)\n', (5286, 5308), False, 'import math\n'), ((1501, 1516), 'PIL.Image.open', 'Image.open', (['buf'], {}), '(buf)\n', (1511, 1516), False, 'from PIL import Image\n'), ((2143, 2164), 'math.floor', 'math.floor', (['bbox.xmin'], {}), '(bbox.xmin)\n', (2153, 2164), False, 'import math\n'), ((2166, 2186), 'math.ceil', 'math.ceil', (['bbox.xmax'], {}), '(bbox.xmax)\n', (2175, 2186), False, 'import math\n'), ((2208, 2229), 'math.floor', 'math.floor', (['bbox.ymin'], {}), '(bbox.ymin)\n', (2218, 2229), False, 'import math\n'), ((2231, 2251), 'math.ceil', 'math.ceil', (['bbox.ymax'], {}), '(bbox.ymax)\n', (2240, 2251), False, 'import math\n'), ((4119, 4129), 'numpy.min', 'np.min', (['xs'], {}), '(xs)\n', (4125, 4129), True, 'import numpy as np\n'), ((4135, 4145), 'numpy.max', 'np.max', (['xs'], {}), '(xs)\n', (4141, 4145), True, 'import numpy as np\n'), ((4167, 4177), 'numpy.min', 'np.min', (['ys'], {}), '(ys)\n', (4173, 4177), True, 'import numpy as np\n'), ((4183, 4193), 'numpy.max', 'np.max', (['ys'], {}), '(ys)\n', (4189, 4193), True, 'import numpy as np\n'), ((5088, 5104), 'figpptx.slide_editor.Box.union', 'Box.union', (['boxes'], {}), '(boxes)\n', (5097, 5104), False, 'from figpptx.slide_editor import SlideTransformer, Box\n')]
|
import pytest
import numpy as np
from numpy.testing import assert_, run_module_suite
from qutip import (smesolve, mesolve, photocurrent_mesolve, liouvillian,
QobjEvo, spre, spost, destroy, coherent, parallel_map,
qeye, fock_dm, general_stochastic, ket2dm, num)
def f(t, args):
return args["a"] * t
@pytest.mark.slow
def test_smesolve_homodyne_methods():
"Stochastic: smesolve: homodyne methods with single jump operator"
def arccoth(x):
return 0.5*np.log((1.+x)/(x-1.))
th = 0.1 # Interaction parameter
alpha = np.cos(th)
beta = np.sin(th)
gamma = 1.
N = 30 # number of Fock states
Id = qeye(N)
a = destroy(N)
s = 0.5*((alpha+beta)*a + (alpha-beta)*a.dag())
x = (a + a.dag()) * 2**-0.5
H = Id
c_op = [gamma**0.5 * a]
sc_op = [s]
e_op = [x, x*x]
rho0 = fock_dm(N,0) # initial vacuum state
T = 3. # final time
# number of time steps for which we save the expectation values
N_store = 121
Nsub = 10
tlist = np.linspace(0, T, N_store)
ddt = (tlist[1]-tlist[0])
#### Analytic solution
y0 = 0.5
A = (gamma**2 + alpha**2 * (beta**2 + 4*gamma) - 2*alpha*beta*gamma)**0.5
B = arccoth((-4*alpha**2*y0 + alpha*beta - gamma)/A)
y_an = (alpha*beta - gamma + A / np.tanh(0.5*A*tlist - B))/(4*alpha**2)
list_methods_tol = [['euler-maruyama', 2e-2],
['pc-euler', 2e-3],
['pc-euler-2', 2e-3],
['platen', 1e-3],
['milstein', 1e-3],
['milstein-imp', 1e-3],
['rouchon', 1e-3],
['taylor1.5', 1e-4],
['taylor1.5-imp', 1e-4],
['explicit1.5', 1e-4],
['taylor2.0', 1e-4]]
for n_method in list_methods_tol:
sol = smesolve(H, rho0, tlist, c_op, sc_op, e_op,
nsubsteps=Nsub, method='homodyne', solver = n_method[0])
sol2 = smesolve(H, rho0, tlist, c_op, sc_op, e_op, store_measurement=0,
nsubsteps=Nsub, method='homodyne', solver = n_method[0],
noise = sol.noise)
sol3 = smesolve(H, rho0, tlist, c_op, sc_op, e_op,
nsubsteps=Nsub*5, method='homodyne',
solver = n_method[0], tol=1e-8)
err = 1/T * np.sum(np.abs(y_an - \
(sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
err3 = 1/T * np.sum(np.abs(y_an - \
(sol3.expect[1]-sol3.expect[0]*sol3.expect[0].conj())))*ddt
print(n_method[0], ': deviation =', err, ', tol =', n_method[1])
assert_(err < n_method[1])
        # 5x more substeps should decrease the error
assert_(err3 < err)
# just to check that noise is not affected by smesolve
assert_(np.all(sol.noise == sol2.noise))
assert_(np.all(sol.expect[0] == sol2.expect[0]))
sol = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2,
nsubsteps=Nsub, method='homodyne', solver='euler',
store_measurement=1)
sol2 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2,
nsubsteps=Nsub, method='homodyne', solver='euler',
store_measurement=0)
sol3 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=11, ntraj=2,
nsubsteps=Nsub, method='homodyne', solver='euler')
    # sol and sol2 have the same seed; sol3 differs.
assert_(np.all(sol.noise == sol2.noise))
assert_(np.all(sol.noise != sol3.noise))
assert_(not np.all(sol.measurement[0] == 0.+0j))
assert_(np.all(sol2.measurement[0] == 0.+0j))
sol = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=np.array([1,2]),
ntraj=2, nsubsteps=Nsub, method='homodyne', solver='euler')
sol2 = smesolve(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=np.array([2,1]),
ntraj=2, nsubsteps=Nsub, method='homodyne', solver='euler')
    # sol and sol2 have the seeds of trajectories 1 and 2 swapped.
assert_(np.all(sol.noise[0,:,:,:] == sol2.noise[1,:,:,:]))
assert_(np.all(sol.noise[1,:,:,:] == sol2.noise[0,:,:,:]))
def test_smesolve_photocurrent():
"Stochastic: photocurrent_mesolve"
tol = 0.01
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 100
a = destroy(N)
H = [[a.dag() * a,f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
times = np.linspace(0, 1.0, 21)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
res = photocurrent_mesolve(H, psi0, times, [], sc_ops, e_ops, args={"a":2},
ntraj=ntraj, nsubsteps=nsubsteps, store_measurement=True,
map_func=parallel_map)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
assert_(all([m.shape == (len(times), len(sc_ops))
for m in res.measurement]))
def test_smesolve_homodyne():
"Stochastic: smesolve: homodyne, time-dependent H"
tol = 0.01
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 100
a = destroy(N)
H = [[a.dag() * a,f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
times = np.linspace(0, 1.0, 21)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
list_methods_tol = ['euler-maruyama',
'pc-euler',
'pc-euler-2',
'platen',
'milstein',
'milstein-imp',
'rouchon',
'taylor15',
'taylor15-imp',
'explicit15']
for solver in list_methods_tol:
res = smesolve(H, psi0, times, [], sc_ops, e_ops,
ntraj=ntraj, nsubsteps=nsubsteps, args={"a":2},
method='homodyne', store_measurement=True,
solver=solver, map_func=parallel_map)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
assert_(all([m.shape == (len(times), len(sc_ops))
for m in res.measurement]))
@pytest.mark.slow
def test_smesolve_heterodyne():
"Stochastic: smesolve: heterodyne, time-dependent H"
tol = 0.01
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 100
a = destroy(N)
H = [[a.dag() * a, f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
times = np.linspace(0, 1.0, 21)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
list_methods_tol = ['euler-maruyama',
'pc-euler',
'pc-euler-2',
'platen',
'milstein',
'milstein-imp',
'rouchon',
'taylor15',
'taylor15-imp',
'explicit15']
for solver in list_methods_tol:
res = smesolve(H, psi0, times, [], sc_ops, e_ops,
ntraj=ntraj, nsubsteps=nsubsteps, args={"a":2},
method='heterodyne', store_measurement=True,
solver=solver, map_func=parallel_map)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
assert_(all([m.shape == (len(times), len(sc_ops), 2)
for m in res.measurement]))
@pytest.mark.slow
def test_general_stochastic():
"Stochastic: general_stochastic"
"Reproduce smesolve homodyne"
tol = 0.025
N = 4
gamma = 0.25
ntraj = 20
nsubsteps = 50
a = destroy(N)
H = [[a.dag() * a,f]]
psi0 = coherent(N, 0.5)
sc_ops = [np.sqrt(gamma) * a, np.sqrt(gamma) * a * 0.5]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag())]
L = liouvillian(QobjEvo([[a.dag() * a,f]], args={"a":2}), c_ops = sc_ops)
L.compile()
sc_opsM = [QobjEvo(spre(op) + spost(op.dag())) for op in sc_ops]
[op.compile() for op in sc_opsM]
e_opsM = [spre(op) for op in e_ops]
def d1(t, vec):
return L.mul_vec(t,vec)
def d2(t, vec):
out = []
for op in sc_opsM:
out.append(op.mul_vec(t,vec)-op.expect(t,vec)*vec)
return np.stack(out)
times = np.linspace(0, 0.5, 13)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops, args={"a":2})
list_methods_tol = ['euler-maruyama',
'platen',
'explicit15']
for solver in list_methods_tol:
res = general_stochastic(ket2dm(psi0),times,d1,d2,len_d2=2, e_ops=e_opsM,
normalize=False, ntraj=ntraj, nsubsteps=nsubsteps,
solver=solver)
assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
assert_(len(res.measurement) == ntraj)
def f_dargs(a, args):
return args["expect_op_3"] - 1
def test_ssesolve_feedback():
"Stochastic: ssesolve: time-dependent H with feedback"
tol = 0.01
N = 4
ntraj = 10
nsubsteps = 100
a = destroy(N)
H = [num(N)]
psi0 = coherent(N, 2.5)
sc_ops = [[a + a.dag(), f_dargs]]
e_ops = [a.dag() * a, a + a.dag(), (-1j)*(a - a.dag()), qeye(N)]
times = np.linspace(0, 10, 101)
res_ref = mesolve(H, psi0, times, sc_ops, e_ops,
args={"expect_op_3":qeye(N)})
res = smesolve(H, psi0, times, sc_ops=sc_ops, e_ops=e_ops, noise=1,
ntraj=ntraj, nsubsteps=nsubsteps, method='homodyne',
map_func=parallel_map, args={"expect_op_3":qeye(N)})
print(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
for idx in range(len(e_ops))]))
if __name__ == "__main__":
run_module_suite()
|
[
"qutip.num",
"qutip.coherent",
"numpy.sin",
"qutip.destroy",
"qutip.fock_dm",
"numpy.testing.run_module_suite",
"numpy.linspace",
"qutip.photocurrent_mesolve",
"qutip.mesolve",
"qutip.spre",
"numpy.stack",
"qutip.smesolve",
"numpy.tanh",
"qutip.qeye",
"numpy.testing.assert_",
"numpy.cos",
"numpy.all",
"numpy.log",
"qutip.ket2dm",
"numpy.array",
"numpy.sqrt"
] |
[((583, 593), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (589, 593), True, 'import numpy as np\n'), ((605, 615), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (611, 615), True, 'import numpy as np\n'), ((692, 699), 'qutip.qeye', 'qeye', (['N'], {}), '(N)\n', (696, 699), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((708, 718), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (715, 718), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((889, 902), 'qutip.fock_dm', 'fock_dm', (['N', '(0)'], {}), '(N, 0)\n', (896, 902), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((1085, 1111), 'numpy.linspace', 'np.linspace', (['(0)', 'T', 'N_store'], {}), '(0, T, N_store)\n', (1096, 1111), True, 'import numpy as np\n'), ((3065, 3207), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist[:2]', 'c_op', 'sc_op', 'e_op'], {'noise': '(10)', 'ntraj': '(2)', 'nsubsteps': 'Nsub', 'method': '"""homodyne"""', 'solver': '"""euler"""', 'store_measurement': '(1)'}), "(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2,\n nsubsteps=Nsub, method='homodyne', solver='euler', store_measurement=1)\n", (3073, 3207), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((3255, 3397), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist[:2]', 'c_op', 'sc_op', 'e_op'], {'noise': '(10)', 'ntraj': '(2)', 'nsubsteps': 'Nsub', 'method': '"""homodyne"""', 'solver': '"""euler"""', 'store_measurement': '(0)'}), "(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=10, ntraj=2,\n nsubsteps=Nsub, method='homodyne', solver='euler', store_measurement=0)\n", (3263, 3397), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((3445, 3566), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist[:2]', 'c_op', 'sc_op', 'e_op'], {'noise': '(11)', 'ntraj': '(2)', 'nsubsteps': 'Nsub', 'method': '"""homodyne"""', 'solver': '"""euler"""'}), "(H, rho0, tlist[:2], c_op, sc_op, e_op, noise=11, ntraj=2,\n nsubsteps=Nsub, method='homodyne', solver='euler')\n", (3453, 3566), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((4495, 4505), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (4502, 4505), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((4544, 4560), 'qutip.coherent', 'coherent', (['N', '(0.5)'], {}), '(N, 0.5)\n', (4552, 4560), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((4694, 4717), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(21)'], {}), '(0, 1.0, 21)\n', (4705, 4717), True, 'import numpy as np\n'), ((4732, 4785), 'qutip.mesolve', 'mesolve', 
(['H', 'psi0', 'times', 'sc_ops', 'e_ops'], {'args': "{'a': 2}"}), "(H, psi0, times, sc_ops, e_ops, args={'a': 2})\n", (4739, 4785), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((4795, 4955), 'qutip.photocurrent_mesolve', 'photocurrent_mesolve', (['H', 'psi0', 'times', '[]', 'sc_ops', 'e_ops'], {'args': "{'a': 2}", 'ntraj': 'ntraj', 'nsubsteps': 'nsubsteps', 'store_measurement': '(True)', 'map_func': 'parallel_map'}), "(H, psi0, times, [], sc_ops, e_ops, args={'a': 2},\n ntraj=ntraj, nsubsteps=nsubsteps, store_measurement=True, map_func=\n parallel_map)\n", (4815, 4955), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((5425, 5435), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (5432, 5435), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((5474, 5490), 'qutip.coherent', 'coherent', (['N', '(0.5)'], {}), '(N, 0.5)\n', (5482, 5490), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((5624, 5647), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(21)'], {}), '(0, 1.0, 21)\n', (5635, 5647), True, 'import numpy as np\n'), ((5662, 5715), 'qutip.mesolve', 'mesolve', (['H', 'psi0', 'times', 'sc_ops', 'e_ops'], {'args': "{'a': 2}"}), "(H, psi0, times, sc_ops, e_ops, args={'a': 2})\n", (5669, 5715), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((6863, 6873), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (6870, 6873), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((6913, 6929), 'qutip.coherent', 'coherent', (['N', '(0.5)'], {}), '(N, 0.5)\n', (6921, 6929), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((7063, 7086), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(21)'], {}), '(0, 1.0, 21)\n', (7074, 7086), True, 'import numpy as np\n'), ((7101, 7154), 'qutip.mesolve', 'mesolve', (['H', 'psi0', 'times', 'sc_ops', 'e_ops'], {'args': "{'a': 2}"}), "(H, psi0, times, sc_ops, e_ops, args={'a': 2})\n", (7108, 7154), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((8319, 8329), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (8326, 8329), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((8368, 8384), 'qutip.coherent', 'coherent', (['N', '(0.5)'], {}), '(N, 0.5)\n', (8376, 8384), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, 
num\n'), ((8969, 8992), 'numpy.linspace', 'np.linspace', (['(0)', '(0.5)', '(13)'], {}), '(0, 0.5, 13)\n', (8980, 8992), True, 'import numpy as np\n'), ((9007, 9060), 'qutip.mesolve', 'mesolve', (['H', 'psi0', 'times', 'sc_ops', 'e_ops'], {'args': "{'a': 2}"}), "(H, psi0, times, sc_ops, e_ops, args={'a': 2})\n", (9014, 9060), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((9809, 9819), 'qutip.destroy', 'destroy', (['N'], {}), '(N)\n', (9816, 9819), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((9849, 9865), 'qutip.coherent', 'coherent', (['N', '(2.5)'], {}), '(N, 2.5)\n', (9857, 9865), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((9986, 10009), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(101)'], {}), '(0, 10, 101)\n', (9997, 10009), True, 'import numpy as np\n'), ((10486, 10504), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (10502, 10504), False, 'from numpy.testing import assert_, run_module_suite\n'), ((1951, 2054), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist', 'c_op', 'sc_op', 'e_op'], {'nsubsteps': 'Nsub', 'method': '"""homodyne"""', 'solver': 'n_method[0]'}), "(H, rho0, tlist, c_op, sc_op, e_op, nsubsteps=Nsub, method=\n 'homodyne', solver=n_method[0])\n", (1959, 2054), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((2090, 2231), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist', 'c_op', 'sc_op', 'e_op'], {'store_measurement': '(0)', 'nsubsteps': 'Nsub', 'method': '"""homodyne"""', 'solver': 'n_method[0]', 'noise': 'sol.noise'}), "(H, rho0, tlist, c_op, sc_op, e_op, store_measurement=0, nsubsteps=\n Nsub, method='homodyne', solver=n_method[0], noise=sol.noise)\n", (2098, 2231), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((2292, 2410), 'qutip.smesolve', 'smesolve', (['H', 'rho0', 'tlist', 'c_op', 'sc_op', 'e_op'], {'nsubsteps': '(Nsub * 5)', 'method': '"""homodyne"""', 'solver': 'n_method[0]', 'tol': '(1e-08)'}), "(H, rho0, tlist, c_op, sc_op, e_op, nsubsteps=Nsub * 5, method=\n 'homodyne', solver=n_method[0], tol=1e-08)\n", (2300, 2410), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((2778, 2804), 'numpy.testing.assert_', 'assert_', (['(err < n_method[1])'], {}), '(err < n_method[1])\n', (2785, 2804), False, 'from numpy.testing import assert_, run_module_suite\n'), ((2865, 2884), 'numpy.testing.assert_', 'assert_', (['(err3 < err)'], {}), '(err3 < err)\n', (2872, 2884), False, 'from numpy.testing import assert_, run_module_suite\n'), ((3647, 3678), 'numpy.all', 'np.all', (['(sol.noise == sol2.noise)'], {}), '(sol.noise == sol2.noise)\n', (3653, 3678), True, 'import numpy as np\n'), ((3692, 3723), 'numpy.all', 'np.all', (['(sol.noise != sol3.noise)'], {}), '(sol.noise != sol3.noise)\n', (3698, 3723), 
True, 'import numpy as np\n'), ((3790, 3831), 'numpy.all', 'np.all', (['(sol2.measurement[0] == 0.0 + 0.0j)'], {}), '(sol2.measurement[0] == 0.0 + 0.0j)\n', (3796, 3831), True, 'import numpy as np\n'), ((4220, 4275), 'numpy.all', 'np.all', (['(sol.noise[0, :, :, :] == sol2.noise[1, :, :, :])'], {}), '(sol.noise[0, :, :, :] == sol2.noise[1, :, :, :])\n', (4226, 4275), True, 'import numpy as np\n'), ((4283, 4338), 'numpy.all', 'np.all', (['(sol.noise[1, :, :, :] == sol2.noise[0, :, :, :])'], {}), '(sol.noise[1, :, :, :] == sol2.noise[0, :, :, :])\n', (4289, 4338), True, 'import numpy as np\n'), ((6140, 6322), 'qutip.smesolve', 'smesolve', (['H', 'psi0', 'times', '[]', 'sc_ops', 'e_ops'], {'ntraj': 'ntraj', 'nsubsteps': 'nsubsteps', 'args': "{'a': 2}", 'method': '"""homodyne"""', 'store_measurement': '(True)', 'solver': 'solver', 'map_func': 'parallel_map'}), "(H, psi0, times, [], sc_ops, e_ops, ntraj=ntraj, nsubsteps=\n nsubsteps, args={'a': 2}, method='homodyne', store_measurement=True,\n solver=solver, map_func=parallel_map)\n", (6148, 6322), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((7579, 7763), 'qutip.smesolve', 'smesolve', (['H', 'psi0', 'times', '[]', 'sc_ops', 'e_ops'], {'ntraj': 'ntraj', 'nsubsteps': 'nsubsteps', 'args': "{'a': 2}", 'method': '"""heterodyne"""', 'store_measurement': '(True)', 'solver': 'solver', 'map_func': 'parallel_map'}), "(H, psi0, times, [], sc_ops, e_ops, ntraj=ntraj, nsubsteps=\n nsubsteps, args={'a': 2}, method='heterodyne', store_measurement=True,\n solver=solver, map_func=parallel_map)\n", (7587, 7763), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((8720, 8728), 'qutip.spre', 'spre', (['op'], {}), '(op)\n', (8724, 8728), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((8942, 8955), 'numpy.stack', 'np.stack', (['out'], {}), '(out)\n', (8950, 8955), True, 'import numpy as np\n'), ((9830, 9836), 'qutip.num', 'num', (['N'], {}), '(N)\n', (9833, 9836), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((9964, 9971), 'qutip.qeye', 'qeye', (['N'], {}), '(N)\n', (9968, 9971), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((511, 540), 'numpy.log', 'np.log', (['((1.0 + x) / (x - 1.0))'], {}), '((1.0 + x) / (x - 1.0))\n', (517, 540), True, 'import numpy as np\n'), ((2964, 2995), 'numpy.all', 'np.all', (['(sol.noise == sol2.noise)'], {}), '(sol.noise == sol2.noise)\n', (2970, 2995), True, 'import numpy as np\n'), ((3013, 3052), 'numpy.all', 'np.all', (['(sol.expect[0] == sol2.expect[0])'], {}), '(sol.expect[0] == sol2.expect[0])\n', (3019, 3052), True, 'import numpy as np\n'), ((3741, 3781), 'numpy.all', 'np.all', (['(sol.measurement[0] == 0.0 + 0.0j)'], {}), '(sol.measurement[0] == 0.0 + 0.0j)\n', (3747, 3781), True, 'import numpy as np\n'), ((3892, 3908), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (3900, 3908), True, 'import numpy as np\n'), ((4053, 4069), 
'numpy.array', 'np.array', (['[2, 1]'], {}), '([2, 1])\n', (4061, 4069), True, 'import numpy as np\n'), ((4575, 4589), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (4582, 4589), True, 'import numpy as np\n'), ((5505, 5519), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (5512, 5519), True, 'import numpy as np\n'), ((6944, 6958), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (6951, 6958), True, 'import numpy as np\n'), ((8399, 8413), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (8406, 8413), True, 'import numpy as np\n'), ((9243, 9255), 'qutip.ket2dm', 'ket2dm', (['psi0'], {}), '(psi0)\n', (9249, 9255), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((1355, 1383), 'numpy.tanh', 'np.tanh', (['(0.5 * A * tlist - B)'], {}), '(0.5 * A * tlist - B)\n', (1362, 1383), True, 'import numpy as np\n'), ((4595, 4609), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (4602, 4609), True, 'import numpy as np\n'), ((5525, 5539), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (5532, 5539), True, 'import numpy as np\n'), ((6964, 6978), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (6971, 6978), True, 'import numpy as np\n'), ((8419, 8433), 'numpy.sqrt', 'np.sqrt', (['gamma'], {}), '(gamma)\n', (8426, 8433), True, 'import numpy as np\n'), ((8623, 8631), 'qutip.spre', 'spre', (['op'], {}), '(op)\n', (8627, 8631), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((10105, 10112), 'qutip.qeye', 'qeye', (['N'], {}), '(N)\n', (10109, 10112), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n'), ((10321, 10328), 'qutip.qeye', 'qeye', (['N'], {}), '(N)\n', (10325, 10328), False, 'from qutip import smesolve, mesolve, photocurrent_mesolve, liouvillian, QobjEvo, spre, spost, destroy, coherent, parallel_map, qeye, fock_dm, general_stochastic, ket2dm, num\n')]
|
from typing import Dict, List, Union
from typeguard import check_argument_types
import tensorflow as tf
import numpy as np
from neuralmonkey.decoders.autoregressive import AutoregressiveDecoder
from neuralmonkey.decoders.sequence_labeler import SequenceLabeler
from neuralmonkey.decorators import tensor
from neuralmonkey.runners.base_runner import BaseRunner
SupportedDecoders = Union[AutoregressiveDecoder, SequenceLabeler]
class XentRunner(BaseRunner[SupportedDecoders]):
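    """Runner that reports the decoder's training cross-entropies ("xents") and
    their mean as the "xent" loss."""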
# pylint: disable=too-few-public-methods
# Pylint issue here: https://github.com/PyCQA/pylint/issues/2607
class Executable(BaseRunner.Executable["XentRunner"]):
def collect_results(self, results: List[Dict]) -> None:
xents = np.mean([res["xents"] for res in results], axis=0)
self.set_runner_result(outputs=xents.tolist(),
losses=[float(np.mean(xents))])
# pylint: enable=too-few-public-methods
def __init__(self,
output_series: str,
decoder: SupportedDecoders) -> None:
check_argument_types()
super().__init__(output_series, decoder)
@tensor
def fetches(self) -> Dict[str, tf.Tensor]:
return {"xents": self.decoder.train_xents}
@property
def loss_names(self) -> List[str]:
return ["xent"]
|
[
"numpy.mean",
"typeguard.check_argument_types"
] |
[((1083, 1105), 'typeguard.check_argument_types', 'check_argument_types', ([], {}), '()\n', (1103, 1105), False, 'from typeguard import check_argument_types\n'), ((739, 789), 'numpy.mean', 'np.mean', (["[res['xents'] for res in results]"], {'axis': '(0)'}), "([res['xents'] for res in results], axis=0)\n", (746, 789), True, 'import numpy as np\n'), ((898, 912), 'numpy.mean', 'np.mean', (['xents'], {}), '(xents)\n', (905, 912), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 1 15:45:38 2022
@author: erri
"""
import numpy as np
import os
run = 'q07_1'
DoD_name = 'DoD_s1-s0_filt_nozero_rst.txt'
home_dir = os.getcwd()
DoDs_dir = os.path.join(home_dir, 'DoDs')
DoD_path = os.path.join(DoDs_dir, 'DoD_' + run, DoD_name)
DoD = np.loadtxt(DoD_path, delimiter='\t')
array = np.where(DoD==-999, np.nan, DoD)
def morph_quantities(array):
import numpy as np
'''
    This function computes morphological quantities (volumes, active areas,
    active widths, active thicknesses and the bed relief index) from a DoD array.
    Input:
        array: 2D numpy array
            2D array with np.nan instead of -999.
'''
# Define total volume matrix, Deposition matrix and Scour matrix
vol_array = np.where(np.isnan(array), 0, array) # Total volume matrix
dep_array = (vol_array>0)*vol_array # DoD of only deposition data
sco_array = (vol_array<0)*vol_array # DoD of only scour data
    # Volumes are calculated as the sum of the cell values. The measurement unit is a length.
    # To obtain a volume, each _vol value has to be multiplied by the cell dimensions.
    tot_vol = np.sum(vol_array) # Total net volume as the algebraic sum of all the cells [L]
    sum_vol = np.sum(np.abs(vol_array)) # Sum of scour and deposition volume as the algebraic sum of the abs of each cell [L]
dep_vol = np.sum(dep_array) # Deposition volume as the sum of the value of deposition cell [L]
sco_vol = np.sum(sco_array) # Scour volume as the sum of the value of scour cell [L]
# Define nature array as -1=sco, 0=no_changes, and 1=dep
nature_array = np.where(array>0, 1, array)
nature_array = np.where(nature_array<0, -1, nature_array)
# Define activity array: VERIFIED
tot_act_array = np.where(np.isnan(nature_array), 0, nature_array) # Where active then 1
    dep_act_array = tot_act_array*(tot_act_array>0) # Where deposition then 1
sco_act_array = tot_act_array*(tot_act_array<0) # Where scour then 1
# Calculate morphological quantities VERIFIED
# Active area array is calculated as the number of active cell. To obtain a width the number of cell has to be multiply by the crosswise length of the generic cell
morph_act_area = np.count_nonzero(abs(tot_act_array)) # Active area both in terms of scour and deposition in number of cells [-]
morph_act_area_dep = np.sum(dep_act_array) # Active deposition area in number of cells [-]
morph_act_area_sco = np.sum(abs(sco_act_array)) # Active scour area in number of cells [-]
# Create active width for each cross section
act_width_array = np.array([np.nansum(abs(tot_act_array), axis=0)]) # Array of the crosswise morphological total active width in number of cells
act_width_array_dep = np.array([np.nansum(dep_act_array, axis=0)]) # Array of the crosswise morphological deposition active width in number of cells
act_width_array_sco = np.array([np.nansum(abs(sco_act_array), axis=0)]) # Array of the crosswise morphological scour active width in number of cells
# Calculate the mean of each active width array: VERIFIED
act_width_mean = np.nanmean(act_width_array) # Total mean active width in number of cells (could be a real number)
act_width_mean_dep = np.nanmean(act_width_array_dep) # Deposition mean active width in number of cells (could be a real number)
act_width_mean_sco = np.nanmean(act_width_array_sco) # Scour mean active width in number of cells (could be a real number)
# Calculate active thickness for total volumes, deposition volumes and scour volumes VERIFIED
vol_array=np.where(vol_array==0, np.nan, vol_array)
dep_array=np.where(dep_array==0, np.nan, dep_array)
sco_array=np.where(sco_array==0, np.nan, sco_array)
    act_thickness = np.nanmean(np.abs(dep_array)) + np.nanmean(np.abs(sco_array)) # Active thickness as the sum of the mean deposition and mean scour active thicknesses
act_thickness_dep = np.nanmean(np.abs(dep_array)) # Deposition active thickness (abs(V_sco) + V_dep)/act_area [mm]
act_thickness_sco = np.nanmean(np.abs(sco_array)) # Scour active thickness (abs(V_sco) + V_dep)/act_area [mm]
# Calculate the Bed Relief Index
bri = np.nanstd(array)
return tot_vol, sum_vol, dep_vol, sco_vol, morph_act_area, morph_act_area_dep, morph_act_area_sco, act_width_mean, act_width_mean_dep, act_width_mean_sco, act_thickness, act_thickness_dep, act_thickness_sco, bri
tot_vol, sum_vol, dep_vol, sco_vol, morph_act_area, morph_act_area_dep, morph_act_area_sco, act_width_mean, act_width_mean_dep, act_width_mean_sco, act_thickness, act_thickness_dep, act_thickness_sco, bri = morph_quantities(array)
|
[
"numpy.nansum",
"numpy.sum",
"numpy.abs",
"os.getcwd",
"numpy.nanstd",
"numpy.isnan",
"numpy.where",
"numpy.loadtxt",
"os.path.join",
"numpy.nanmean"
] |
[((204, 215), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (213, 215), False, 'import os\n'), ((227, 257), 'os.path.join', 'os.path.join', (['home_dir', '"""DoDs"""'], {}), "(home_dir, 'DoDs')\n", (239, 257), False, 'import os\n'), ((269, 315), 'os.path.join', 'os.path.join', (['DoDs_dir', "('DoD_' + run)", 'DoD_name'], {}), "(DoDs_dir, 'DoD_' + run, DoD_name)\n", (281, 315), False, 'import os\n'), ((322, 358), 'numpy.loadtxt', 'np.loadtxt', (['DoD_path'], {'delimiter': '"""\t"""'}), "(DoD_path, delimiter='\\t')\n", (332, 358), True, 'import numpy as np\n'), ((368, 402), 'numpy.where', 'np.where', (['(DoD == -999)', 'np.nan', 'DoD'], {}), '(DoD == -999, np.nan, DoD)\n', (376, 402), True, 'import numpy as np\n'), ((1063, 1080), 'numpy.sum', 'np.sum', (['vol_array'], {}), '(vol_array)\n', (1069, 1080), True, 'import numpy as np\n'), ((1276, 1293), 'numpy.sum', 'np.sum', (['dep_array'], {}), '(dep_array)\n', (1282, 1293), True, 'import numpy as np\n'), ((1376, 1393), 'numpy.sum', 'np.sum', (['sco_array'], {}), '(sco_array)\n', (1382, 1393), True, 'import numpy as np\n'), ((1536, 1565), 'numpy.where', 'np.where', (['(array > 0)', '(1)', 'array'], {}), '(array > 0, 1, array)\n', (1544, 1565), True, 'import numpy as np\n'), ((1583, 1627), 'numpy.where', 'np.where', (['(nature_array < 0)', '(-1)', 'nature_array'], {}), '(nature_array < 0, -1, nature_array)\n', (1591, 1627), True, 'import numpy as np\n'), ((2288, 2309), 'numpy.sum', 'np.sum', (['dep_act_array'], {}), '(dep_act_array)\n', (2294, 2309), True, 'import numpy as np\n'), ((3050, 3077), 'numpy.nanmean', 'np.nanmean', (['act_width_array'], {}), '(act_width_array)\n', (3060, 3077), True, 'import numpy as np\n'), ((3173, 3204), 'numpy.nanmean', 'np.nanmean', (['act_width_array_dep'], {}), '(act_width_array_dep)\n', (3183, 3204), True, 'import numpy as np\n'), ((3305, 3336), 'numpy.nanmean', 'np.nanmean', (['act_width_array_sco'], {}), '(act_width_array_sco)\n', (3315, 3336), True, 'import numpy as np\n'), ((3524, 3567), 'numpy.where', 'np.where', (['(vol_array == 0)', 'np.nan', 'vol_array'], {}), '(vol_array == 0, np.nan, vol_array)\n', (3532, 3567), True, 'import numpy as np\n'), ((3580, 3623), 'numpy.where', 'np.where', (['(dep_array == 0)', 'np.nan', 'dep_array'], {}), '(dep_array == 0, np.nan, dep_array)\n', (3588, 3623), True, 'import numpy as np\n'), ((3636, 3679), 'numpy.where', 'np.where', (['(sco_array == 0)', 'np.nan', 'sco_array'], {}), '(sco_array == 0, np.nan, sco_array)\n', (3644, 3679), True, 'import numpy as np\n'), ((4120, 4136), 'numpy.nanstd', 'np.nanstd', (['array'], {}), '(array)\n', (4129, 4136), True, 'import numpy as np\n'), ((689, 704), 'numpy.isnan', 'np.isnan', (['array'], {}), '(array)\n', (697, 704), True, 'import numpy as np\n'), ((1162, 1179), 'numpy.abs', 'np.abs', (['vol_array'], {}), '(vol_array)\n', (1168, 1179), True, 'import numpy as np\n'), ((1698, 1720), 'numpy.isnan', 'np.isnan', (['nature_array'], {}), '(nature_array)\n', (1706, 1720), True, 'import numpy as np\n'), ((3870, 3887), 'numpy.abs', 'np.abs', (['dep_array'], {}), '(dep_array)\n', (3876, 3887), True, 'import numpy as np\n'), ((3989, 4006), 'numpy.abs', 'np.abs', (['sco_array'], {}), '(sco_array)\n', (3995, 4006), True, 'import numpy as np\n'), ((2692, 2724), 'numpy.nansum', 'np.nansum', (['dep_act_array'], {'axis': '(0)'}), '(dep_act_array, axis=0)\n', (2701, 2724), True, 'import numpy as np\n'), ((3709, 3726), 'numpy.abs', 'np.abs', (['dep_array'], {}), '(dep_array)\n', (3715, 3726), True, 'import numpy as np\n'), ((3741, 3758), 
'numpy.abs', 'np.abs', (['sco_array'], {}), '(sco_array)\n', (3747, 3758), True, 'import numpy as np\n')]
|
# Copyright 2016 <NAME> and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import gzip
import logging
import os
import pickle
import re
import shutil
import time
import numpy as np
from IProgress import ProgressBar, Percentage
from cameo import fba
from cameo.flux_analysis.analysis import n_carbon
from cobra.core.reaction import Reaction
from marsi import config
__all__ = ['data_dir', 'log_dir', 'pickle_large', 'unpickle_large', 'frange', 'src_dir', 'internal_data_dir']
data_dir = os.path.join(config.prj_dir, "data")
models_dir = os.path.join(config.prj_dir, "models")
log_dir = os.path.join(config.prj_dir, "log")
src_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)))
internal_data_dir = os.path.join(src_dir, 'io', 'files')
INCHI_KEY_TYPE = np.dtype("a27")
BIOMASS_RE = re.compile("biomass", re.IGNORECASE)
MAX_BYTES = 2 ** 31 - 1
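# pickle_large/unpickle_large read and write the pickled byte stream in chunks of at
# most MAX_BYTES (2**31 - 1), to stay below the ~2 GiB single-call I/O limit that some
# platforms impose.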
logger = logging.getLogger(__name__)
def pickle_large(obj, file_path, progress=False):
with open(file_path, 'wb') as model_handler:
bytes_out = pickle.dumps(obj)
output_size = len(bytes_out)
if progress:
pbar = ProgressBar(maxval=output_size, widgets=["Writing ", Percentage()])
for idx in pbar(range(0, output_size, MAX_BYTES)):
model_handler.write(bytes_out[idx:idx + MAX_BYTES])
else:
for idx in range(0, output_size, MAX_BYTES):
model_handler.write(bytes_out[idx:idx + MAX_BYTES])
def unpickle_large(file_path, progress=False):
input_size = os.path.getsize(file_path)
logger.debug("Input size: %f bytes" % input_size)
with open(file_path, 'rb') as file_handler:
bytes_in = bytearray(0)
if progress:
pbar = ProgressBar(maxval=input_size, widgets=["Loading ", Percentage()])
for _ in pbar(range(0, input_size, MAX_BYTES)):
bytes_in += file_handler.read(MAX_BYTES)
else:
for _ in range(0, input_size, MAX_BYTES):
bytes_in += file_handler.read(MAX_BYTES)
return pickle.loads(bytes_in)
def frange(start, stop=None, steps=10):
"""
Float range generator.
    Generates *steps* equally spaced values between *start* and *stop*.
    If *stop* is None, the values lie between 0 and *start*.
Parameters
----------
start : float
The initial value.
stop : float
The final value.
steps : int
Number of values to generate.
Returns
-------
generator
A generator that yields float.
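
    Examples
    --------
    A minimal illustration:

    >>> list(frange(0.0, 1.0, steps=4))
    [0.0, 0.25, 0.5, 0.75]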
"""
if stop is None:
stop = start
start = 0
# Python 2 division of int returns int
start = float(start)
stop = float(stop)
step_size = (stop - start) / float(steps)
logger.debug("Step size %f" % step_size)
for i in range(steps):
logger.debug("Iteration %i: %f" % (i + 1, i * step_size))
yield start + i * step_size
def unique(l):
"""
    Removes repeated values from a list in place, preserving order.
Parameters
----------
l: list
Returns
-------
    None
        The list is modified in place so that only the first occurrence of each value is kept.
"""
s = set()
n = 0
for x in l:
if x not in s:
s.add(x)
l[n] = x
n += 1
del l[n:]
def timing(debug=False): # pragma: no cover
def function_wrapper(func):
if debug:
def debug_wrap_func(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
stop = time.time()
if config.log.level >= config.Level.DEBUG:
print('%s function took %0.3f ms' % (func.__name__, (stop - start) * 1000.0))
return ret
return debug_wrap_func
else:
def wrap_func(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
stop = time.time()
print('%s function took %0.3f ms' % (func.__name__, (stop - start) * 1000.0))
return ret
return wrap_func
return function_wrapper
def default_carbon_sources(model):
solution = fba(model)
carbon_sources = []
for ex in model.exchanges:
assert isinstance(ex, Reaction)
if ex.lower_bound < 0 and solution[ex.id] < 0 < n_carbon(ex):
logger.debug("Found carbon source: %s")
carbon_sources.append(ex)
return carbon_sources
def gunzip(file):
assert file[-3:] == ".gz"
in_name = file
out_name = file[0:-3]
with gzip.open(in_name, 'rb') as f_in, open(out_name, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def search_metabolites(model, species_id, ignore_external=True):
if ignore_external:
return model.metabolites.query(lambda mid: mid[:-2] == species_id and mid[-2:] != "_e", attribute='id')
else:
return model.metabolites.query(lambda mid: mid[:-2] == species_id, attribute='id')
|
[
"pickle.loads",
"gzip.open",
"pickle.dumps",
"IProgress.Percentage",
"os.path.getsize",
"os.path.dirname",
"numpy.dtype",
"time.time",
"cameo.fba",
"cameo.flux_analysis.analysis.n_carbon",
"shutil.copyfileobj",
"os.path.join",
"logging.getLogger",
"re.compile"
] |
[((1086, 1122), 'os.path.join', 'os.path.join', (['config.prj_dir', '"""data"""'], {}), "(config.prj_dir, 'data')\n", (1098, 1122), False, 'import os\n'), ((1136, 1174), 'os.path.join', 'os.path.join', (['config.prj_dir', '"""models"""'], {}), "(config.prj_dir, 'models')\n", (1148, 1174), False, 'import os\n'), ((1185, 1220), 'os.path.join', 'os.path.join', (['config.prj_dir', '"""log"""'], {}), "(config.prj_dir, 'log')\n", (1197, 1220), False, 'import os\n'), ((1309, 1345), 'os.path.join', 'os.path.join', (['src_dir', '"""io"""', '"""files"""'], {}), "(src_dir, 'io', 'files')\n", (1321, 1345), False, 'import os\n'), ((1364, 1379), 'numpy.dtype', 'np.dtype', (['"""a27"""'], {}), "('a27')\n", (1372, 1379), True, 'import numpy as np\n'), ((1394, 1430), 're.compile', 're.compile', (['"""biomass"""', 're.IGNORECASE'], {}), "('biomass', re.IGNORECASE)\n", (1404, 1430), False, 'import re\n'), ((1466, 1493), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1483, 1493), False, 'import logging\n'), ((2114, 2140), 'os.path.getsize', 'os.path.getsize', (['file_path'], {}), '(file_path)\n', (2129, 2140), False, 'import os\n'), ((2637, 2659), 'pickle.loads', 'pickle.loads', (['bytes_in'], {}), '(bytes_in)\n', (2649, 2659), False, 'import pickle\n'), ((4717, 4727), 'cameo.fba', 'fba', (['model'], {}), '(model)\n', (4720, 4727), False, 'from cameo import fba\n'), ((1260, 1285), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1275, 1285), False, 'import os\n'), ((1615, 1632), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (1627, 1632), False, 'import pickle\n'), ((5115, 5139), 'gzip.open', 'gzip.open', (['in_name', '"""rb"""'], {}), "(in_name, 'rb')\n", (5124, 5139), False, 'import gzip\n'), ((5188, 5219), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (5206, 5219), False, 'import shutil\n'), ((4004, 4015), 'time.time', 'time.time', ([], {}), '()\n', (4013, 4015), False, 'import time\n'), ((4083, 4094), 'time.time', 'time.time', ([], {}), '()\n', (4092, 4094), False, 'import time\n'), ((4396, 4407), 'time.time', 'time.time', ([], {}), '()\n', (4405, 4407), False, 'import time\n'), ((4475, 4486), 'time.time', 'time.time', ([], {}), '()\n', (4484, 4486), False, 'import time\n'), ((4880, 4892), 'cameo.flux_analysis.analysis.n_carbon', 'n_carbon', (['ex'], {}), '(ex)\n', (4888, 4892), False, 'from cameo.flux_analysis.analysis import n_carbon\n'), ((1763, 1775), 'IProgress.Percentage', 'Percentage', ([], {}), '()\n', (1773, 1775), False, 'from IProgress import ProgressBar, Percentage\n'), ((2368, 2380), 'IProgress.Percentage', 'Percentage', ([], {}), '()\n', (2378, 2380), False, 'from IProgress import ProgressBar, Percentage\n')]
|
from __future__ import division
import random
import threading
import numpy as np
# Major -> Mixolydian (-) | Lydian (-) 0
# Dorian -> Minor (-) | Mixolydian (+) 1
# Phrygian -> Locrian (-) | Minor (+) 2
# Lydian -> Mixolydian (-) | Major (+) 3
# Mixolydian -> Dorian (-) | Major (+) 4
# Minor -> Phrygian (-) | Dorian (+) 5
# Locrian -> Locrian (-) | Phrygian (+) 6
SCALE_ORDER = (6, 2, 5, 1, 4, 3, 0)
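# SCALE_ORDER ranks the modes from darkest (Locrian) to brightest (Major); each entry
# is one of the mode indices listed in the comments above, and get_key() walks along
# this ranking one step at a time.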
class MinMax(object):
def __init__(self, min_, max_):
self.min = min_
self.max = max_
@property
def diff(self):
return self.max - self.min
@property
def minmax(self):
return (self.min, self.max)
def norm(self, n):
return (n - self.min) / self.max
E_RANGE = MinMax(-50.0, 50.0)
D_RANGE = MinMax(-50.0, 50.0)
C_RANGE = MinMax(-50.0, 50.0)
T_RANGE = MinMax(60, 180)
V_RANGE = MinMax(70, 127)
class LifeState(object):
def __init__(self, inputs, debug=False):
self.debug = debug
self.inputs = inputs
self.update_rate = 3 # secs
self.energy = 0
self.disposition = 0
self.chaos = 0
self.update()
def update(self):
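        # Average the (clipped) energy/disposition/chaos over all inputs, then
        # re-schedule this method to run again in `update_rate` seconds.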
if len(self.inputs) > 0:
ranges = zip(*(E_RANGE.minmax, D_RANGE.minmax, C_RANGE.minmax))
states = [(input.energy,
input.disposition,
input.chaos)
for input in self.inputs]
states = np.clip(states, *ranges)
            energies, dispositions, chaoses = zip(*states.tolist())
self.energy = np.mean(energies)
self.disposition = np.mean(dispositions)
self.chaos = np.mean(chaoses)
if self.debug:
msg = ['State update',
"Energy: {}".format(self.energy),
"Disposition: {}".format(self.disposition),
"Chaos: {}".format(self.chaos),
'']
print('\n'.join(msg))
threading.Timer(self.update_rate, self.update).start()
def get_tempo(self, old_tempo): # Energy +/- Chaos
# f(en) = {log(en + 1) * (T - 90) / log(E + 1) + 90 | en >= 0
# (e^(en + E) - 1) * (90 - t) / (e^E - 1) + 60 | en <= 0}
# en = energy, E = max_energy, T = max_tempo, t = min_tempo
energy = self.energy
chaos = self.chaos
if energy >= 0:
_a = np.log10(energy + 1)
_b = T_RANGE.max - 90
_c = np.log10(E_RANGE.max + 1)
_d = 90
else:
_a = np.exp(energy + E_RANGE.max) - 1
_b = 90 - T_RANGE.min
_c = np.exp(E_RANGE.max) - 1
_d = 60
tempo = _a * _b / _c + _d
tempo += chaos / C_RANGE.diff
# Tempo can only change by at most 20 bpm
tempo = np.clip(tempo, old_tempo - 20, old_tempo + 20)
return tempo
def get_key(self, old_key): # Disposition
disposition = self.disposition
d_ratio = D_RANGE.norm(disposition)
if 0 <= d_ratio < 0.05:
target_scale = 0
elif 0.05 <= d_ratio < 0.12:
target_scale = 1
elif 0.12 <= d_ratio < 0.3:
target_scale = 2
elif 0.3 <= d_ratio < 0.4:
target_scale = 3
elif 0.4 <= d_ratio < 0.7:
target_scale = random.choice((4, 5))
elif 0.7 <= d_ratio <= 1:
target_scale = 6
scale_rank = SCALE_ORDER.index(old_key[1])
direction = np.sign(target_scale - scale_rank)
scale = SCALE_ORDER[scale_rank + direction]
root = 0
key = (root, scale)
return key
def get_octave(self, old_octave):
octave = old_octave
if random.random() < 0.1:
octave = old_octave + random.choice((-1, 1))
if abs(octave) > 1:
octave = old_octave
return octave
def get_volume(self, _old_volume): # Energy +/- Chaos
energy = self.energy
chaos = self.chaos
e_ratio = E_RANGE.norm(energy)
volume = e_ratio * (V_RANGE.diff) + V_RANGE.min
volume += chaos / C_RANGE.diff
return int(volume)
def get_dissonance(self, _old_dissonance): # Disposition
disposition = self.disposition
d_ratio = D_RANGE.norm(disposition)
if 0 <= d_ratio < 0.1:
dissonance = 0.2
elif 0.1 <= d_ratio < 0.9:
dissonance = 0.1
elif 0.9 <= d_ratio <= 1:
dissonance = 0.05
return dissonance
def get_length_ratio(self, _old_length_ratio): # Energy +/- Chaos
energy = self.energy
e_ratio = E_RANGE.norm(energy)
if 0 <= e_ratio < 0.1:
length_ratio = (1, 2)
elif 0.1 <= e_ratio < 0.6:
length_ratio = (1, 4)
elif 0.6 <= e_ratio < 0.8:
length_ratio = (1, 3)
elif 0.8 <= e_ratio < 0.9:
length_ratio = (1, 2)
elif 0.9 <= e_ratio <= 1:
length_ratio = (2, 1)
return length_ratio
|
[
"threading.Timer",
"random.choice",
"numpy.clip",
"random.random",
"numpy.mean",
"numpy.exp",
"numpy.sign",
"numpy.log10"
] |
[((2880, 2926), 'numpy.clip', 'np.clip', (['tempo', '(old_tempo - 20)', '(old_tempo + 20)'], {}), '(tempo, old_tempo - 20, old_tempo + 20)\n', (2887, 2926), True, 'import numpy as np\n'), ((3557, 3591), 'numpy.sign', 'np.sign', (['(target_scale - scale_rank)'], {}), '(target_scale - scale_rank)\n', (3564, 3591), True, 'import numpy as np\n'), ((1486, 1510), 'numpy.clip', 'np.clip', (['states', '*ranges'], {}), '(states, *ranges)\n', (1493, 1510), True, 'import numpy as np\n'), ((1604, 1621), 'numpy.mean', 'np.mean', (['energies'], {}), '(energies)\n', (1611, 1621), True, 'import numpy as np\n'), ((1653, 1674), 'numpy.mean', 'np.mean', (['dispositions'], {}), '(dispositions)\n', (1660, 1674), True, 'import numpy as np\n'), ((1700, 1716), 'numpy.mean', 'np.mean', (['chaoses'], {}), '(chaoses)\n', (1707, 1716), True, 'import numpy as np\n'), ((2463, 2483), 'numpy.log10', 'np.log10', (['(energy + 1)'], {}), '(energy + 1)\n', (2471, 2483), True, 'import numpy as np\n'), ((2535, 2560), 'numpy.log10', 'np.log10', (['(E_RANGE.max + 1)'], {}), '(E_RANGE.max + 1)\n', (2543, 2560), True, 'import numpy as np\n'), ((3790, 3805), 'random.random', 'random.random', ([], {}), '()\n', (3803, 3805), False, 'import random\n'), ((2037, 2083), 'threading.Timer', 'threading.Timer', (['self.update_rate', 'self.update'], {}), '(self.update_rate, self.update)\n', (2052, 2083), False, 'import threading\n'), ((2612, 2640), 'numpy.exp', 'np.exp', (['(energy + E_RANGE.max)'], {}), '(energy + E_RANGE.max)\n', (2618, 2640), True, 'import numpy as np\n'), ((2696, 2715), 'numpy.exp', 'np.exp', (['E_RANGE.max'], {}), '(E_RANGE.max)\n', (2702, 2715), True, 'import numpy as np\n'), ((3847, 3869), 'random.choice', 'random.choice', (['(-1, 1)'], {}), '((-1, 1))\n', (3860, 3869), False, 'import random\n'), ((3400, 3421), 'random.choice', 'random.choice', (['(4, 5)'], {}), '((4, 5))\n', (3413, 3421), False, 'import random\n')]
|
import torch
from torch.nn.functional import one_hot
import h5py
import shutil
import numpy as np
from pathlib import Path
from tqdm import tqdm
from time import time
from utils.metrics import calc_ece, calc_nll_brier, BrierLoss
from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor
class CnnRunner(BaseRunner):
def __init__(self, loader, model, optim, lr_scheduler, num_epoch, loss_with_weight,
val_metric, test_metric, logger, model_path, rank, adv_training):
self.num_epoch = num_epoch
self.epoch = 0
self.loss_with_weight = loss_with_weight
self.adv_training = adv_training
self.val_metric = val_metric
self.test_metric = test_metric
self.optim = optim
self.lr_scheduler = lr_scheduler
self.best_score = 0.
self.save_kwargs = {}
self.world_size = torch.distributed.get_world_size()
super().__init__(loader, model, logger, model_path, rank)
self.load()
def _calc_loss(self, img, label):
self.model.train()
output = self.model(img.cuda(non_blocking=True))
label = label.cuda(non_blocking=True)
loss_ = 0
for loss, w in self.loss_with_weight:
_loss = w * loss(output, label)
loss_ += _loss
return loss_
def fgsm(self, img, label):
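        # Fast Gradient Sign Method: take one step of size `step_size` along the
        # sign of the input gradient of the loss to build an adversarial image.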
step_size = 0.01
# loss_fn = torch.nn.CrossEntropyLoss()
loss_fn = self.loss_with_weight[0][0]
img = img.cuda()
img.requires_grad = True
self.model.eval()
self.model.zero_grad()
output = self.model(img)
loss = loss_fn(output, label.cuda())
loss.backward()
grad_sign = img.grad.sign()
img_new = img + step_size * grad_sign
return img_new.cpu().detach()
def _train_a_batch(self, batch):
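        # With adversarial training enabled, the clean images are replaced by
        # their FGSM-perturbed versions before the loss is computed.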
if self.adv_training:
img_new = self.fgsm(*batch)
batch[0] = img_new
loss = self._calc_loss(*batch)
self.optim.zero_grad()
loss.backward()
self.optim.step()
_loss = reduce_tensor(loss, True).item()
return _loss
@torch.no_grad()
def _valid_a_batch(self, img, label, with_output=False):
output = self.model(img.cuda(non_blocking=True))
label = label.cuda(non_blocking=True)
result = self.val_metric(output, label)
if with_output:
result = [result, output]
return result
def train(self):
self.log("Start to train", 'debug')
for epoch in range(self.epoch, self.num_epoch):
self.model.train()
loader = self.loader.load("train")
if self.rank == 0:
t_iter = tqdm(loader, total=self.loader.len,
desc=f"[Train {epoch}]")
else:
t_iter = loader
losses = 0
times = []
for i, batch in enumerate(t_iter):
t = time()
loss = self._train_a_batch(batch)
times += [time() - t]
losses += loss
if self.rank == 0:
t_iter.set_postfix(loss=f"{loss:.4} / {losses/(i+1):.4}")
self.log(f"[Train] epoch:{epoch} loss:{losses/(i+1)}", 'info')
print("Batch Training Time : ", np.mean(times))
self.lr_scheduler.step()
self.val(epoch)
def val(self, epoch):
loader = self.loader.load('val')
v_iter = loader
metrics = []
self.model.eval()
for batch in v_iter:
_metric = self._valid_a_batch(*batch, with_output=False)
metrics += [gather_tensor(_metric).cpu().numpy()]
acc = np.concatenate(metrics).mean()
self.log(f"[Val] {epoch} Score: {acc}", 'info')
if self.rank == 0:
self.save(epoch, acc, **self.save_kwargs)
def test(self, is_seg):
self.load('model.pth')
loader = self.loader.load('test')
if self.rank == 0:
t_iter = tqdm(loader, total=self.loader.len)
else:
t_iter = loader
outputs = []
labels = []
metrics = []
self.model.eval()
for img, label in t_iter:
_metric, output = self._valid_a_batch(img, label, with_output=True)
labels += [gather_tensor(label).cpu().numpy()]
outputs += [gather_tensor(output).cpu().numpy()]
metrics += [gather_tensor(_metric).cpu().numpy()]
if is_seg:
met = np.concatenate(metrics).mean()
self.log(f"[Test] MeanIOU: {met:.2f}", 'info')
save_path = Path(self.model_path) / 'infer'
save_path.mkdir(parents=True, exist_ok=True)
index = 0
for out, label in zip(outputs, labels):
for i in range(label.shape[0]):
l = label[i]
o = out[i]
with h5py.File(f"{save_path}/{index}.h5", 'w') as h:
h.create_dataset('output', data=o)
h.create_dataset('label', data=l)
index += 1
else:
labels = np.concatenate(labels)
outputs = np.concatenate(outputs, axis=0)
acc = (outputs.argmax(1) == labels).mean() * 100
ece = calc_ece(outputs, labels)
nll, brier = calc_nll_brier(outputs, labels, self.num_classes)
log = f"[Test] ACC: {acc:.2f}, ECE : {ece:.2f}, "
log += f"NLL : {nll:.2f}, Brier : {brier:.2f}"
self.log(log, 'info')
with h5py.File(f"{self.model_path}/output.h5", 'w') as h:
h.create_dataset('output', data=outputs)
h.create_dataset('label', data=labels)
def save(self, epoch, metric, file_name="model", **kwargs):
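        # Write a full training checkpoint; when the score matches or beats the
        # best seen so far, the checkpoint is also copied to best.pth.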
torch.save({"epoch": epoch,
"param": self.model.state_dict(),
"optimizer": self.optim.state_dict(),
"score": metric,
"best": self.best_score,
"lr_schdlr": self.lr_scheduler.state_dict(),
**kwargs}, f"{self.model_path}/{file_name}.pth")
cond = metric >= self.best_score
if cond:
self.log(f"{self.best_score} -------------------> {metric}", 'debug')
self.best_score = metric
shutil.copy2(f"{self.model_path}/{file_name}.pth",
f"{self.model_path}/best.pth")
self.log(f"Model has saved at {epoch} epoch.", 'debug')
def load(self, file_name="model.pth"):
self.log(self.model_path, 'debug')
if (self.model_path / file_name).exists():
self.log(f"Loading {self.model_path} File", 'debug')
ckpoint = torch.load(f"{self.model_path}/{file_name}", map_location='cpu')
for key, value in ckpoint.items():
if key == 'param':
self.model.load_state_dict(value)
elif key == 'optimizer':
self.optim.load_state_dict(value)
elif key == 'lr_schdlr':
self.lr_scheduler.load_state_dict(value)
elif key == 'epoch':
self.epoch = value + 1
elif key == 'best':
self.best_score = value
else:
self.__dict__[key] = value
self.log(f"Model Type : {file_name}, epoch : {self.epoch}", 'debug')
else:
self.log("Failed to load, not existing file", 'debug')
def get_lr(self):
return self.lr_scheduler.optimizer.param_groups[0]['lr']
|
[
"tqdm.tqdm",
"h5py.File",
"runners.base_runner.gather_tensor",
"runners.base_runner.reduce_tensor",
"shutil.copy2",
"torch.load",
"time.time",
"pathlib.Path",
"numpy.mean",
"torch.distributed.get_world_size",
"utils.metrics.calc_nll_brier",
"torch.no_grad",
"utils.metrics.calc_ece",
"numpy.concatenate"
] |
[((2154, 2169), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2167, 2169), False, 'import torch\n'), ((883, 917), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (915, 917), False, 'import torch\n'), ((4048, 4083), 'tqdm.tqdm', 'tqdm', (['loader'], {'total': 'self.loader.len'}), '(loader, total=self.loader.len)\n', (4052, 4083), False, 'from tqdm import tqdm\n'), ((5194, 5216), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (5208, 5216), True, 'import numpy as np\n'), ((5239, 5270), 'numpy.concatenate', 'np.concatenate', (['outputs'], {'axis': '(0)'}), '(outputs, axis=0)\n', (5253, 5270), True, 'import numpy as np\n'), ((5350, 5375), 'utils.metrics.calc_ece', 'calc_ece', (['outputs', 'labels'], {}), '(outputs, labels)\n', (5358, 5375), False, 'from utils.metrics import calc_ece, calc_nll_brier, BrierLoss\n'), ((5401, 5450), 'utils.metrics.calc_nll_brier', 'calc_nll_brier', (['outputs', 'labels', 'self.num_classes'], {}), '(outputs, labels, self.num_classes)\n', (5415, 5450), False, 'from utils.metrics import calc_ece, calc_nll_brier, BrierLoss\n'), ((6408, 6493), 'shutil.copy2', 'shutil.copy2', (['f"""{self.model_path}/{file_name}.pth"""', 'f"""{self.model_path}/best.pth"""'], {}), "(f'{self.model_path}/{file_name}.pth',\n f'{self.model_path}/best.pth')\n", (6420, 6493), False, 'import shutil\n'), ((6808, 6872), 'torch.load', 'torch.load', (['f"""{self.model_path}/{file_name}"""'], {'map_location': '"""cpu"""'}), "(f'{self.model_path}/{file_name}', map_location='cpu')\n", (6818, 6872), False, 'import torch\n'), ((2094, 2119), 'runners.base_runner.reduce_tensor', 'reduce_tensor', (['loss', '(True)'], {}), '(loss, True)\n', (2107, 2119), False, 'from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor\n'), ((2722, 2782), 'tqdm.tqdm', 'tqdm', (['loader'], {'total': 'self.loader.len', 'desc': 'f"""[Train {epoch}]"""'}), "(loader, total=self.loader.len, desc=f'[Train {epoch}]')\n", (2726, 2782), False, 'from tqdm import tqdm\n'), ((2976, 2982), 'time.time', 'time', ([], {}), '()\n', (2980, 2982), False, 'from time import time\n'), ((3335, 3349), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (3342, 3349), True, 'import numpy as np\n'), ((3730, 3753), 'numpy.concatenate', 'np.concatenate', (['metrics'], {}), '(metrics)\n', (3744, 3753), True, 'import numpy as np\n'), ((4662, 4683), 'pathlib.Path', 'Path', (['self.model_path'], {}), '(self.model_path)\n', (4666, 4683), False, 'from pathlib import Path\n'), ((5623, 5669), 'h5py.File', 'h5py.File', (['f"""{self.model_path}/output.h5"""', '"""w"""'], {}), "(f'{self.model_path}/output.h5', 'w')\n", (5632, 5669), False, 'import h5py\n'), ((4548, 4571), 'numpy.concatenate', 'np.concatenate', (['metrics'], {}), '(metrics)\n', (4562, 4571), True, 'import numpy as np\n'), ((3059, 3065), 'time.time', 'time', ([], {}), '()\n', (3063, 3065), False, 'from time import time\n'), ((4963, 5004), 'h5py.File', 'h5py.File', (['f"""{save_path}/{index}.h5"""', '"""w"""'], {}), "(f'{save_path}/{index}.h5', 'w')\n", (4972, 5004), False, 'import h5py\n'), ((3678, 3700), 'runners.base_runner.gather_tensor', 'gather_tensor', (['_metric'], {}), '(_metric)\n', (3691, 3700), False, 'from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor\n'), ((4352, 4372), 'runners.base_runner.gather_tensor', 'gather_tensor', (['label'], {}), '(label)\n', (4365, 4372), False, 'from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor\n'), ((4412, 4433), 
'runners.base_runner.gather_tensor', 'gather_tensor', (['output'], {}), '(output)\n', (4425, 4433), False, 'from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor\n'), ((4473, 4495), 'runners.base_runner.gather_tensor', 'gather_tensor', (['_metric'], {}), '(_metric)\n', (4486, 4495), False, 'from runners.base_runner import BaseRunner, reduce_tensor, gather_tensor\n')]
|
import os
import torch
import pickle
import pytest
import tempfile
import h5py
import numpy as np
from timeit import timeit
from tianshou.data import Batch, SegmentTree, \
ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer
from tianshou.data.utils.converter import to_hdf5
if __name__ == '__main__':
from env import MyTestEnv
else: # pytest
from test.base.env import MyTestEnv
def test_replaybuffer(size=10, bufsize=20):
env = MyTestEnv(size)
buf = ReplayBuffer(bufsize)
buf.update(buf)
assert str(buf) == buf.__class__.__name__ + '()'
obs = env.reset()
action_list = [1] * 5 + [0] * 10 + [1] * 10
for i, a in enumerate(action_list):
obs_next, rew, done, info = env.step(a)
buf.add(obs, [a], rew, done, obs_next, info)
obs = obs_next
assert len(buf) == min(bufsize, i + 1)
with pytest.raises(ValueError):
buf._add_to_buffer('rew', np.array([1, 2, 3]))
assert buf.act.dtype == np.object
assert isinstance(buf.act[0], list)
data, indice = buf.sample(bufsize * 2)
assert (indice < len(buf)).all()
assert (data.obs < size).all()
assert (0 <= data.done).all() and (data.done <= 1).all()
b = ReplayBuffer(size=10)
b.add(1, 1, 1, 'str', 1, {'a': 3, 'b': {'c': 5.0}})
assert b.obs[0] == 1
assert b.done[0] == 'str'
assert np.all(b.obs[1:] == 0)
assert np.all(b.done[1:] == np.array(None))
assert b.info.a[0] == 3 and b.info.a.dtype == np.integer
assert np.all(b.info.a[1:] == 0)
assert b.info.b.c[0] == 5.0 and b.info.b.c.dtype == np.inexact
assert np.all(b.info.b.c[1:] == 0.0)
with pytest.raises(IndexError):
b[22]
b = ListReplayBuffer()
with pytest.raises(NotImplementedError):
b.sample(0)
def test_ignore_obs_next(size=10):
# Issue 82
buf = ReplayBuffer(size, ignore_obs_next=True)
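    # With ignore_obs_next=True the buffer does not store obs_next explicitly;
    # it is presumably reconstructed from the obs of the following step when the
    # buffer is indexed, which the mask comparisons below are checking.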
for i in range(size):
buf.add(obs={'mask1': np.array([i, 1, 1, 0, 0]),
'mask2': np.array([i + 4, 0, 1, 0, 0]),
'mask': i},
act={'act_id': i,
'position_id': i + 3},
rew=i,
done=i % 3 == 0,
info={'if': i})
indice = np.arange(len(buf))
orig = np.arange(len(buf))
data = buf[indice]
data2 = buf[indice]
assert isinstance(data, Batch)
assert isinstance(data2, Batch)
assert np.allclose(indice, orig)
assert np.allclose(data.obs_next.mask, data2.obs_next.mask)
assert np.allclose(data.obs_next.mask, [0, 2, 3, 3, 5, 6, 6, 8, 9, 9])
buf.stack_num = 4
data = buf[indice]
data2 = buf[indice]
assert np.allclose(data.obs_next.mask, data2.obs_next.mask)
assert np.allclose(data.obs_next.mask, np.array([
[0, 0, 0, 0], [1, 1, 1, 2], [1, 1, 2, 3], [1, 1, 2, 3],
[4, 4, 4, 5], [4, 4, 5, 6], [4, 4, 5, 6],
[7, 7, 7, 8], [7, 7, 8, 9], [7, 7, 8, 9]]))
assert np.allclose(data.info['if'], data2.info['if'])
assert np.allclose(data.info['if'], np.array([
[0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 2], [1, 1, 2, 3],
[4, 4, 4, 4], [4, 4, 4, 5], [4, 4, 5, 6],
[7, 7, 7, 7], [7, 7, 7, 8], [7, 7, 8, 9]]))
assert data.obs_next
def test_stack(size=5, bufsize=9, stack_num=4):
env = MyTestEnv(size)
buf = ReplayBuffer(bufsize, stack_num=stack_num)
buf2 = ReplayBuffer(bufsize, stack_num=stack_num, sample_avail=True)
buf3 = ReplayBuffer(bufsize, stack_num=stack_num, save_only_last_obs=True)
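    # buf3 receives stacked observations but, with save_only_last_obs=True, should
    # keep only the last frame of each stack, so its contents are expected to match
    # buf; the assertions comparing buf3.get(...) with buf.get(...) below check that.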
obs = env.reset(1)
for i in range(16):
obs_next, rew, done, info = env.step(1)
buf.add(obs, 1, rew, done, None, info)
buf2.add(obs, 1, rew, done, None, info)
buf3.add([None, None, obs], 1, rew, done, [None, obs], info)
obs = obs_next
if done:
obs = env.reset(1)
indice = np.arange(len(buf))
assert np.allclose(buf.get(indice, 'obs')[..., 0], [
[1, 1, 1, 2], [1, 1, 2, 3], [1, 2, 3, 4],
[1, 1, 1, 1], [1, 1, 1, 2], [1, 1, 2, 3],
[1, 2, 3, 4], [4, 4, 4, 4], [1, 1, 1, 1]])
assert np.allclose(buf.get(indice, 'obs'), buf3.get(indice, 'obs'))
assert np.allclose(buf.get(indice, 'obs'), buf3.get(indice, 'obs_next'))
_, indice = buf2.sample(0)
assert indice.tolist() == [2, 6]
_, indice = buf2.sample(1)
assert indice in [2, 6]
with pytest.raises(IndexError):
buf[bufsize * 2]
def test_priortized_replaybuffer(size=32, bufsize=15):
env = MyTestEnv(size)
buf = PrioritizedReplayBuffer(bufsize, 0.5, 0.5)
obs = env.reset()
action_list = [1] * 5 + [0] * 10 + [1] * 10
for i, a in enumerate(action_list):
obs_next, rew, done, info = env.step(a)
buf.add(obs, a, rew, done, obs_next, info, np.random.randn() - 0.5)
obs = obs_next
data, indice = buf.sample(len(buf) // 2)
if len(buf) // 2 == 0:
assert len(data) == len(buf)
else:
assert len(data) == len(buf) // 2
assert len(buf) == min(bufsize, i + 1)
data, indice = buf.sample(len(buf) // 2)
buf.update_weight(indice, -data.weight / 2)
assert np.allclose(
buf.weight[indice], np.abs(-data.weight / 2) ** buf._alpha)
def test_update():
buf1 = ReplayBuffer(4, stack_num=2)
buf2 = ReplayBuffer(4, stack_num=2)
for i in range(5):
buf1.add(obs=np.array([i]), act=float(i), rew=i * i,
done=i % 2 == 0, info={'incident': 'found'})
assert len(buf1) > len(buf2)
buf2.update(buf1)
assert len(buf1) == len(buf2)
assert (buf2[0].obs == buf1[1].obs).all()
assert (buf2[-1].obs == buf1[0].obs).all()
def test_segtree():
realop = np.sum
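    # Throughout this test, tree.reduce(l, r) is compared against realop (np.sum)
    # applied to naive[l:r]: internal nodes of the segment tree cache interval sums,
    # so each reduce is expected to run in O(log n) rather than O(n).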
# small test
actual_len = 8
tree = SegmentTree(actual_len) # 1-15. 8-15 are leaf nodes
assert len(tree) == actual_len
assert np.all([tree[i] == 0. for i in range(actual_len)])
with pytest.raises(IndexError):
tree[actual_len]
naive = np.zeros([actual_len])
for _ in range(1000):
# random choose a place to perform single update
index = np.random.randint(actual_len)
value = np.random.rand()
naive[index] = value
tree[index] = value
for i in range(actual_len):
for j in range(i + 1, actual_len):
ref = realop(naive[i:j])
out = tree.reduce(i, j)
assert np.allclose(ref, out), (ref, out)
assert np.allclose(tree.reduce(start=1), realop(naive[1:]))
assert np.allclose(tree.reduce(end=-1), realop(naive[:-1]))
# batch setitem
for _ in range(1000):
index = np.random.choice(actual_len, size=4)
value = np.random.rand(4)
naive[index] = value
tree[index] = value
assert np.allclose(realop(naive), tree.reduce())
for i in range(10):
left = np.random.randint(actual_len)
right = np.random.randint(left + 1, actual_len + 1)
assert np.allclose(realop(naive[left:right]),
tree.reduce(left, right))
# large test
actual_len = 16384
tree = SegmentTree(actual_len)
naive = np.zeros([actual_len])
for _ in range(1000):
index = np.random.choice(actual_len, size=64)
value = np.random.rand(64)
naive[index] = value
tree[index] = value
assert np.allclose(realop(naive), tree.reduce())
for i in range(10):
left = np.random.randint(actual_len)
right = np.random.randint(left + 1, actual_len + 1)
assert np.allclose(realop(naive[left:right]),
tree.reduce(left, right))
# test prefix-sum-idx
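    # get_prefix_sum_idx(s) should return the index i such that
    # naive[:i].sum() <= s <= naive[:i + 1].sum() (see the assertion below);
    # this is the primitive used for weighted sampling in prioritized replay.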
actual_len = 8
tree = SegmentTree(actual_len)
naive = np.random.rand(actual_len)
tree[np.arange(actual_len)] = naive
for _ in range(1000):
scalar = np.random.rand() * naive.sum()
index = tree.get_prefix_sum_idx(scalar)
assert naive[:index].sum() <= scalar <= naive[:index + 1].sum()
# corner case here
naive = np.ones(actual_len, np.int)
tree[np.arange(actual_len)] = naive
for scalar in range(actual_len):
index = tree.get_prefix_sum_idx(scalar * 1.)
assert naive[:index].sum() <= scalar <= naive[:index + 1].sum()
tree = SegmentTree(10)
tree[np.arange(3)] = np.array([0.1, 0, 0.1])
assert np.allclose(tree.get_prefix_sum_idx(
np.array([0, .1, .1 + 1e-6, .2 - 1e-6])), [0, 0, 2, 2])
with pytest.raises(AssertionError):
tree.get_prefix_sum_idx(.2)
# test large prefix-sum-idx
actual_len = 16384
tree = SegmentTree(actual_len)
naive = np.random.rand(actual_len)
tree[np.arange(actual_len)] = naive
for _ in range(1000):
scalar = np.random.rand() * naive.sum()
index = tree.get_prefix_sum_idx(scalar)
assert naive[:index].sum() <= scalar <= naive[:index + 1].sum()
# profile
if __name__ == '__main__':
size = 100000
bsz = 64
naive = np.random.rand(size)
tree = SegmentTree(size)
tree[np.arange(size)] = naive
def sample_npbuf():
return np.random.choice(size, bsz, p=naive / naive.sum())
def sample_tree():
scalar = np.random.rand(bsz) * tree.reduce()
return tree.get_prefix_sum_idx(scalar)
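    # The timings below compare weighted sampling with np.random.choice (O(n) per
    # batch in the number of elements) against the segment tree, which draws each
    # weighted sample in O(log n) via get_prefix_sum_idx.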
print('npbuf', timeit(sample_npbuf, setup=sample_npbuf, number=1000))
print('tree', timeit(sample_tree, setup=sample_tree, number=1000))
def test_pickle():
size = 100
vbuf = ReplayBuffer(size, stack_num=2)
lbuf = ListReplayBuffer()
pbuf = PrioritizedReplayBuffer(size, 0.6, 0.4)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
rew = torch.tensor([1.]).to(device)
for i in range(4):
vbuf.add(obs=Batch(index=np.array([i])), act=0, rew=rew, done=0)
for i in range(3):
lbuf.add(obs=Batch(index=np.array([i])), act=1, rew=rew, done=0)
for i in range(5):
pbuf.add(obs=Batch(index=np.array([i])),
act=2, rew=rew, done=0, weight=np.random.rand())
# save & load
_vbuf = pickle.loads(pickle.dumps(vbuf))
_lbuf = pickle.loads(pickle.dumps(lbuf))
_pbuf = pickle.loads(pickle.dumps(pbuf))
assert len(_vbuf) == len(vbuf) and np.allclose(_vbuf.act, vbuf.act)
assert len(_lbuf) == len(lbuf) and np.allclose(_lbuf.act, lbuf.act)
assert len(_pbuf) == len(pbuf) and np.allclose(_pbuf.act, pbuf.act)
# make sure the meta var is identical
assert _vbuf.stack_num == vbuf.stack_num
assert np.allclose(_pbuf.weight[np.arange(len(_pbuf))],
pbuf.weight[np.arange(len(pbuf))])
def test_hdf5():
size = 100
buffers = {
"array": ReplayBuffer(size, stack_num=2),
"list": ListReplayBuffer(),
"prioritized": PrioritizedReplayBuffer(size, 0.6, 0.4)
}
buffer_types = {k: b.__class__ for k, b in buffers.items()}
device = 'cuda' if torch.cuda.is_available() else 'cpu'
rew = torch.tensor([1.]).to(device)
for i in range(4):
kwargs = {
'obs': Batch(index=np.array([i])),
'act': i,
'rew': rew,
'done': 0,
'info': {"number": {"n": i}, 'extra': None},
}
buffers["array"].add(**kwargs)
buffers["list"].add(**kwargs)
buffers["prioritized"].add(weight=np.random.rand(), **kwargs)
# save
paths = {}
for k, buf in buffers.items():
f, path = tempfile.mkstemp(suffix='.hdf5')
os.close(f)
buf.save_hdf5(path)
paths[k] = path
# load replay buffer
_buffers = {k: buffer_types[k].load_hdf5(paths[k]) for k in paths.keys()}
# compare
for k in buffers.keys():
assert len(_buffers[k]) == len(buffers[k])
assert np.allclose(_buffers[k].act, buffers[k].act)
assert _buffers[k].stack_num == buffers[k].stack_num
assert _buffers[k]._maxsize == buffers[k]._maxsize
assert _buffers[k]._index == buffers[k]._index
assert np.all(_buffers[k]._indices == buffers[k]._indices)
for k in ["array", "prioritized"]:
assert isinstance(buffers[k].get(0, "info"), Batch)
assert isinstance(_buffers[k].get(0, "info"), Batch)
for k in ["array"]:
assert np.all(
buffers[k][:].info.number.n == _buffers[k][:].info.number.n)
assert np.all(
buffers[k][:].info.extra == _buffers[k][:].info.extra)
for path in paths.values():
os.remove(path)
# raise exception when value cannot be pickled
data = {"not_supported": lambda x: x*x}
grp = h5py.Group
with pytest.raises(NotImplementedError):
to_hdf5(data, grp)
# ndarray with data type not supported by HDF5 that cannot be pickled
data = {"not_supported": np.array(lambda x: x*x)}
grp = h5py.Group
with pytest.raises(RuntimeError):
to_hdf5(data, grp)
if __name__ == '__main__':
test_hdf5()
test_replaybuffer()
test_ignore_obs_next()
test_stack()
test_pickle()
test_segtree()
test_priortized_replaybuffer()
test_priortized_replaybuffer(233333, 200000)
test_update()
|
[
"os.remove",
"numpy.abs",
"numpy.allclose",
"numpy.ones",
"numpy.random.randint",
"numpy.arange",
"os.close",
"timeit.timeit",
"tianshou.data.ListReplayBuffer",
"numpy.random.randn",
"tianshou.data.PrioritizedReplayBuffer",
"tianshou.data.SegmentTree",
"pytest.raises",
"numpy.random.choice",
"pickle.dumps",
"torch.cuda.is_available",
"test.base.env.MyTestEnv",
"numpy.all",
"tianshou.data.ReplayBuffer",
"tempfile.mkstemp",
"tianshou.data.utils.converter.to_hdf5",
"numpy.zeros",
"numpy.array",
"numpy.random.rand",
"torch.tensor"
] |
[((453, 468), 'test.base.env.MyTestEnv', 'MyTestEnv', (['size'], {}), '(size)\n', (462, 468), False, 'from test.base.env import MyTestEnv\n'), ((479, 500), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['bufsize'], {}), '(bufsize)\n', (491, 500), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((1208, 1229), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', ([], {'size': '(10)'}), '(size=10)\n', (1220, 1229), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((1352, 1374), 'numpy.all', 'np.all', (['(b.obs[1:] == 0)'], {}), '(b.obs[1:] == 0)\n', (1358, 1374), True, 'import numpy as np\n'), ((1495, 1520), 'numpy.all', 'np.all', (['(b.info.a[1:] == 0)'], {}), '(b.info.a[1:] == 0)\n', (1501, 1520), True, 'import numpy as np\n'), ((1599, 1628), 'numpy.all', 'np.all', (['(b.info.b.c[1:] == 0.0)'], {}), '(b.info.b.c[1:] == 0.0)\n', (1605, 1628), True, 'import numpy as np\n'), ((1687, 1705), 'tianshou.data.ListReplayBuffer', 'ListReplayBuffer', ([], {}), '()\n', (1703, 1705), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((1833, 1873), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['size'], {'ignore_obs_next': '(True)'}), '(size, ignore_obs_next=True)\n', (1845, 1873), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((2410, 2435), 'numpy.allclose', 'np.allclose', (['indice', 'orig'], {}), '(indice, orig)\n', (2421, 2435), True, 'import numpy as np\n'), ((2447, 2499), 'numpy.allclose', 'np.allclose', (['data.obs_next.mask', 'data2.obs_next.mask'], {}), '(data.obs_next.mask, data2.obs_next.mask)\n', (2458, 2499), True, 'import numpy as np\n'), ((2511, 2574), 'numpy.allclose', 'np.allclose', (['data.obs_next.mask', '[0, 2, 3, 3, 5, 6, 6, 8, 9, 9]'], {}), '(data.obs_next.mask, [0, 2, 3, 3, 5, 6, 6, 8, 9, 9])\n', (2522, 2574), True, 'import numpy as np\n'), ((2655, 2707), 'numpy.allclose', 'np.allclose', (['data.obs_next.mask', 'data2.obs_next.mask'], {}), '(data.obs_next.mask, data2.obs_next.mask)\n', (2666, 2707), True, 'import numpy as np\n'), ((2939, 2985), 'numpy.allclose', 'np.allclose', (["data.info['if']", "data2.info['if']"], {}), "(data.info['if'], data2.info['if'])\n", (2950, 2985), True, 'import numpy as np\n'), ((3288, 3303), 'test.base.env.MyTestEnv', 'MyTestEnv', (['size'], {}), '(size)\n', (3297, 3303), False, 'from test.base.env import MyTestEnv\n'), ((3314, 3356), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['bufsize'], {'stack_num': 'stack_num'}), '(bufsize, stack_num=stack_num)\n', (3326, 3356), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((3368, 3429), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['bufsize'], {'stack_num': 'stack_num', 'sample_avail': '(True)'}), '(bufsize, stack_num=stack_num, sample_avail=True)\n', (3380, 3429), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((3441, 3508), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['bufsize'], {'stack_num': 'stack_num', 'save_only_last_obs': '(True)'}), '(bufsize, stack_num=stack_num, save_only_last_obs=True)\n', (3453, 3508), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((4484, 4499), 'test.base.env.MyTestEnv', 'MyTestEnv', (['size'], {}), '(size)\n', 
(4493, 4499), False, 'from test.base.env import MyTestEnv\n'), ((4510, 4552), 'tianshou.data.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['bufsize', '(0.5)', '(0.5)'], {}), '(bufsize, 0.5, 0.5)\n', (4533, 4552), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((5255, 5283), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['(4)'], {'stack_num': '(2)'}), '(4, stack_num=2)\n', (5267, 5283), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((5295, 5323), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['(4)'], {'stack_num': '(2)'}), '(4, stack_num=2)\n', (5307, 5323), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((5741, 5764), 'tianshou.data.SegmentTree', 'SegmentTree', (['actual_len'], {}), '(actual_len)\n', (5752, 5764), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((5964, 5986), 'numpy.zeros', 'np.zeros', (['[actual_len]'], {}), '([actual_len])\n', (5972, 5986), True, 'import numpy as np\n'), ((7109, 7132), 'tianshou.data.SegmentTree', 'SegmentTree', (['actual_len'], {}), '(actual_len)\n', (7120, 7132), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((7145, 7167), 'numpy.zeros', 'np.zeros', (['[actual_len]'], {}), '([actual_len])\n', (7153, 7167), True, 'import numpy as np\n'), ((7710, 7733), 'tianshou.data.SegmentTree', 'SegmentTree', (['actual_len'], {}), '(actual_len)\n', (7721, 7733), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((7746, 7772), 'numpy.random.rand', 'np.random.rand', (['actual_len'], {}), '(actual_len)\n', (7760, 7772), True, 'import numpy as np\n'), ((8042, 8069), 'numpy.ones', 'np.ones', (['actual_len', 'np.int'], {}), '(actual_len, np.int)\n', (8049, 8069), True, 'import numpy as np\n'), ((8283, 8298), 'tianshou.data.SegmentTree', 'SegmentTree', (['(10)'], {}), '(10)\n', (8294, 8298), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((8324, 8347), 'numpy.array', 'np.array', (['[0.1, 0, 0.1]'], {}), '([0.1, 0, 0.1])\n', (8332, 8347), True, 'import numpy as np\n'), ((8602, 8625), 'tianshou.data.SegmentTree', 'SegmentTree', (['actual_len'], {}), '(actual_len)\n', (8613, 8625), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((8638, 8664), 'numpy.random.rand', 'np.random.rand', (['actual_len'], {}), '(actual_len)\n', (8652, 8664), True, 'import numpy as np\n'), ((9528, 9559), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['size'], {'stack_num': '(2)'}), '(size, stack_num=2)\n', (9540, 9559), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((9571, 9589), 'tianshou.data.ListReplayBuffer', 'ListReplayBuffer', ([], {}), '()\n', (9587, 9589), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((9601, 9640), 'tianshou.data.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['size', '(0.6)', '(0.4)'], {}), '(size, 0.6, 0.4)\n', (9624, 9640), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((864, 889), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {}), '(ValueError)\n', (877, 889), False, 'import pytest\n'), ((1638, 1663), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (1651, 1663), False, 'import pytest\n'), ((1715, 1749), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (1728, 1749), False, 'import pytest\n'), ((2751, 2905), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [1, 1, 1, 2], [1, 1, 2, 3], [1, 1, 2, 3], [4, 4, 4, 5], [4, \n 4, 5, 6], [4, 4, 5, 6], [7, 7, 7, 8], [7, 7, 8, 9], [7, 7, 8, 9]]'], {}), '([[0, 0, 0, 0], [1, 1, 1, 2], [1, 1, 2, 3], [1, 1, 2, 3], [4, 4, 4,\n 5], [4, 4, 5, 6], [4, 4, 5, 6], [7, 7, 7, 8], [7, 7, 8, 9], [7, 7, 8, 9]])\n', (2759, 2905), True, 'import numpy as np\n'), ((3026, 3180), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 2], [1, 1, 2, 3], [4, 4, 4, 4], [4, \n 4, 4, 5], [4, 4, 5, 6], [7, 7, 7, 7], [7, 7, 7, 8], [7, 7, 8, 9]]'], {}), '([[0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 2], [1, 1, 2, 3], [4, 4, 4,\n 4], [4, 4, 4, 5], [4, 4, 5, 6], [7, 7, 7, 7], [7, 7, 7, 8], [7, 7, 8, 9]])\n', (3034, 3180), True, 'import numpy as np\n'), ((4365, 4390), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (4378, 4390), False, 'import pytest\n'), ((5900, 5925), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (5913, 5925), False, 'import pytest\n'), ((6086, 6115), 'numpy.random.randint', 'np.random.randint', (['actual_len'], {}), '(actual_len)\n', (6103, 6115), True, 'import numpy as np\n'), ((6132, 6148), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6146, 6148), True, 'import numpy as np\n'), ((6617, 6653), 'numpy.random.choice', 'np.random.choice', (['actual_len'], {'size': '(4)'}), '(actual_len, size=4)\n', (6633, 6653), True, 'import numpy as np\n'), ((6670, 6687), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (6684, 6687), True, 'import numpy as np\n'), ((7210, 7247), 'numpy.random.choice', 'np.random.choice', (['actual_len'], {'size': '(64)'}), '(actual_len, size=64)\n', (7226, 7247), True, 'import numpy as np\n'), ((7264, 7282), 'numpy.random.rand', 'np.random.rand', (['(64)'], {}), '(64)\n', (7278, 7282), True, 'import numpy as np\n'), ((7782, 7803), 'numpy.arange', 'np.arange', (['actual_len'], {}), '(actual_len)\n', (7791, 7803), True, 'import numpy as np\n'), ((8079, 8100), 'numpy.arange', 'np.arange', (['actual_len'], {}), '(actual_len)\n', (8088, 8100), True, 'import numpy as np\n'), ((8308, 8320), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (8317, 8320), True, 'import numpy as np\n'), ((8469, 8498), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (8482, 8498), False, 'import pytest\n'), ((8674, 8695), 'numpy.arange', 'np.arange', (['actual_len'], {}), '(actual_len)\n', (8683, 8695), True, 'import numpy as np\n'), ((9000, 9020), 'numpy.random.rand', 'np.random.rand', (['size'], {}), '(size)\n', (9014, 9020), True, 'import numpy as np\n'), ((9036, 9053), 'tianshou.data.SegmentTree', 'SegmentTree', (['size'], {}), '(size)\n', (9047, 9053), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((9664, 9689), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9687, 9689), False, 'import torch\n'), ((10114, 10132), 'pickle.dumps', 'pickle.dumps', (['vbuf'], {}), '(vbuf)\n', (10126, 10132), False, 'import pickle\n'), ((10159, 10177), 'pickle.dumps', 'pickle.dumps', (['lbuf'], {}), '(lbuf)\n', (10171, 
10177), False, 'import pickle\n'), ((10204, 10222), 'pickle.dumps', 'pickle.dumps', (['pbuf'], {}), '(pbuf)\n', (10216, 10222), False, 'import pickle\n'), ((10263, 10295), 'numpy.allclose', 'np.allclose', (['_vbuf.act', 'vbuf.act'], {}), '(_vbuf.act, vbuf.act)\n', (10274, 10295), True, 'import numpy as np\n'), ((10335, 10367), 'numpy.allclose', 'np.allclose', (['_lbuf.act', 'lbuf.act'], {}), '(_lbuf.act, lbuf.act)\n', (10346, 10367), True, 'import numpy as np\n'), ((10407, 10439), 'numpy.allclose', 'np.allclose', (['_pbuf.act', 'pbuf.act'], {}), '(_pbuf.act, pbuf.act)\n', (10418, 10439), True, 'import numpy as np\n'), ((10712, 10743), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['size'], {'stack_num': '(2)'}), '(size, stack_num=2)\n', (10724, 10743), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((10761, 10779), 'tianshou.data.ListReplayBuffer', 'ListReplayBuffer', ([], {}), '()\n', (10777, 10779), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((10804, 10843), 'tianshou.data.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['size', '(0.6)', '(0.4)'], {}), '(size, 0.6, 0.4)\n', (10827, 10843), False, 'from tianshou.data import Batch, SegmentTree, ReplayBuffer, ListReplayBuffer, PrioritizedReplayBuffer\n'), ((10937, 10962), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10960, 10962), False, 'import torch\n'), ((11466, 11498), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".hdf5"""'}), "(suffix='.hdf5')\n", (11482, 11498), False, 'import tempfile\n'), ((11507, 11518), 'os.close', 'os.close', (['f'], {}), '(f)\n', (11515, 11518), False, 'import os\n'), ((11785, 11829), 'numpy.allclose', 'np.allclose', (['_buffers[k].act', 'buffers[k].act'], {}), '(_buffers[k].act, buffers[k].act)\n', (11796, 11829), True, 'import numpy as np\n'), ((12020, 12071), 'numpy.all', 'np.all', (['(_buffers[k]._indices == buffers[k]._indices)'], {}), '(_buffers[k]._indices == buffers[k]._indices)\n', (12026, 12071), True, 'import numpy as np\n'), ((12271, 12338), 'numpy.all', 'np.all', (['(buffers[k][:].info.number.n == _buffers[k][:].info.number.n)'], {}), '(buffers[k][:].info.number.n == _buffers[k][:].info.number.n)\n', (12277, 12338), True, 'import numpy as np\n'), ((12367, 12428), 'numpy.all', 'np.all', (['(buffers[k][:].info.extra == _buffers[k][:].info.extra)'], {}), '(buffers[k][:].info.extra == _buffers[k][:].info.extra)\n', (12373, 12428), True, 'import numpy as np\n'), ((12483, 12498), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (12492, 12498), False, 'import os\n'), ((12625, 12659), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (12638, 12659), False, 'import pytest\n'), ((12669, 12687), 'tianshou.data.utils.converter.to_hdf5', 'to_hdf5', (['data', 'grp'], {}), '(data, grp)\n', (12676, 12687), False, 'from tianshou.data.utils.converter import to_hdf5\n'), ((12791, 12816), 'numpy.array', 'np.array', (['(lambda x: x * x)'], {}), '(lambda x: x * x)\n', (12799, 12816), True, 'import numpy as np\n'), ((12846, 12873), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (12859, 12873), False, 'import pytest\n'), ((12883, 12901), 'tianshou.data.utils.converter.to_hdf5', 'to_hdf5', (['data', 'grp'], {}), '(data, grp)\n', (12890, 12901), False, 'from tianshou.data.utils.converter import to_hdf5\n'), ((925, 944), 'numpy.array', 'np.array', (['[1, 2, 3]'], 
{}), '([1, 2, 3])\n', (933, 944), True, 'import numpy as np\n'), ((1407, 1421), 'numpy.array', 'np.array', (['None'], {}), '(None)\n', (1415, 1421), True, 'import numpy as np\n'), ((5183, 5207), 'numpy.abs', 'np.abs', (['(-data.weight / 2)'], {}), '(-data.weight / 2)\n', (5189, 5207), True, 'import numpy as np\n'), ((6849, 6878), 'numpy.random.randint', 'np.random.randint', (['actual_len'], {}), '(actual_len)\n', (6866, 6878), True, 'import numpy as np\n'), ((6899, 6942), 'numpy.random.randint', 'np.random.randint', (['(left + 1)', '(actual_len + 1)'], {}), '(left + 1, actual_len + 1)\n', (6916, 6942), True, 'import numpy as np\n'), ((7444, 7473), 'numpy.random.randint', 'np.random.randint', (['actual_len'], {}), '(actual_len)\n', (7461, 7473), True, 'import numpy as np\n'), ((7494, 7537), 'numpy.random.randint', 'np.random.randint', (['(left + 1)', '(actual_len + 1)'], {}), '(left + 1, actual_len + 1)\n', (7511, 7537), True, 'import numpy as np\n'), ((7856, 7872), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7870, 7872), True, 'import numpy as np\n'), ((8404, 8448), 'numpy.array', 'np.array', (['[0, 0.1, 0.1 + 1e-06, 0.2 - 1e-06]'], {}), '([0, 0.1, 0.1 + 1e-06, 0.2 - 1e-06])\n', (8412, 8448), True, 'import numpy as np\n'), ((8748, 8764), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8762, 8764), True, 'import numpy as np\n'), ((9067, 9082), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (9076, 9082), True, 'import numpy as np\n'), ((9351, 9404), 'timeit.timeit', 'timeit', (['sample_npbuf'], {'setup': 'sample_npbuf', 'number': '(1000)'}), '(sample_npbuf, setup=sample_npbuf, number=1000)\n', (9357, 9404), False, 'from timeit import timeit\n'), ((9428, 9479), 'timeit.timeit', 'timeit', (['sample_tree'], {'setup': 'sample_tree', 'number': '(1000)'}), '(sample_tree, setup=sample_tree, number=1000)\n', (9434, 9479), False, 'from timeit import timeit\n'), ((9711, 9730), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (9723, 9730), False, 'import torch\n'), ((10984, 11003), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (10996, 11003), False, 'import torch\n'), ((4762, 4779), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4777, 4779), True, 'import numpy as np\n'), ((5368, 5381), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (5376, 5381), True, 'import numpy as np\n'), ((6393, 6414), 'numpy.allclose', 'np.allclose', (['ref', 'out'], {}), '(ref, out)\n', (6404, 6414), True, 'import numpy as np\n'), ((9240, 9259), 'numpy.random.rand', 'np.random.rand', (['bsz'], {}), '(bsz)\n', (9254, 9259), True, 'import numpy as np\n'), ((10053, 10069), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (10067, 10069), True, 'import numpy as np\n'), ((11358, 11374), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (11372, 11374), True, 'import numpy as np\n'), ((1930, 1955), 'numpy.array', 'np.array', (['[i, 1, 1, 0, 0]'], {}), '([i, 1, 1, 0, 0])\n', (1938, 1955), True, 'import numpy as np\n'), ((1987, 2016), 'numpy.array', 'np.array', (['[i + 4, 0, 1, 0, 0]'], {}), '([i + 4, 0, 1, 0, 0])\n', (1995, 2016), True, 'import numpy as np\n'), ((11087, 11100), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (11095, 11100), True, 'import numpy as np\n'), ((9797, 9810), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (9805, 9810), True, 'import numpy as np\n'), ((9893, 9906), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (9901, 9906), True, 'import numpy as np\n'), ((9989, 10002), 'numpy.array', 'np.array', 
(['[i]'], {}), '([i])\n', (9997, 10002), True, 'import numpy as np\n')]
|
import pickle
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi
from src.CONSTS import BATCH_SIZE_PRED
def get_train_val_test_data():
create_folder('predictor_data/train_data/')
create_folder('predictor_data/test_data/')
df_data_1 = pd.read_csv('predictor_data/data_clean_a2d.csv')
df_data_2 = pd.read_csv('predictor_data/data_clean_kop.csv')
df_data = pd.concat([df_data_1, df_data_2])
df_data.loc[:, 'Data'] = df_data.SMILES.map(standardize_smi)
# train, val, test split
df_train, df_test \
= train_test_split(df_data, test_size=0.1, random_state=43)
df_train, df_val \
= train_test_split(df_train, test_size=0.1, random_state=43)
df_train.to_csv('predictor_data/train_data/df_train.csv', index=False)
df_test.to_csv('predictor_data/test_data/df_test.csv', index=False)
df_val.to_csv('predictor_data/test_data/df_val.csv', index=False)
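    # The 2nd and 98th percentiles of the training pCHEMBL values are stored and
    # later used by the data iterators to min-max scale the regression targets.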
max_y = np.quantile(df_train.pCHEMBL.values, 0.98)
min_y = np.quantile(df_train.pCHEMBL.values, 0.02)
with open('predictor_data/train_data/' + 'y_max_min.pkl', 'wb') as f:
pickle.dump((min_y, max_y), f)
def get_val_data():
df_val = pd.read_csv('predictor_data/test_data/df_val.csv')
with open('predictor_data/train_data/y_max_min.pkl', 'rb') as handle:
y_min, y_max = pickle.load(handle)
x = []
y = []
for _, row in df_val.iterrows():
x.append(get_encoded_smi(row.Data))
_y = (row.pCHEMBL - y_min) / (y_max - y_min)
y.append(_y)
_data = (np.vstack(x), np.vstack(y))
with open('predictor_data/test_data/' + 'Xy_val.pkl', 'wb') as f:
pickle.dump(_data, f)
def data_iterator_train():
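    # Infinite generator: reshuffles the training set on every pass and yields
    # (x, y) batches of encoded SMILES and min-max scaled pCHEMBL targets of size
    # BATCH_SIZE_PRED, emitting any final partial batch before reshuffling.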
df_train = pd.read_csv('predictor_data/train_data/df_train.csv')
with open('predictor_data/train_data/y_max_min.pkl', 'rb') as handle:
y_min, y_max = pickle.load(handle)
while True:
df_train = df_train.sample(frac=1).reset_index(drop=True)
x = []
y = []
for _, row in df_train.iterrows():
x.append(get_encoded_smi(row.Data))
_y = (row.pCHEMBL - y_min) / (y_max - y_min)
y.append(_y)
if len(x) >= BATCH_SIZE_PRED:
yield (np.vstack(x), np.vstack(y))
x = []
y = []
if x:
yield (np.vstack(x), np.vstack(y))
x = []
y = []
def data_iterator_test(test_df_path):
df_test = pd.read_csv(test_df_path)
with open('predictor_data/train_data/y_max_min.pkl', 'rb') as handle:
y_min, y_max = pickle.load(handle)
x = []
y = []
for _, row in df_test.iterrows():
x.append(get_encoded_smi(row.Data))
_y = (row.pCHEMBL - y_min) / (y_max - y_min)
y.append(_y)
if len(x) >= BATCH_SIZE_PRED:
yield (np.vstack(x), np.vstack(y))
x = []
y = []
if x:
yield (np.vstack(x), np.vstack(y))
x = []
y = []
if __name__ == "__main__":
get_train_val_test_data()
get_val_data()
# df_train = pd.read_csv('predictor_data/train_data/df_train.csv')
# for x, y in data_iterator_test('predictor_data/test_data/df_test.csv'):
# breakpoint()
# print(x.shape)
# # for x, y in data_iterator_train():
# # print(x.shape)
|
[
"src.data_process_utils.get_encoded_smi",
"pickle.dump",
"numpy.quantile",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"src.data_process_utils.create_folder",
"pickle.load",
"pandas.concat",
"numpy.vstack"
] |
[((265, 308), 'src.data_process_utils.create_folder', 'create_folder', (['"""predictor_data/train_data/"""'], {}), "('predictor_data/train_data/')\n", (278, 308), False, 'from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi\n'), ((313, 355), 'src.data_process_utils.create_folder', 'create_folder', (['"""predictor_data/test_data/"""'], {}), "('predictor_data/test_data/')\n", (326, 355), False, 'from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi\n'), ((372, 420), 'pandas.read_csv', 'pd.read_csv', (['"""predictor_data/data_clean_a2d.csv"""'], {}), "('predictor_data/data_clean_a2d.csv')\n", (383, 420), True, 'import pandas as pd\n'), ((437, 485), 'pandas.read_csv', 'pd.read_csv', (['"""predictor_data/data_clean_kop.csv"""'], {}), "('predictor_data/data_clean_kop.csv')\n", (448, 485), True, 'import pandas as pd\n'), ((500, 533), 'pandas.concat', 'pd.concat', (['[df_data_1, df_data_2]'], {}), '([df_data_1, df_data_2])\n', (509, 533), True, 'import pandas as pd\n'), ((663, 720), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df_data'], {'test_size': '(0.1)', 'random_state': '(43)'}), '(df_data, test_size=0.1, random_state=43)\n', (679, 720), False, 'from sklearn.model_selection import train_test_split\n'), ((755, 813), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df_train'], {'test_size': '(0.1)', 'random_state': '(43)'}), '(df_train, test_size=0.1, random_state=43)\n', (771, 813), False, 'from sklearn.model_selection import train_test_split\n'), ((1044, 1086), 'numpy.quantile', 'np.quantile', (['df_train.pCHEMBL.values', '(0.98)'], {}), '(df_train.pCHEMBL.values, 0.98)\n', (1055, 1086), True, 'import numpy as np\n'), ((1099, 1141), 'numpy.quantile', 'np.quantile', (['df_train.pCHEMBL.values', '(0.02)'], {}), '(df_train.pCHEMBL.values, 0.02)\n', (1110, 1141), True, 'import numpy as np\n'), ((1290, 1340), 'pandas.read_csv', 'pd.read_csv', (['"""predictor_data/test_data/df_val.csv"""'], {}), "('predictor_data/test_data/df_val.csv')\n", (1301, 1340), True, 'import pandas as pd\n'), ((1821, 1874), 'pandas.read_csv', 'pd.read_csv', (['"""predictor_data/train_data/df_train.csv"""'], {}), "('predictor_data/train_data/df_train.csv')\n", (1832, 1874), True, 'import pandas as pd\n'), ((2570, 2595), 'pandas.read_csv', 'pd.read_csv', (['test_df_path'], {}), '(test_df_path)\n', (2581, 2595), True, 'import pandas as pd\n'), ((1224, 1254), 'pickle.dump', 'pickle.dump', (['(min_y, max_y)', 'f'], {}), '((min_y, max_y), f)\n', (1235, 1254), False, 'import pickle\n'), ((1438, 1457), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1449, 1457), False, 'import pickle\n'), ((1649, 1661), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (1658, 1661), True, 'import numpy as np\n'), ((1663, 1675), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (1672, 1675), True, 'import numpy as np\n'), ((1755, 1776), 'pickle.dump', 'pickle.dump', (['_data', 'f'], {}), '(_data, f)\n', (1766, 1776), False, 'import pickle\n'), ((1972, 1991), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1983, 1991), False, 'import pickle\n'), ((2693, 2712), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (2704, 2712), False, 'import pickle\n'), ((1534, 1559), 'src.data_process_utils.get_encoded_smi', 'get_encoded_smi', (['row.Data'], {}), '(row.Data)\n', (1549, 1559), False, 'from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi\n'), ((2790, 2815), 
'src.data_process_utils.get_encoded_smi', 'get_encoded_smi', (['row.Data'], {}), '(row.Data)\n', (2805, 2815), False, 'from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi\n'), ((2168, 2193), 'src.data_process_utils.get_encoded_smi', 'get_encoded_smi', (['row.Data'], {}), '(row.Data)\n', (2183, 2193), False, 'from src.data_process_utils import create_folder, standardize_smi, get_encoded_smi\n'), ((3040, 3052), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (3049, 3052), True, 'import numpy as np\n'), ((3054, 3066), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (3063, 3066), True, 'import numpy as np\n'), ((2450, 2462), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (2459, 2462), True, 'import numpy as np\n'), ((2464, 2476), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (2473, 2476), True, 'import numpy as np\n'), ((2948, 2960), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (2957, 2960), True, 'import numpy as np\n'), ((2962, 2974), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (2971, 2974), True, 'import numpy as np\n'), ((2342, 2354), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (2351, 2354), True, 'import numpy as np\n'), ((2356, 2368), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (2365, 2368), True, 'import numpy as np\n')]
|
################
## adaline.py ##
################
# original implementation
# <NAME>, Python Machine Learning, 3rd Edition
#############
## imports ##
#############
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from rktools.monitors import ProgressBar
###############################################################################
## AdalineGD ##
###############################################################################
class AdalineGD(BaseEstimator, ClassifierMixin):
"""
The ADAptive LInear NEuron classifier.
Parameters
----------
* lr: float
Learning rate (between 0.0 and 1.0)
* n_epochs: int
Passes over the training dataset.
* random_state: int
Random number generator seed for random weight initialization.
Attributes
-----------
* w_: 1d-array
Weights after fitting.
* cost_: list
        Sum-of-squares cost function value in each epoch. The convergence
        criterion is no longer the per-epoch classification error but the
        value of the cost function J.
"""
#################
## __init__() ##
#################
    # TODO Pass a potential logger as parameter
def __init__(self, lr=0.01, n_epochs=50, random_state=1):
self.lr = lr
self.n_epochs = n_epochs
self.random_state = random_state
#####################
## init_weights() ##
#####################
def init_weights(self, n_features):
"""
Initialize the weight coefficients
"""
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + n_features)
############
## fit() ##
############
def fit(self, X, y):
"""
Fit training data.
Parameters
----------
* X : {array-like}, shape = [n_examples, n_features]
Training vectors, where n_examples is the number of examples and
n_features is the number of features.
* y : array-like, shape = [n_examples]
Target values.
Returns
-------
* self : object
"""
# check matrices
self.X_, self.y_ = check_X_y(X, y)
# Store the classes seen during fit
self.classes_ = unique_labels(y)
# The algorithm
self.init_weights(self.X_.shape[1])
self.cost_ = []
progress_bar = ProgressBar(max_value = self.n_epochs, desc="AdalineGD Epoch:")
for i in range(self.n_epochs):
net_input = self.net_input(X)
output = self.activation(net_input) # here, no effect
errors = (y - output)
# At each epoch, the coefficients are updated using
# the whole training dataset X, instead of one sample x_i
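            # This is batch gradient descent on J(w) = 1/2 * sum_i (y_i - output_i)^2:
            # dJ/dw_j = -sum_i (y_i - output_i) * x_ij and dJ/dw_0 = -sum_i (y_i - output_i),
            # hence the two update rules below.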
self.w_[1:] += self.lr * X.T.dot(errors)
self.w_[0] += self.lr * errors.sum()
# cost = J(W) = 1/2 * SSE
# with SSE = sum of error^2
cost = (errors**2).sum() / 2.0
self.cost_.append(cost)
progress_bar.update(1)
# end for
progress_bar.close()
return self
##################
## net_input() ##
##################
def net_input(self, X):
"""Calculate net input"""
return np.dot(X, self.w_[1:]) + self.w_[0]
##################
## activation() ##
##################
def activation(self, X):
"""Compute linear activation"""
# Please note that the "activation" method has no effect
# in the code since it is simply an identity function. We
# could write `output = self.net_input(X)` directly instead.
# The purpose of the activation is more conceptual, i.e.,
# in the case of logistic regression
# we could change it to a sigmoid function to implement a logistic regression classifier.
return X
###############
## predict() ##
###############
def predict(self, X):
"""Return class label after unit step"""
# Raise an error if not fitted
check_is_fitted(self)
return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)
# End AdalineGD
################################################################################
## AdalineSGD ##
################################################################################
class AdalineSGD(BaseEstimator, ClassifierMixin):
"""
ADAptive LInear NEuron classifier with SGD.
Parameters
------------
* lr : float
Learning rate (between 0.0 and 1.0)
* n_epochs : int
Passes over the training dataset.
* shuffle : bool (default: True)
Shuffles training data every epoch if True to prevent cycles.
* random_state : int
Random number generator seed for random weight initialization.
Attributes
-----------
* w_ : 1d-array
Weights after fitting.
* cost_ : list
Sum-of-squares cost function value averaged over all training examples in each epoch.
"""
################
## __init__() ##
################
def __init__(self, lr=0.01, n_epochs=10, shuffle=True, random_state=1):
self.lr = lr
self.n_epochs = n_epochs
self.w_initialized = False
self.shuffle = shuffle
self.random_state = random_state
###########################
## init_weights() ##
###########################
def init_weights(self, n_features):
"""
Initialize weights to small random numbers
"""
self.rgen = np.random.RandomState(self.random_state)
self.w_ = self.rgen.normal(loc=0.0, scale=0.01, size=1 + n_features)
self.w_initialized = True
############
## fit() ##
############
def fit(self, X, y):
"""
Fit training data.
Parameters
----------
X : {array-like}, shape = [n_examples, n_features]
Training vectors, where n_examples is the number of examples and
n_features is the number of features.
y : array-like, shape = [n_examples]
Target values.
Returns
-------
self : object
"""
# check matrices
self.X_, self.y_ = check_X_y(X, y)
# Store the classes seen during fit
self.classes_ = unique_labels(y)
# The algorithm
self.init_weights(X.shape[1])
self.cost_ = []
progress_bar = ProgressBar(max_value = self.n_epochs, desc="AdalineSGD Epoch:")
for _ in range(self.n_epochs):
if self.shuffle:
X, y = self._shuffle(X, y)
cost = []
for xi, target in zip(X, y):
cost.append(self._update_weights(xi, target))
avg_cost = sum(cost) / len(y)
self.cost_.append(avg_cost)
progress_bar.update(1)
# end for
progress_bar.close()
return self
###################
## partial_fit() ##
###################
def partial_fit(self, X, y):
"""
Fit training data without reinitializing the weights
"""
if not self.w_initialized:
self.init_weights(X.shape[1])
if y.ravel().shape[0] > 1:
for xi, target in zip(X, y):
self._update_weights(xi, target)
else:
self._update_weights(X, y)
return self
################
## _shuffle() ##
################
def _shuffle(self, X, y):
"""Shuffle training data"""
r = self.rgen.permutation(len(y))
return X[r], y[r]
#######################
## _update_weights() ##
#######################
def _update_weights(self, xi, target):
"""Apply Adaline learning rule to update the weights"""
output = self.activation(self.net_input(xi))
error = (target - output)
self.w_[1:] += self.lr * xi.dot(error)
self.w_[0] += self.lr * error
cost = 0.5 * error**2
return cost
def net_input(self, X):
"""Calculate net input"""
return np.dot(X, self.w_[1:]) + self.w_[0]
def activation(self, X):
"""Compute linear activation"""
return X
###############
## predict() ##
###############
def predict(self, X):
"""
Return class label after unit step
"""
check_is_fitted(self)
return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)
# End of AdalineSGD
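# Minimal usage sketch (not part of the original module): fits both classifiers on a
# tiny synthetic, linearly separable problem with standardized features, to which
# Adaline is sensitive. The names below (X_std, ada_gd, ...) are illustrative only.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.normal(size=(200, 2))
    y = np.where(X[:, 0] + X[:, 1] > 0.0, 1, -1)      # labels in {-1, +1}
    X_std = (X - X.mean(axis=0)) / X.std(axis=0)      # feature standardization
    ada_gd = AdalineGD(lr=0.01, n_epochs=20).fit(X_std, y)
    ada_sgd = AdalineSGD(lr=0.01, n_epochs=20).fit(X_std, y)
    print("AdalineGD  final cost:", ada_gd.cost_[-1])
    print("AdalineSGD final avg cost:", ada_sgd.cost_[-1])
    print("GD  accuracy:", (ada_gd.predict(X_std) == y).mean())
    print("SGD accuracy:", (ada_sgd.predict(X_std) == y).mean())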
|
[
"rktools.monitors.ProgressBar",
"sklearn.utils.validation.check_X_y",
"numpy.random.RandomState",
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.multiclass.unique_labels",
"numpy.dot"
] |
[((1768, 1808), 'numpy.random.RandomState', 'np.random.RandomState', (['self.random_state'], {}), '(self.random_state)\n', (1789, 1808), True, 'import numpy as np\n'), ((2423, 2438), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (2432, 2438), False, 'from sklearn.utils.validation import check_X_y, check_is_fitted\n'), ((2508, 2524), 'sklearn.utils.multiclass.unique_labels', 'unique_labels', (['y'], {}), '(y)\n', (2521, 2524), False, 'from sklearn.utils.multiclass import unique_labels\n'), ((2644, 2705), 'rktools.monitors.ProgressBar', 'ProgressBar', ([], {'max_value': 'self.n_epochs', 'desc': '"""AdalineGD Epoch:"""'}), "(max_value=self.n_epochs, desc='AdalineGD Epoch:')\n", (2655, 2705), False, 'from rktools.monitors import ProgressBar\n'), ((4335, 4356), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (4350, 4356), False, 'from sklearn.utils.validation import check_X_y, check_is_fitted\n'), ((5894, 5934), 'numpy.random.RandomState', 'np.random.RandomState', (['self.random_state'], {}), '(self.random_state)\n', (5915, 5934), True, 'import numpy as np\n'), ((6583, 6598), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (6592, 6598), False, 'from sklearn.utils.validation import check_X_y, check_is_fitted\n'), ((6668, 6684), 'sklearn.utils.multiclass.unique_labels', 'unique_labels', (['y'], {}), '(y)\n', (6681, 6684), False, 'from sklearn.utils.multiclass import unique_labels\n'), ((6806, 6868), 'rktools.monitors.ProgressBar', 'ProgressBar', ([], {'max_value': 'self.n_epochs', 'desc': '"""AdalineSGD Epoch:"""'}), "(max_value=self.n_epochs, desc='AdalineSGD Epoch:')\n", (6817, 6868), False, 'from rktools.monitors import ProgressBar\n'), ((8777, 8798), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (8792, 8798), False, 'from sklearn.utils.validation import check_X_y, check_is_fitted\n'), ((3547, 3569), 'numpy.dot', 'np.dot', (['X', 'self.w_[1:]'], {}), '(X, self.w_[1:])\n', (3553, 3569), True, 'import numpy as np\n'), ((8481, 8503), 'numpy.dot', 'np.dot', (['X', 'self.w_[1:]'], {}), '(X, self.w_[1:])\n', (8487, 8503), True, 'import numpy as np\n')]
|
import numpy as np
from odeintw import odeintw
from epidemioptim.environments.models.base_model import BaseModel
from epidemioptim.utils import *
PATH_TO_DATA = get_repo_path() + '/data/jane_model_data/ScenarioPlanFranceOne16.xlsx'
PATH_TO_HOME_MATRIX = get_repo_path() + '/data/jane_model_data/contactHome.txt'
PATH_TO_SCHOOL_MATRIX = get_repo_path() + '/data/jane_model_data/contactSchool.txt'
PATH_TO_WORK_MATRIX = get_repo_path() + '/data/jane_model_data/contactWork.txt'
PATH_TO_OTHER_MATRIX = get_repo_path() + '/data/jane_model_data/contactOtherPlaces.txt'
PATH_TO_COMORBIDITY_MATRIX = get_repo_path() + '/data/jane_model_data/coMorbidity.txt'
# ODE model
def vaccination_model(y: tuple,
t: float,
A: tuple,
alpha: tuple,
beta: tuple,
c: tuple,
delta: tuple,
epsilon: float,
gamma: tuple,
kappa: tuple,
nu: float,
omega: tuple,
p1: tuple,
p2: tuple,
p3: tuple,
rho: float,
sigma: float,
sigma2: float
):
"""
Parameters
----------
y: tuple
        y = [S1, S2, S3, S4, E21, E22, E23, E31, E32, E33, E41, E42, E43, V11, V21, V31, V41, V12, V22, V32, V42, I2, I3, I4]
Si: # susceptible individuals with i level of infectivity
E2i: # individuals in mild latent state
E3i: # individuals in moderate latent state
E4i: # individuals in severe latent state
Ii: # symptomatic infected individuals with i level of infectivity
        Vi1: # vaccinated people with one dose, i being the immunity level
        Vi2: # vaccinated people with two doses, i being the immunity level
t: int
Timestep.
p1: tuple
Probability to go to mild class for an age group.
p2: tuple
Probability to go to moderate class for an age group.
p3: tuple
Probability to go to severe class for an age group.
alpha: tuple
        Susceptibility of individuals from Sin (i immunity status, n age group).
kappa: tuple
Rates of progress through the pre-infectious period of infection.
gamma: tuple
Recovery rate of infected individuals from Ijm (j immunity status, m age group).
rho: float
Vaccination efficacy for the first dose.
omega: tuple
Waning rate of immunity of individuals from Sin (i immunity status, n age group).
delta: tuple
Disease-induced mortality rate of infected individuals from Ijm (j immunity status, m age group).
A: tuple
Per capita activity counts of individuals in age group n
c: tuple
        Mixing matrix between individuals in age group a and age group n, modified given the mitigation strategy, PPE,
        social distancing and hand washing compliance (k-value)
    sigma: tuple
        First-dose vaccination rate per age group.
    sigma2: tuple
        Second-dose vaccination rate per age group (from one dose to two doses).
Returns
-------
tuple
Next states.
"""
origin = y.T
S1, S2, S3, S4 = origin[0], origin[1], origin[2], origin[3]
E21, E22, E23 = origin[4], origin[5], origin[6]
E31, E32, E33 = origin[7], origin[8], origin[9]
E41, E42, E43 = origin[10], origin[11], origin[12]
V11, V21, V31, V41 = origin[13], origin[14], origin[15], origin[16]
V12, V22, V32, V42 = origin[17], origin[18], origin[19], origin[20]
I2, I3, I4 = origin[21], origin[22], origin[23]
# Infect calculation
T = S1 + S2 + S3 + S4 + E21 + E22 + E23 + E31 + E32 + E33 + E41 + E42 + E43 + V11 + V21 + V31 + V41 + V12 + V22 + V32 + V42 + I2 + I3 + I4
# VOC and infectivity calculation
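    # Xm: infectiousness shed by each age group (I2-I4 weighted by their infectivity beta);
    # Ym = Xm / T: per-capita infectiousness; infect = c^T . Ym: contact-weighted infection
    # pressure on each age group, scaled further by susceptibility alpha and activity A below.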
Xm = sum(np.multiply((beta), np.array((I2,I3,I4)).T).T)
Ym = np.divide(Xm, T)
infect = np.dot(np.array(c).T, Ym)
# Protection from severe disease (new qq)
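    # qq presumably encodes vaccine protection against severe outcomes: for vaccinated
    # individuals a fraction qq of the probability mass is shifted from the more severe
    # disease classes (p2, p3) towards milder ones, giving the modified vectors pv2 and pv3.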
qq = 0.3
pv2 = [(1-0.5*qq)*p2[1], (1-qq)*p2[2]+1/2*qq*p2[1], qq*p2[2]]
pv3 = [0*p2[1], (1-qq)*p3[2]+1/2*qq*p3[1], qq*p3[2]]
pv2 = np.array(pv2)
pv3 = np.array(pv3)
# Susceptible compartments
dS1dt = - sum(p1)*alpha[0]*A[0]*S1*infect + omega[1]*S2 - sigma*rho*S1 + omega[1]*V11
dS2dt = - sum(p2)*alpha[1]*A[1]*S2*infect + omega[2]*S3 - omega[1]*S2 - sigma*rho*S2 + gamma[1]*I2 + omega[2]*V21
dS3dt = - (p3[1]+p3[2])*alpha[2]*A[2]*S3*infect + omega[3]*S4 - omega[2]*S3 - sigma*rho*S3 + gamma[2]*I3 + omega[3]*(V31+V41+V12+V22+V32+V42)
dS4dt = - omega[3]*S4 - sigma*rho*S4 + gamma[3]*I4
# Exposed compartments
# To I2
dE21dt = p1[0]*alpha[0]*A[0]*S1*infect + p2[0]*alpha[1]*A[1]*S2*infect + pv2[0]*epsilon*alpha[1]*A[1]*V11*infect - kappa[1]*E21
dE22dt = kappa[1]*E21 - kappa[2]*E22
dE23dt = kappa[2]*E22 - kappa[3]*E23
# To I3
dE31dt = p1[1]*alpha[0]*A[0]*S1*infect + p2[1]*alpha[1]*A[1]*S2*infect + p3[1]*alpha[2]*A[2]*S3*infect + pv2[1]*epsilon*alpha[1]*A[1]*V11*infect + pv3[1]*epsilon*alpha[2]*A[2]*V21*infect - kappa[1]*E31
dE32dt = kappa[1]*E31 - kappa[2]*E32
dE33dt = kappa[2]*E32 - kappa[3]*E33
# To I4
dE41dt = p1[2]*alpha[0]*A[0]*S1*infect + p2[2]*alpha[1]*A[1]*S2*infect + p3[2]*alpha[2]*A[2]*S3*infect + pv2[2]*epsilon*alpha[1]*A[1]*V11*infect + pv3[2]*epsilon*alpha[2]*A[2]*V21*infect - kappa[1]*E41
dE42dt = kappa[1]*E41 - kappa[2]*E42
dE43dt = kappa[2]*E42 - kappa[3]*E43
# Vaccinated compartments
dV11dt = sigma*rho*S1 - sigma2*rho*V11 - sum(pv2)*epsilon*alpha[1]*A[1]*V11*infect - omega[1]*V11
dV21dt = sigma*rho*S2 - sigma2*rho*V21 - sum(pv3)*epsilon*alpha[2]*A[2]*V21*infect - omega[2]*V21
dV31dt = sigma*rho*S3 - sigma2*rho*V31 - omega[3]*V31
dV41dt = sigma*rho*S4 - sigma2*rho*V41 - omega[3]*V41
dV12dt = sigma2*rho*V11 - omega[3]*V12
dV22dt = sigma2*rho*V21 - omega[3]*V22
dV32dt = sigma2*rho*V31 - omega[3]*V32
dV42dt = sigma2*rho*V41 - omega[3]*V42
# From S to V
dCV11dt = sigma*rho*S1
dCV21dt = sigma*rho*S2
dCV31dt = sigma*rho*S3
dCV41dt = sigma*rho*S4
# From V1 to V2
dCV12dt = sigma2*rho*V11
dCV22dt = sigma2*rho*V21
dCV32dt = sigma2*rho*V31
dCV42dt = sigma2*rho*V41
# Infected compartments
dI2dt = kappa[3]*E23 - delta[1]*I2 - gamma[1]*I2
dI3dt = kappa[3]*E33 - delta[2]*I3 - gamma[2]*I3
dI4dt = kappa[3]*E43 - delta[3]*I4 - gamma[3]*I4
dydt = np.array((dS1dt, dS2dt, dS3dt, dS4dt, dE21dt, dE22dt, dE23dt, dE31dt, dE32dt, dE33dt, dE41dt, dE42dt, dE43dt, dV11dt, dV21dt, dV31dt, dV41dt, dV12dt, dV22dt, dV32dt, dV42dt, dI2dt, dI3dt, dI4dt, dCV11dt, dCV21dt, dCV31dt, dCV41dt, dCV12dt, dCV22dt, dCV32dt, dCV42dt))
return dydt.T
class HeffernanOdeModel16(BaseModel):
def __init__(self,
stochastic=False,
range_delay=None
):
# Groups and raw data
self._age_groups = ['0-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54', '55-59', '60-64', '65-69', '70-74', '75+']
self._pop_size = pd.read_excel(PATH_TO_DATA, sheet_name='population', skiprows=3, usecols=(2,2))['Unnamed: 2']
self.pop_size = dict(zip(self._age_groups, (self._pop_size)))
self.step_list = [0,71,73,76,91,121,152,153,173,182,185,201,213,239,244,274,290,295,303,305,335,349,353,366,369,370,377,381,384,391,397,398,
402,404,405,409,412,418,419,425,426,431,433,440,447,454,456,459,461,465,468,472,475,481,482,486,488,489,494,496,497,501,503,
510,517,524,531,546,552,578,609,639,661,670,677,717,731,762,768,775,782,789,790,796,821]
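        # step_list presumably holds the day indices at which the mitigation level /
        # contact patterns change; it is converted to window lengths below via
        # mitigation_time(self.step_list).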
# Matrices
self.p1 = get_text_file_data(PATH_TO_COMORBIDITY_MATRIX)
self.p2 = get_text_file_data(PATH_TO_COMORBIDITY_MATRIX)
self.p3 = [[0] + sub[1:] for sub in self.p1]
self.work = get_text_file_data(PATH_TO_WORK_MATRIX)
self.other = get_text_file_data(PATH_TO_OTHER_MATRIX)
self.home = get_text_file_data(PATH_TO_HOME_MATRIX)
self.school = get_text_file_data(PATH_TO_SCHOOL_MATRIX)
self.perturbations_matrices = get_perturbations_matrices(PATH_TO_DATA)
self.contact_modifiers = get_contact_modifiers(PATH_TO_DATA)
self.transition_matrices = get_transition_matrices(self.pop_size, self.home, self.school, self.work, self.other)
# Vaccination data
self.whovaccinated = get_target_population()
self._vaccination_coverage = get_coverage(PATH_TO_DATA)
self.vaccination_coverage = self._compute_delta_coverage()
self.active_vaccination = vaccination_active(PATH_TO_DATA)
self.mitigation_windows = mitigation_time(self.step_list)
self.number_doses = [1679218,3008288,6026744,12000000,12000000,12000000,12000000,12000000,12000000,0,0]
self.coverage_threshold = [0, 0, 0, 0, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.8, 0.8, 0.8, 0.8, 0.8]
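        # number_doses and coverage_threshold are assumptions about the rollout scenario:
        # doses presumably available in each vaccination window, and the target coverage
        # per age group (the four youngest age groups have a zero threshold).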
self.dCV1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.dCV2 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.vacStep = 0
self.newstep = 0
self.nbrConf = 0
# Tracking variables
self.step = 0
self.t = 0
self.k = 1
self.stochastic = stochastic
self._all_internal_params_distribs = dict()
self._all_initial_state_distribs = dict()
# Initialize distributions of parameters and initial conditions for all regions
self.define_params_and_initial_state_distributions()
# Sample initial conditions and initial model parameters
internal_params_labels = ['A', 'alpha', 'beta', 'c', 'delta', 'epsilon', 'gamma', 'kappa', 'nu', 'omega', 'p1', 'p2', 'p3', 'rho', 'sigma', 'sigma2']
# Define ODE model
self.internal_model = vaccination_model
super().__init__(internal_states_labels=['S1', 'S2', 'S3', 'S4', 'E21', 'E22', 'E23', 'E31', 'E32', 'E33', 'E41', 'E42', 'E43',
'V11', 'V21', 'V31', 'V41', 'V12', 'V22', 'V32', 'V42', 'I2', 'I3', 'I4', 'CV11', 'CV21', 'CV31', 'CV41', 'CV12', 'CV22', 'CV32', 'CV42'],
internal_params_labels=internal_params_labels,
stochastic=stochastic,
range_delay=range_delay)
def define_params_and_initial_state_distributions(self):
"""
Extract and define distributions of parameters for all age groups
"""
for i in self._age_groups:
self._all_internal_params_distribs[i] = dict(A=np.array(calculate_A_and_c(0, 1, self.contact_modifiers, self.perturbations_matrices, self.transition_matrices)[0]),
alpha=np.array(duplicate_data([1, 2/3, 1/3, 0], 16)).T,
beta=np.array(duplicate_data([0.04, 0.08, 0.008], 16)),
c=calculate_A_and_c(0, 1, self.contact_modifiers, self.perturbations_matrices, self.transition_matrices)[1],
delta=np.array([[0.0, 0.0, 0.0, 0.00001],
[0.0, 0.0, 0.0, 0.00001],
[0.0, 0.0, 0.0, 0.00001],
[0.0, 0.0, 0.0, 0.00001],
[0.0, 0.0, 0.0, 0.00005],
[0.0, 0.0, 0.0, 0.00005],
[0.0, 0.0, 0.0, 0.0002],
[0.0, 0.0, 0.0, 0.0002],
[0.0, 0.0, 0.0, 0.0005],
[0.0, 0.0, 0.0, 0.0005],
[0.0, 0.0, 0.0, 0.002],
[0.0, 0.0, 0.0, 0.002],
[0.0, 0.0, 0.0, 0.007],
[0.0, 0.0, 0.0, 0.007],
[0.0, 0.0, 0.0, 0.019],
[0.0, 0.0, 0.0, 0.083]]).T,
epsilon=1-0.559,
gamma=np.array(duplicate_data([0, 0.2, 0.1, 1/15], 16)).T,
kappa=np.array(duplicate_data([0, 1/1.5, 1/1.5, 1/1.5], 16)).T,
nu=0,
omega=np.array(duplicate_data([0, 1/365, 1/365, 1/365], 16)).T,
p1=np.array(self.p1).T,
p2=np.array(self.p2).T,
p3=np.array(self.p3).T,
rho=0.894,
sigma=np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
sigma2=np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
)
self._all_initial_state_distribs[i] = dict(S20=DiracDist(params=0, stochastic=self.stochastic),
S30=DiracDist(params=0, stochastic=self.stochastic),
S40=DiracDist(params=0, stochastic=self.stochastic),
E210=DiracDist(params=0, stochastic=self.stochastic),
E220=DiracDist(params=0, stochastic=self.stochastic),
E230=DiracDist(params=0, stochastic=self.stochastic),
E310=DiracDist(params=0, stochastic=self.stochastic),
E320=DiracDist(params=0, stochastic=self.stochastic),
E330=DiracDist(params=0, stochastic=self.stochastic),
E410=DiracDist(params=0, stochastic=self.stochastic),
E420=DiracDist(params=0, stochastic=self.stochastic),
E430=DiracDist(params=0, stochastic=self.stochastic),
V110=DiracDist(params=0, stochastic=self.stochastic),
V210=DiracDist(params=0, stochastic=self.stochastic),
V310=DiracDist(params=0, stochastic=self.stochastic),
V410=DiracDist(params=0, stochastic=self.stochastic),
V120=DiracDist(params=0, stochastic=self.stochastic),
V220=DiracDist(params=0, stochastic=self.stochastic),
V320=DiracDist(params=0, stochastic=self.stochastic),
V420=DiracDist(params=0, stochastic=self.stochastic),
I20=DiracDist(params=0, stochastic=self.stochastic),
I30=DiracDist(params=0, stochastic=self.stochastic),
I40=DiracDist(params=0, stochastic=self.stochastic),
CV110=DiracDist(params=0, stochastic=self.stochastic),
CV210=DiracDist(params=0, stochastic=self.stochastic),
CV310=DiracDist(params=0, stochastic=self.stochastic),
CV410=DiracDist(params=0, stochastic=self.stochastic),
CV120=DiracDist(params=0, stochastic=self.stochastic),
CV220=DiracDist(params=0, stochastic=self.stochastic),
CV320=DiracDist(params=0, stochastic=self.stochastic),
CV420=DiracDist(params=0, stochastic=self.stochastic))
def reset(self, delay=None) -> None:
"""
        Resets the model parameters and state, and optionally applies a warm-up delay.
Parameters
----------
delay: int, optional
Number of days the model should be run for before the start of the episode.
Default is 0.
"""
self._sample_model_params()
self._sample_initial_state()
self._reset_state()
self.dCV1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.dCV2 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.vacStep = 0
self.step = 0
self.t = 0
self.k = 1
if self.stochastic:
if delay is not None:
self.delay(random=False, delay=delay)
else:
self.delay()
def _reset_state(self):
"""
Resets model state to initial state.
"""
self.current_state = dict()
for i in self._age_groups:
self.current_state[i] = dict(zip(self.internal_states_labels, np.array([self.initial_state[i]['{}0'.format(s)] for s in self.internal_states_labels])))
def _get_model_params(self) -> tuple:
"""
Get current parameters of the model
Returns
-------
tuple
tuple of the model parameters in the order of the list of labels
"""
return tuple([self.current_internal_params[k] for k in self.internal_params_labels])
def _get_current_state(self):
"""
Get current state in the order of state labels.
"""
state = []
for i in self._age_groups:
state.append([self.current_state[i]['{}'.format(s)] for s in self.internal_states_labels])
return state
def convert_states(self, state):
for i in state.keys():
            state[i] = state[i].tolist()
true_state = dict()
for i in self._age_groups:
true_state[i] = dict()
grp=0
for i in true_state.keys():
for j in state.keys():
true_state[i][j] = state[j][grp]
grp+=1
return true_state
def _sample_initial_state(self):
"""
Samples an initial model state from its distribution (Dirac distributions if self.stochastic is False).
"""
self.initial_state = dict()
for i in self._age_groups:
self.initial_state[i] = dict()
for k in self._all_initial_state_distribs[i].keys():
self.initial_state[i][k] = self._all_initial_state_distribs[i][k].sample()
if i in ['20-24', '25-29', '30-34', '35-39', '40-44', '45-49']:
self.initial_state[i]['I20'] = 10/6
self.initial_state[i]['I30'] = 1/6
# S10 is computed from other states, as the sum of all states equals the population size N
self.initial_state[i]['S10'] = self.pop_size[i] - np.sum([self.initial_state[i]['{}0'.format(s)] for s in self.internal_states_labels[1:]])
def _sample_model_params(self):
"""
Samples parameters of the model from their distribution (Dirac distributions if self.stochastic is False).
"""
self.initial_internal_params = dict()
for k in self._all_internal_params_distribs['0-4'].keys():
self.initial_internal_params[k] = self._all_internal_params_distribs['0-4'][k]
self._reset_model_params()
def _set_current_state(self, current_state):
"""
Set current state to given values.
Parameters
----------
current_state: 1D nd.array
State the current state should be set to.
"""
self.current_state = dict(zip(self.internal_states_labels, current_state.T))
def _compute_delta_coverage(self):
"""
        Compute the target coverage for each period based on the Excel sheet
"""
maxcoverage = [x*100 for x in self._vaccination_coverage]
_deltaCoverage = list(range(len(maxcoverage)))
_deltaCoverage[0] = maxcoverage[0]
for i in range(1, len(maxcoverage)):
if maxcoverage[i] != maxcoverage[i-1]:
_deltaCoverage[i] = maxcoverage[i] - maxcoverage[i-1]
else:
_deltaCoverage[i] = maxcoverage[i-1]
for i in range(len(_deltaCoverage)):
if _deltaCoverage[i] == 0:
_deltaCoverage[i] = 10e-6
for i in range(1,14):
_deltaCoverage[-i] = _deltaCoverage[-14]
return _deltaCoverage
def compute_sigma(self):
"""
        Computes sigma, the vaccination rate, for each age group according to whether it is eligible for vaccination during the current time period
"""
mwl = self.mitigation_windows[self.step]
lowVP = 1
pi = lowVP*(self.vaccination_coverage[self.vacStep]/100)
classes = ['S1', 'S2', 'S3', 'S4']
popGrp = ['S1', 'S2', 'S3', 'S4', 'E21', 'E22', 'E23', 'E31', 'E32', 'E33', 'E41', 'E42',
'E43', 'V11', 'V21', 'V31', 'V41', 'V12', 'V22', 'V32', 'V42', 'I2', 'I3', 'I4']
sigma = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
wcv, Ntot = 0, 0
for n in range(16):
Ntot += sum([self.current_state[self._age_groups[n]]['{}'.format(s)] for s in popGrp])
if self.whovaccinated[self.vacStep][n] == 1:
wcv += sum([self.current_state[self._age_groups[n]]['{}'.format(y)] for y in classes])
g = (pi*Ntot/wcv)
if g>1:
g=0.999999999
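        # g is the fraction of the currently eligible susceptibles to vaccinate over
        # this mitigation window (pi is the coverage target expressed against the whole
        # population); sigma = -ln(1 - g) / window_length then converts this per-window
        # target into a continuous daily vaccination rate.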
for k in range(16):
if self.whovaccinated[self.vacStep+1][k] == 1:
sigma[k] = 1/mwl*(-math.log(1-g))
if sigma[k] < 0:
sigma[k] = 0
for f in range(16):
size = sum([self.current_state[self._age_groups[f]]['{}'.format(s)] for s in popGrp])
if self.dCV1[f]/size >= self.coverage_threshold[f]/0.8944:
sigma[f] = 0
if self.t > 670:
return [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
return sigma
def politic_decision_module(self):
"""
        Change contact matrices and k values according to the number of cases (I4 + half of I3)
4 scenarios:
- no lockdown, holiday
- no holiday, lockdown
- holiday, lockdown
- no holiday, no lockdown
"""
        # AT EACH DATE
total_I4 = 0
for f in range(16):
total_I4 += (sum([self.current_state[self._age_groups[f]]['I3']])*0.5 + sum([self.current_state[self._age_groups[f]]['I4']]))
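        # total_I4 is the severe-case indicator (I4 + 0.5*I3 summed over all age groups);
        # crossing the 37000 threshold triggers a lockdown: k (the contact-scaling factor)
        # is lowered and newstep selects the corresponding contact/perturbation scenario.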
if total_I4 > 37000:
if self.t > 531:
if self.t < 609:
self.newstep = 19
self.k = 0.3
self.newstep = 4
self.nbrConf += 1
#print(self.t, self.nbrConf) # print the number of lockdowns
elif total_I4 < 37000:
if self.t > 531:
if self.t < 609:
self.k = 0.6
self.newstep = 27
elif self.t > 609:
self.k = 0.55
self.newstep = 22
else:
self.k = 0.6
self.newstep = 22
def run_n_steps(self, current_state=None, n=1, labelled_states=False):
"""
Runs the model for n steps
Parameters
----------
current_state: 1D nd.array
Current model state.
n: int
Number of steps the model should be run for.
labelled_states: bool
Whether the result should be a dict with state labels or a nd array.
Returns
-------
dict or 2D nd.array
Returns a dict if labelled_states is True, where keys are state labels.
Returns an array of size (n, n_states) of the last n model states.
"""
if current_state is None:
current_state = np.array(self._get_current_state())
for f in range(16):
# Update the number of people vaccinated with 1 or 2 doses
self.dCV1[f] = sum([self.current_state[self._age_groups[f]]['{}'.format(s)] for s in ['CV11', 'CV21', 'CV31', 'CV41']])
self.dCV2[f] = sum([self.current_state[self._age_groups[f]]['{}'.format(s)] for s in ['CV12', 'CV22', 'CV32', 'CV42']])
if(self.t == self.step_list[self.step]):
self.k = k_value(self.t)
A_c = calculate_A_and_c(self.step, self.k, self.contact_modifiers, self.perturbations_matrices, self.transition_matrices)
self.current_internal_params['A'], self.current_internal_params['c'] = np.array(A_c[0]), A_c[1]
self.current_internal_params['nu'] = nu_value(self.t)
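            # After day 369 (the plots below mark day 370 as the start of the vaccination
            # campaign), second doses are given at a fixed rate of 1/42 per day and the
            # lockdown policy module adjusts k and the contact matrices.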
if self.t > 369:
# To uncomment if we want to run the initial model
# sigma = self.compute_sigma()
# self.current_internal_params['sigma'] = np.array(sigma)
self.current_internal_params['sigma2'] = np.array(duplicate_data(1/42, 16))
self.k = k_value(self.t)
##### TO COMMENT ####
# if we want to run the initial model
self.politic_decision_module()
A_c = calculate_A_and_c(self.newstep, self.k, self.contact_modifiers, self.perturbations_matrices, self.transition_matrices)
self.current_internal_params['A'], self.current_internal_params['c'] = np.array(A_c[0]), A_c[1]
##### END #####
self.vacStep += 1
self.step += 1
# Use the odeint library to run the ODE model
z = odeintw(self.internal_model, current_state, np.linspace(0, n, n + 1), args=(self._get_model_params()))
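        # z contains n+1 rows (integration times 0..n); only the final row is kept as
        # the new current state below.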
self._set_current_state(current_state=z[-1].copy()) # save new current state
self.t += 1
self.current_state = self.convert_states(self.current_state)
#print(self.nbrConf)
# format results
if labelled_states:
return self._convert_to_labelled_states(np.atleast_2d(z[1:]))
else:
return np.atleast_2d(z[1:])
if __name__ == '__main__':
# Get model
model = HeffernanOdeModel16(stochastic=False)
labels = ['0-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54', '55-59', '60-64', '65-69', '70-74', '75+']
# Run simulation
simulation_horizon = 821
model_states = []
for i in range(simulation_horizon):
model_state = model.run_n_steps()
model_states += model_state.tolist()
# Plot
time = np.arange(simulation_horizon)
    plot_preds(t=time,states=np.array(model_states).transpose()[23], title="Evolution of the number of severe incident COVID-19 cases (I$_4$) with vaccination")
# Plot hospitalizations (I4) and cases (I4 + half of I3)
i4tot = []
castot = []
for i in model_states:
tot = 0
tat = 0
for j in i:
tat += j[23]
tot += j[22]*0.5 + j[23]
i4tot.append(tot)
castot.append(tat)
plt.plot(time, np.array(i4tot), label='I$_4$ + 0.5*I$_3$')
plt.plot(time, np.array(castot), label='I$_4$', color='red')
    # plt.plot(np.linspace(142, 579, (579-142)), (np.array(get_incidence())), label='SIDEP data')
plt.axvline(x=370, label='Beginning of vaccination campaign', color='red', linewidth=1, linestyle='--')
    plt.axvline(x=631, label='End of the first dose', linewidth=1, linestyle='--')
# plt.xlabel("Temps (en jours)")
# plt.ylabel(r'Nombre de personnes hospitalisées')
# plt.legend()
# plt.title("Évolution du nombre de cas incident modérés et sévères (I$_3$ + I$_4$) de COVID-19 avec vaccination")
plt.show()
|
[
"numpy.divide",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.atleast_2d"
] |
[((3881, 3897), 'numpy.divide', 'np.divide', (['Xm', 'T'], {}), '(Xm, T)\n', (3890, 3897), True, 'import numpy as np\n'), ((4130, 4143), 'numpy.array', 'np.array', (['pv2'], {}), '(pv2)\n', (4138, 4143), True, 'import numpy as np\n'), ((4154, 4167), 'numpy.array', 'np.array', (['pv3'], {}), '(pv3)\n', (4162, 4167), True, 'import numpy as np\n'), ((6454, 6733), 'numpy.array', 'np.array', (['(dS1dt, dS2dt, dS3dt, dS4dt, dE21dt, dE22dt, dE23dt, dE31dt, dE32dt, dE33dt,\n dE41dt, dE42dt, dE43dt, dV11dt, dV21dt, dV31dt, dV41dt, dV12dt, dV22dt,\n dV32dt, dV42dt, dI2dt, dI3dt, dI4dt, dCV11dt, dCV21dt, dCV31dt, dCV41dt,\n dCV12dt, dCV22dt, dCV32dt, dCV42dt)'], {}), '((dS1dt, dS2dt, dS3dt, dS4dt, dE21dt, dE22dt, dE23dt, dE31dt,\n dE32dt, dE33dt, dE41dt, dE42dt, dE43dt, dV11dt, dV21dt, dV31dt, dV41dt,\n dV12dt, dV22dt, dV32dt, dV42dt, dI2dt, dI3dt, dI4dt, dCV11dt, dCV21dt,\n dCV31dt, dCV41dt, dCV12dt, dCV22dt, dCV32dt, dCV42dt))\n', (6462, 6733), True, 'import numpy as np\n'), ((28047, 28076), 'numpy.arange', 'np.arange', (['simulation_horizon'], {}), '(simulation_horizon)\n', (28056, 28076), True, 'import numpy as np\n'), ((28546, 28561), 'numpy.array', 'np.array', (['i4tot'], {}), '(i4tot)\n', (28554, 28561), True, 'import numpy as np\n'), ((28609, 28625), 'numpy.array', 'np.array', (['castot'], {}), '(castot)\n', (28617, 28625), True, 'import numpy as np\n'), ((3918, 3929), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (3926, 3929), True, 'import numpy as np\n'), ((27128, 27152), 'numpy.linspace', 'np.linspace', (['(0)', 'n', '(n + 1)'], {}), '(0, n, n + 1)\n', (27139, 27152), True, 'import numpy as np\n'), ((27551, 27571), 'numpy.atleast_2d', 'np.atleast_2d', (['z[1:]'], {}), '(z[1:])\n', (27564, 27571), True, 'import numpy as np\n'), ((26091, 26107), 'numpy.array', 'np.array', (['A_c[0]'], {}), '(A_c[0])\n', (26099, 26107), True, 'import numpy as np\n'), ((27496, 27516), 'numpy.atleast_2d', 'np.atleast_2d', (['z[1:]'], {}), '(z[1:])\n', (27509, 27516), True, 'import numpy as np\n'), ((3845, 3867), 'numpy.array', 'np.array', (['(I2, I3, I4)'], {}), '((I2, I3, I4))\n', (3853, 3867), True, 'import numpy as np\n'), ((13621, 13716), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0])\n', (13629, 13716), True, 'import numpy as np\n'), ((13778, 13873), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0])\n', (13786, 13873), True, 'import numpy as np\n'), ((26899, 26915), 'numpy.array', 'np.array', (['A_c[0]'], {}), '(A_c[0])\n', (26907, 26915), True, 'import numpy as np\n'), ((11258, 11679), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, 1e-05], [\n 0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, 5e-05], [0.0, 0.0, 0.0, 5e-05],\n [0.0, 0.0, 0.0, 0.0002], [0.0, 0.0, 0.0, 0.0002], [0.0, 0.0, 0.0, \n 0.0005], [0.0, 0.0, 0.0, 0.0005], [0.0, 0.0, 0.0, 0.002], [0.0, 0.0, \n 0.0, 0.002], [0.0, 0.0, 0.0, 0.007], [0.0, 0.0, 0.0, 0.007], [0.0, 0.0,\n 0.0, 0.019], [0.0, 0.0, 0.0, 0.083]]'], {}), '([[0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, \n 1e-05], [0.0, 0.0, 0.0, 1e-05], [0.0, 0.0, 0.0, 5e-05], [0.0, 0.0, 0.0,\n 5e-05], [0.0, 0.0, 0.0, 0.0002], [0.0, 0.0, 0.0, 0.0002], [0.0, 0.0, \n 0.0, 0.0005], [0.0, 0.0, 0.0, 0.0005], [0.0, 0.0, 0.0, 
0.002], [0.0, \n 0.0, 0.0, 0.002], [0.0, 0.0, 0.0, 0.007], [0.0, 0.0, 0.0, 0.007], [0.0,\n 0.0, 0.0, 0.019], [0.0, 0.0, 0.0, 0.083]])\n', (11266, 11679), True, 'import numpy as np\n'), ((13307, 13324), 'numpy.array', 'np.array', (['self.p1'], {}), '(self.p1)\n', (13315, 13324), True, 'import numpy as np\n'), ((13388, 13405), 'numpy.array', 'np.array', (['self.p2'], {}), '(self.p2)\n', (13396, 13405), True, 'import numpy as np\n'), ((13469, 13486), 'numpy.array', 'np.array', (['self.p3'], {}), '(self.p3)\n', (13477, 13486), True, 'import numpy as np\n'), ((28106, 28128), 'numpy.array', 'np.array', (['model_states'], {}), '(model_states)\n', (28114, 28128), True, 'import numpy as np\n')]
|
# from matplotlib import rc
import matplotlib.cm as mplcm
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
from particle.statistics import (
calculate_avg_vel,
calculate_l1_convergence,
moving_average,
)
from particle.processing import (
get_master_yaml,
get_parameter_range,
match_parameters,
load_traj_data,
)
# Standard plotting choices
# rc("text", usetex=True)
sns.set(style="white", context="talk")
search_parameters = {
# "scaling": "Local",
# "D": 0.25,
# "phi": "Gamma",
# "dt": 0.005,
# "G": "Smooth",
# "option": "numba",
# "initial_dist_x": "one_cluster",
# "T_end": 200.0,
# "initial_dist_v": "pos_normal_dn",
# "particle_count": 600,
} # {"particle_count": 600}
# os.chdir("D:/InteractingParticleSystems/noisysystem_temp")
os.chdir("E:/")
# os.chdir("/Volumes/Extreme SSD/InteractingParticleSystems/noisysystem_temp")
# Path to YAML file relative to current directory
yaml_path = "./TimestepExperiments/LowGammaLoweringTimestepLowParticles"
# "../Experiments/one_cluster_low_gamma_ten_runs"
history = get_master_yaml(yaml_path)
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(10, 3), sharex=True)
cm = plt.get_cmap("coolwarm")
cNorm = colors.DivergingNorm(vmin=0.01, vcenter=0.05, vmax=0.25)
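# Note: colors.DivergingNorm was renamed to TwoSlopeNorm in later matplotlib releases.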
scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
gammas = get_parameter_range("gamma", history)
# np.array([0.25]) # np.arange(0.05, 0.15, 0.05)
# np.concatenate(([0.01], np.arange(0.05, 0.3, 0.05)))
# np.arange(0.01, 0.3, 0.05)
# np.concatenate(
# ([0.01], np.arange(0.05, 0.2, 0.05))
# )
for gamma in gammas:
search_parameters["gamma"] = gamma
file_names = match_parameters(search_parameters, history)
for idx, file_name in enumerate(file_names):
print(file_name)
t, x, v = load_traj_data(file_name, data_path="Experiments/Data.nosync/")
error = calculate_l1_convergence(t, x, v)
avg_vel = calculate_avg_vel(t, x, v)
if idx == 0:
avg_vel_store = np.zeros((len(file_names), len(avg_vel)))
error_store = np.zeros((len(file_names), len(error)))
ax1.plot(
t,
error,
color=scalarMap.to_rgba(history[file_name]["gamma"]),
label=f"{history[file_name]['gamma']}",
alpha=0.1,
zorder=1,
)
ax2.plot(
t,
avg_vel,
color=scalarMap.to_rgba(history[file_name]["gamma"]),
label=f"{history[file_name]['gamma']}",
alpha=0.1,
zorder=1,
)
error_store[idx, :] = error
avg_vel_store[idx, :] = avg_vel
# ax1.plot(
# t,
# np.mean(error_store, axis=0),
# color=scalarMap.to_rgba(history[file_name]["gamma"]),
# label=f"{history[file_name]['gamma']}",
# alpha=0.8,
# zorder=2,
# )
#
# ax2.plot(
# t,
# np.mean(avg_vel_store, axis=0),
# color=scalarMap.to_rgba(history[file_name]["gamma"]),
# label=f"{history[file_name]['gamma']}",
# alpha=0.8,
# zorder=2,
# )
expected_errors = {
"480": 7.52,
"600": 6.69,
"700": 6.26,
"1000": 5.25,
}
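# expected_errors is keyed by particle count, so search_parameters must contain
# "particle_count" (currently commented out above) for the lookup below to work.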
exp_error = expected_errors[str(search_parameters["particle_count"])]
ax1.plot([0, t[-1]], [exp_error, exp_error], "k--", alpha=0.2)
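# Overlay the run-averaged curves (20-step moving average, hence the t[19:] offset)
# for the last gamma value processed above.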
ax1.plot(
t[19:], moving_average(np.mean(error_store, axis=0), n=20), "r",
)
ax2.plot([0, t[-1]], [1, 1], "k--", alpha=0.2)
ax2.plot(
t[19:], moving_average(np.mean(avg_vel_store, axis=0), n=20), "r",
)
print(
f"Final difference in distance is {moving_average(np.mean(error_store, axis=0), n=20)[-1] - exp_error}"
)
print(
f"Final difference in velocity is {1- moving_average(np.mean(avg_vel_store, axis=0), n=20)[-1]}"
)
ax1.set(xlabel="Time", ylabel=r"$\ell^1$ Error")
ax2.set(xlabel="Time", ylabel=r"$\bar{M}^N(t)$")
# cbar = fig.colorbar(scalarMap, ticks=np.arange(0, max(gammas), 0.05))
# cbar.set_label(r"Interaction $\gamma$", rotation=270)
# cbar.ax.get_yaxis().labelpad = 15
plt.subplots_adjust(left=0.07, right=0.97, bottom=0.15, top=0.9, wspace=0.23)
plt.tight_layout()
plt.show()
# fig.savefig(f"OneClusterVaryGamma_longrun_log.jpg", dpi=300)
|
[
"matplotlib.pyplot.tight_layout",
"particle.processing.get_parameter_range",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"matplotlib.cm.ScalarMappable",
"particle.processing.load_traj_data",
"particle.statistics.calculate_l1_convergence",
"matplotlib.pyplot.subplots",
"matplotlib.colors.DivergingNorm",
"particle.processing.match_parameters",
"particle.statistics.calculate_avg_vel",
"numpy.mean",
"particle.processing.get_master_yaml",
"matplotlib.pyplot.subplots_adjust",
"seaborn.set",
"os.chdir"
] |
[((465, 503), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""', 'context': '"""talk"""'}), "(style='white', context='talk')\n", (472, 503), True, 'import seaborn as sns\n'), ((878, 893), 'os.chdir', 'os.chdir', (['"""E:/"""'], {}), "('E:/')\n", (886, 893), False, 'import os\n'), ((1157, 1183), 'particle.processing.get_master_yaml', 'get_master_yaml', (['yaml_path'], {}), '(yaml_path)\n', (1172, 1183), False, 'from particle.processing import get_master_yaml, get_parameter_range, match_parameters, load_traj_data\n'), ((1203, 1251), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 3)', 'sharex': '(True)'}), '(1, 2, figsize=(10, 3), sharex=True)\n', (1215, 1251), True, 'import matplotlib.pyplot as plt\n'), ((1257, 1281), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (1269, 1281), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1346), 'matplotlib.colors.DivergingNorm', 'colors.DivergingNorm', ([], {'vmin': '(0.01)', 'vcenter': '(0.05)', 'vmax': '(0.25)'}), '(vmin=0.01, vcenter=0.05, vmax=0.25)\n', (1310, 1346), True, 'import matplotlib.colors as colors\n'), ((1359, 1400), 'matplotlib.cm.ScalarMappable', 'mplcm.ScalarMappable', ([], {'norm': 'cNorm', 'cmap': 'cm'}), '(norm=cNorm, cmap=cm)\n', (1379, 1400), True, 'import matplotlib.cm as mplcm\n'), ((1410, 1447), 'particle.processing.get_parameter_range', 'get_parameter_range', (['"""gamma"""', 'history'], {}), "('gamma', history)\n", (1429, 1447), False, 'from particle.processing import get_master_yaml, get_parameter_range, match_parameters, load_traj_data\n'), ((4107, 4184), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.07)', 'right': '(0.97)', 'bottom': '(0.15)', 'top': '(0.9)', 'wspace': '(0.23)'}), '(left=0.07, right=0.97, bottom=0.15, top=0.9, wspace=0.23)\n', (4126, 4184), True, 'import matplotlib.pyplot as plt\n'), ((4185, 4203), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4201, 4203), True, 'import matplotlib.pyplot as plt\n'), ((4204, 4214), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4212, 4214), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1772), 'particle.processing.match_parameters', 'match_parameters', (['search_parameters', 'history'], {}), '(search_parameters, history)\n', (1744, 1772), False, 'from particle.processing import get_master_yaml, get_parameter_range, match_parameters, load_traj_data\n'), ((1865, 1928), 'particle.processing.load_traj_data', 'load_traj_data', (['file_name'], {'data_path': '"""Experiments/Data.nosync/"""'}), "(file_name, data_path='Experiments/Data.nosync/')\n", (1879, 1928), False, 'from particle.processing import get_master_yaml, get_parameter_range, match_parameters, load_traj_data\n'), ((1945, 1978), 'particle.statistics.calculate_l1_convergence', 'calculate_l1_convergence', (['t', 'x', 'v'], {}), '(t, x, v)\n', (1969, 1978), False, 'from particle.statistics import calculate_avg_vel, calculate_l1_convergence, moving_average\n'), ((1997, 2023), 'particle.statistics.calculate_avg_vel', 'calculate_avg_vel', (['t', 'x', 'v'], {}), '(t, x, v)\n', (2014, 2023), False, 'from particle.statistics import calculate_avg_vel, calculate_l1_convergence, moving_average\n'), ((3443, 3471), 'numpy.mean', 'np.mean', (['error_store'], {'axis': '(0)'}), '(error_store, axis=0)\n', (3450, 3471), True, 'import numpy as np\n'), ((3571, 3601), 'numpy.mean', 'np.mean', (['avg_vel_store'], {'axis': '(0)'}), '(avg_vel_store, axis=0)\n', (3578, 3601), True, 'import 
numpy as np\n'), ((3678, 3706), 'numpy.mean', 'np.mean', (['error_store'], {'axis': '(0)'}), '(error_store, axis=0)\n', (3685, 3706), True, 'import numpy as np\n'), ((3798, 3828), 'numpy.mean', 'np.mean', (['avg_vel_store'], {'axis': '(0)'}), '(avg_vel_store, axis=0)\n', (3805, 3828), True, 'import numpy as np\n')]
|
from train_lstm import JigsawLSTMModel, CONFIG
from pre_process import encode_sentence
import numpy as np
import torch
import pandas as pd
from tqdm import tqdm
import gc
import ast, emoji, string, re
from torch.utils.data import Dataset, DataLoader
# PyTorch Lightning
import pytorch_lightning as pl
MODEL_PATHS = [
'../models/checkpoints/lstm/fold_0_lstm.ckpt',
'../models/checkpoints/lstm/fold_1_lstm.ckpt',
'../models/checkpoints/lstm/fold_2_lstm.ckpt',
'../models/checkpoints/lstm/fold_3_lstm.ckpt',
'../models/checkpoints/lstm/fold_4_lstm.ckpt'
]
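# One checkpoint per cross-validation fold; inference() below averages their predictions.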
class JigsawEncodedDataset(Dataset):
def __init__(self, df):
self.X = df["encoded"]
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
return {"encoding":torch.from_numpy(self.X.loc[idx].astype(np.int32))}
def valid_fn(model, dataloader1, dataloader2, device):
model.eval()
model.freeze()
model=model.to(device)
dataset_size = 0
running_loss = 0.0
LT_PREDS = []
MT_PREDS = []
bar = tqdm(enumerate(dataloader1), total=len(dataloader1))
for step, data in bar:
enc = data['encoding']
_, outputs = model(enc.to(device))
MT_PREDS.append(outputs.view(-1).cpu().detach().numpy())
bar = tqdm(enumerate(dataloader2), total=len(dataloader2))
for step, data in bar:
enc = data['encoding']
_, outputs = model(enc.to(device))
LT_PREDS.append(outputs.view(-1).cpu().detach().numpy())
gc.collect()
return np.concatenate(LT_PREDS),np.concatenate(MT_PREDS)
def inference(model_paths, dataloader1, dataloader2, device):
final_preds1,final_preds2 = [],[]
for i, path in enumerate(model_paths):
model=JigsawLSTMModel.load_from_checkpoint(
checkpoint_path=path,
n_classes=CONFIG['num_classes'],
vocab_size=CONFIG['vocab_size'],embedding_dim=CONFIG['embedding_dim'],hidden_dim=CONFIG['hidden_dim'],num_layers=CONFIG['num_layers']
)
print(f"Getting predictions for model {i+1}")
lt_preds,mt_preds = valid_fn(model, dataloader1, dataloader2, device)
final_preds1.append(lt_preds)
final_preds2.append(mt_preds)
final_preds1 = np.array(final_preds1)
final_preds1 = np.mean(final_preds1, axis=0)
final_preds2 = np.array(final_preds2)
final_preds2 = np.mean(final_preds2, axis=0)
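    # Validation score: fraction of pairs where the less-toxic comment (final_preds1)
    # receives a lower predicted score than the more-toxic one (final_preds2).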
print(f'val is : {(final_preds1 < final_preds2).mean()}')
if __name__=="__main__":
df1 = pd.read_csv("../input/jigsaw-toxic-severity-rating/validation_data_more_toxic.csv")
df1['encoded']=df1['encoded'].apply(lambda x: np.fromstring(x, dtype=int, sep=' '))
df2 = pd.read_csv("../input/jigsaw-toxic-severity-rating/validation_data_less_toxic.csv")
df2['encoded']=df2['encoded'].apply(lambda x: np.fromstring(x, dtype=int, sep=' '))
test_dataset1 = JigsawEncodedDataset(df1)
test_loader1= DataLoader(test_dataset1, batch_size=CONFIG['valid_batch_size'],
num_workers=8, shuffle=False, pin_memory=True)
test_dataset2 = JigsawEncodedDataset(df2)
test_loader2= DataLoader(test_dataset2, batch_size=CONFIG['valid_batch_size'],
num_workers=8, shuffle=False, pin_memory=True)
inference(MODEL_PATHS, test_loader1, test_loader2, CONFIG['device'])
|
[
"train_lstm.JigsawLSTMModel.load_from_checkpoint",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"gc.collect",
"numpy.mean",
"numpy.array",
"numpy.fromstring",
"numpy.concatenate"
] |
[((1535, 1547), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1545, 1547), False, 'import gc\n'), ((2256, 2278), 'numpy.array', 'np.array', (['final_preds1'], {}), '(final_preds1)\n', (2264, 2278), True, 'import numpy as np\n'), ((2298, 2327), 'numpy.mean', 'np.mean', (['final_preds1'], {'axis': '(0)'}), '(final_preds1, axis=0)\n', (2305, 2327), True, 'import numpy as np\n'), ((2347, 2369), 'numpy.array', 'np.array', (['final_preds2'], {}), '(final_preds2)\n', (2355, 2369), True, 'import numpy as np\n'), ((2389, 2418), 'numpy.mean', 'np.mean', (['final_preds2'], {'axis': '(0)'}), '(final_preds2, axis=0)\n', (2396, 2418), True, 'import numpy as np\n'), ((2521, 2609), 'pandas.read_csv', 'pd.read_csv', (['"""../input/jigsaw-toxic-severity-rating/validation_data_more_toxic.csv"""'], {}), "(\n '../input/jigsaw-toxic-severity-rating/validation_data_more_toxic.csv')\n", (2532, 2609), True, 'import pandas as pd\n'), ((2703, 2791), 'pandas.read_csv', 'pd.read_csv', (['"""../input/jigsaw-toxic-severity-rating/validation_data_less_toxic.csv"""'], {}), "(\n '../input/jigsaw-toxic-severity-rating/validation_data_less_toxic.csv')\n", (2714, 2791), True, 'import pandas as pd\n'), ((2940, 3055), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset1'], {'batch_size': "CONFIG['valid_batch_size']", 'num_workers': '(8)', 'shuffle': '(False)', 'pin_memory': '(True)'}), "(test_dataset1, batch_size=CONFIG['valid_batch_size'],\n num_workers=8, shuffle=False, pin_memory=True)\n", (2950, 3055), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3146, 3261), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset2'], {'batch_size': "CONFIG['valid_batch_size']", 'num_workers': '(8)', 'shuffle': '(False)', 'pin_memory': '(True)'}), "(test_dataset2, batch_size=CONFIG['valid_batch_size'],\n num_workers=8, shuffle=False, pin_memory=True)\n", (3156, 3261), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1559, 1583), 'numpy.concatenate', 'np.concatenate', (['LT_PREDS'], {}), '(LT_PREDS)\n', (1573, 1583), True, 'import numpy as np\n'), ((1584, 1608), 'numpy.concatenate', 'np.concatenate', (['MT_PREDS'], {}), '(MT_PREDS)\n', (1598, 1608), True, 'import numpy as np\n'), ((1768, 2012), 'train_lstm.JigsawLSTMModel.load_from_checkpoint', 'JigsawLSTMModel.load_from_checkpoint', ([], {'checkpoint_path': 'path', 'n_classes': "CONFIG['num_classes']", 'vocab_size': "CONFIG['vocab_size']", 'embedding_dim': "CONFIG['embedding_dim']", 'hidden_dim': "CONFIG['hidden_dim']", 'num_layers': "CONFIG['num_layers']"}), "(checkpoint_path=path, n_classes=CONFIG\n ['num_classes'], vocab_size=CONFIG['vocab_size'], embedding_dim=CONFIG[\n 'embedding_dim'], hidden_dim=CONFIG['hidden_dim'], num_layers=CONFIG[\n 'num_layers'])\n", (1804, 2012), False, 'from train_lstm import JigsawLSTMModel, CONFIG\n'), ((2655, 2691), 'numpy.fromstring', 'np.fromstring', (['x'], {'dtype': 'int', 'sep': '""" """'}), "(x, dtype=int, sep=' ')\n", (2668, 2691), True, 'import numpy as np\n'), ((2837, 2873), 'numpy.fromstring', 'np.fromstring', (['x'], {'dtype': 'int', 'sep': '""" """'}), "(x, dtype=int, sep=' ')\n", (2850, 2873), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2020/11/4 16:04
# @Email : <EMAIL>
# @Software: PyCharm
# @License: BSD 3-Clause
from itertools import combinations_with_replacement
import torch.nn.functional as F
import numpy as np
import os
import torch
from numpy import random
from torch import nn
from torch.nn import Module
from torch.utils import tensorboard
from cams.cam3d import GradCAM3dpp, GradCAM3d
from cams.nnn import Indexes
class Moudle1(Module):
    def __init__(self, *args): # initialize
super(Moudle1, self).__init__()
D_in, dens_out = 1, 22
D1, D2 = 6, 1
dense1, dense2 = 27, 64
AvgPool3d_x, AvgPool3d_y, AvgPool3d_z =10,10,10
        self.link = D2 * AvgPool3d_x * AvgPool3d_y * AvgPool3d_z
model_conv = nn.Sequential(
# Indexes(D_in, D2,(10,10,10)),
nn.Conv3d(D_in, D2, 1, stride=1, padding=0),
# nn.BatchNorm3d(D2),
# nn.ReLU(True),
# nn.MaxPool3d(3, stride=1, padding=1),
# nn.Dropout3d()
)
model_sigmod = nn.Sigmoid()
model_Linear = nn.Sequential(
nn.ReLU(True),
nn.Dropout(),
nn.Linear(self.link, dens_out),
nn.ReLU(True),
# nn.Dropout(),
# nn.Linear(dens_out, dens_out),
# nn.ReLU(True),
# nn.Dropout(),
# nn.Linear(dense2, dens_out),
)
self.model_conv = model_conv
self.model_sigmod = model_sigmod
self.avgpool = nn.AdaptiveAvgPool3d((AvgPool3d_x, AvgPool3d_y, AvgPool3d_z))
self.model_Linear = model_Linear
def forward(self, x, t=1):
if t == 0:
x = self.model_conv(x)
print("conv out", x.shape)
x = self.model_sigmod(x)
x = self.avgpool(x)
print("avgpool", x.shape)
x = torch.flatten(x, start_dim=1, end_dim=-1)
print("flatten", x.shape)
x = self.model_Linear(x)
print("linear", x.shape)
else:
x = self.model_conv(x)
x = self.avgpool(x)
x = torch.flatten(x, start_dim=1, end_dim=-1)
x = self.model_Linear(x)
return x
def run(train, test=None):
if test is None:
test = train
train_x, train_y= train
model = Moudle1()
device = torch.device('cuda:0')
# device = torch.device('cpu')
model.to(device)
learning_rate = 1e-4
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01) # optimization package providing general-purpose optimizers such as SGD and Adam
#
    loss_fn = torch.nn.CrossEntropyLoss(reduction='mean') # measures how close the actual output is to the expected output
    # loss_fn = torch.nn.MSELoss(reduction='mean') # measures how close the actual output is to the expected output
for t in range(20000):
train_x = train_x.to(device)
train_y = train_y.to(device)
y_pred = model(train_x, t)
# y_pred = y_pred*we
# prob = F.softmax(y_pred, dim=1)
# prob = F.relu(y_pred)
# _, idx = torch.max(prob, dim=1)
loss = loss_fn(y_pred,train_y)
if loss.item() < 0.001:
break
# if t % 10 == 9:
print(t, loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
# if t%50==0:
writer.add_scalar('loss', loss.item(), global_step=t)
test_x, test_y = test
test_x = test_x.to(device)
test_y = test_y.to(device)
y_pred = model(test_x)
loss2 = loss_fn(y_pred, test_y)
print(loss2.item())
writer.close()
return model
random.seed(0)
torch.random.manual_seed(0)
def get():
x = random.random((120, 10, 10, 10)) + 0.00001
# key = np.full((3,3,3),0.5)
# key[1,1,1]=1.0
# iter = list(combinations_with_replacement(range(8), 3))
# y = []
# for ai, index in enumerate(iter):
# i, j, k = index
# print(ai, index)
# x[ai, i:i + 3, j:j + 3, k:k + 3] = key
# # x[ai, i:i + 3, j:j + 3, k:k + 3] = x[ai, i:i + 3, j:j + 3, k:k + 3] + key
# l1, l2, l3 = random.randint(0, 8, 3)
# x[ai, l1:l1 + 3, l2:l2 + 3, l3:l3 + 3] = x[ai, l1:l1 + 3, l2:l2 + 3, l3:l3 + 3] + key
# # y.append((i ** 2 + j ** 2 + k ** 2) ** 0.5)
# y.append((i + j + k))
iter = list(combinations_with_replacement(range(1,9), 3))
y = []
for ai, index in enumerate(iter):
i, j, k = index
print(ai, index)
x[ai, i, j, k] = 1.0
# x[ai, i:i + 3, j:j + 3, k:k + 3] = x[ai, i:i + 3, j:j + 3, k:k + 3] + key
l1, l2, l3 = random.randint(1, 9, 3)
x[ai, l1, l2, l3] = 1.0
# y.append((i ** 2 + j ** 2 + k ** 2) ** 0.5)
y.append((i + j + k-3))
x = torch.tensor(x)
x = x.unsqueeze(dim=1)
y = torch.tensor(y).reshape((-1, 1))
x = x.type(torch.float32)
y = y.type(torch.float32)
x = x / torch.max(x)
return x, y
def del_files(path_file):
ls = os.listdir(path_file)
for i in ls:
f_path = os.path.join(path_file, i)
        # check whether it is a directory; if so, delete it recursively
if os.path.isdir(f_path):
del_files(f_path)
else:
os.remove(f_path)
writer = tensorboard.SummaryWriter(log_dir="/home/iap13/wcx/tb/exp1", flush_secs=10)
data = [get() for i in range(10)]
x, y = zip(*data)
x = torch.cat(x, dim=0)
y = torch.cat(y, dim=0)
y_ = torch.zeros((1200, 22))
y = y.type(torch.long).squeeze()
y_ = torch.index_fill(y_, 1, y, torch.tensor(1))
# model = run((x, y), None)
# torch.save(model.state_dict(), "model_dict")
model = Moudle1()
model.load_state_dict(torch.load("model_dict"))
device = torch.device('cpu')
model.to(device)
model.eval()
target_layer = model.model_conv[-1]
# wrapped_model = GradCAM3d(model, target_layer)
wrapped_model = GradCAM3dpp(model, target_layer)
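# Grad-CAM++ wrapper around the last conv layer; its forward() call below appears to
# return the class activation volume (cams) and an index (idx), presumably the
# predicted class.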
# wrapped_model = SmoothGradCAMpp(model, target_layer)
x = x.to(device)
y = y.to(device)
# for i in range(0, 1):
# xi = x[i]
# yi = y[i]
#
# tensor_shown = xi.unsqueeze(0)
#
# cams, idx = wrapped_model.forward(tensor_shown)
# cams = cams.squeeze().cpu().numpy()
# xi = xi.squeeze().cpu().numpy()
# for t in range(10):
# writer.add_images('countdown%d'%i,
# cams[t],
# global_step=t,
# dataformats='HW')
# writer.close()
i=2
xi = x[i]
yi = y[i]
tensor_shown = xi.unsqueeze(0)
cams, idx = wrapped_model.forward(tensor_shown)
cams = cams.squeeze().cpu().numpy()
xi = xi.squeeze().cpu().numpy()
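# Log each depth slice of the CAM volume to TensorBoard as a sequence of 2D images.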
for t in range(10):
writer.add_images('countdown%d'%i,
cams[t],
global_step=t,
dataformats='HW')
writer.close()
# model = Moudle1()
# writer.add_graph(model.eval(),x)
# writer.close()
|
[
"torch.nn.Dropout",
"os.remove",
"numpy.random.seed",
"torch.cat",
"numpy.random.randint",
"cams.cam3d.GradCAM3dpp",
"torch.device",
"os.path.join",
"torch.flatten",
"torch.nn.Conv3d",
"torch.load",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.Linear",
"torch.zeros",
"torch.random.manual_seed",
"torch.max",
"os.listdir",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool3d",
"torch.nn.ReLU",
"os.path.isdir",
"torch.nn.CrossEntropyLoss",
"numpy.random.random",
"torch.tensor"
] |
[((3557, 3571), 'numpy.random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3568, 3571), False, 'from numpy import random\n'), ((3572, 3599), 'torch.random.manual_seed', 'torch.random.manual_seed', (['(0)'], {}), '(0)\n', (3596, 3599), False, 'import torch\n'), ((5152, 5227), 'torch.utils.tensorboard.SummaryWriter', 'tensorboard.SummaryWriter', ([], {'log_dir': '"""/home/iap13/wcx/tb/exp1"""', 'flush_secs': '(10)'}), "(log_dir='/home/iap13/wcx/tb/exp1', flush_secs=10)\n", (5177, 5227), False, 'from torch.utils import tensorboard\n'), ((5286, 5305), 'torch.cat', 'torch.cat', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (5295, 5305), False, 'import torch\n'), ((5310, 5329), 'torch.cat', 'torch.cat', (['y'], {'dim': '(0)'}), '(y, dim=0)\n', (5319, 5329), False, 'import torch\n'), ((5336, 5359), 'torch.zeros', 'torch.zeros', (['(1200, 22)'], {}), '((1200, 22))\n', (5347, 5359), False, 'import torch\n'), ((5595, 5614), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5607, 5614), False, 'import torch\n'), ((5748, 5780), 'cams.cam3d.GradCAM3dpp', 'GradCAM3dpp', (['model', 'target_layer'], {}), '(model, target_layer)\n', (5759, 5780), False, 'from cams.cam3d import GradCAM3dpp, GradCAM3d\n'), ((2355, 2377), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2367, 2377), False, 'import torch\n'), ((2574, 2617), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (2599, 2617), False, 'import torch\n'), ((4699, 4714), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (4711, 4714), False, 'import torch\n'), ((4921, 4942), 'os.listdir', 'os.listdir', (['path_file'], {}), '(path_file)\n', (4931, 4942), False, 'import os\n'), ((5425, 5440), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (5437, 5440), False, 'import torch\n'), ((5559, 5583), 'torch.load', 'torch.load', (['"""model_dict"""'], {}), "('model_dict')\n", (5569, 5583), False, 'import torch\n'), ((1060, 1072), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1070, 1072), False, 'from torch import nn\n'), ((1521, 1582), 'torch.nn.AdaptiveAvgPool3d', 'nn.AdaptiveAvgPool3d', (['(AvgPool3d_x, AvgPool3d_y, AvgPool3d_z)'], {}), '((AvgPool3d_x, AvgPool3d_y, AvgPool3d_z))\n', (1541, 1582), False, 'from torch import nn\n'), ((3621, 3653), 'numpy.random.random', 'random.random', (['(120, 10, 10, 10)'], {}), '((120, 10, 10, 10))\n', (3634, 3653), False, 'from numpy import random\n'), ((4549, 4572), 'numpy.random.randint', 'random.randint', (['(1)', '(9)', '(3)'], {}), '(1, 9, 3)\n', (4563, 4572), False, 'from numpy import random\n'), ((4855, 4867), 'torch.max', 'torch.max', (['x'], {}), '(x)\n', (4864, 4867), False, 'import torch\n'), ((4977, 5003), 'os.path.join', 'os.path.join', (['path_file', 'i'], {}), '(path_file, i)\n', (4989, 5003), False, 'import os\n'), ((5044, 5065), 'os.path.isdir', 'os.path.isdir', (['f_path'], {}), '(f_path)\n', (5057, 5065), False, 'import os\n'), ((838, 881), 'torch.nn.Conv3d', 'nn.Conv3d', (['D_in', 'D2', '(1)'], {'stride': '(1)', 'padding': '(0)'}), '(D_in, D2, 1, stride=1, padding=0)\n', (847, 881), False, 'from torch import nn\n'), ((1123, 1136), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1130, 1136), False, 'from torch import nn\n'), ((1150, 1162), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1160, 1162), False, 'from torch import nn\n'), ((1176, 1206), 'torch.nn.Linear', 'nn.Linear', (['self.link', 'dens_out'], {}), '(self.link, dens_out)\n', (1185, 1206), False, 'from torch 
import nn\n'), ((1220, 1233), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1227, 1233), False, 'from torch import nn\n'), ((1873, 1914), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)', 'end_dim': '(-1)'}), '(x, start_dim=1, end_dim=-1)\n', (1886, 1914), False, 'import torch\n'), ((2124, 2165), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)', 'end_dim': '(-1)'}), '(x, start_dim=1, end_dim=-1)\n', (2137, 2165), False, 'import torch\n'), ((4750, 4765), 'torch.tensor', 'torch.tensor', (['y'], {}), '(y)\n', (4762, 4765), False, 'import torch\n'), ((5123, 5140), 'os.remove', 'os.remove', (['f_path'], {}), '(f_path)\n', (5132, 5140), False, 'import os\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import copy
import inspect
import itertools
import operator
from collections.abc import Iterable
from functools import reduce
from math import ceil, log
import numpy as np
from ...config import options
from ...core.operand import OperandStage
from ...serialize import KeyField, AnyField, BoolField, Int32Field
from ..core import Tensor, TensorOrder
from ..array_utils import get_array_module, as_same_device, device, cp
from ..utils import check_out_param, validate_axis
from ..operands import TensorHasInput, TensorOperandMixin
from ..datasource import tensor as astensor
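# Helper reducers: numel counts all elements of a chunk, while nannumel counts the
# non-NaN elements (total size minus the number of NaNs).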
def numel(x, **kwargs):
xp = get_array_module(x)
return xp.sum(xp.ones_like(x), **kwargs)
def nannumel(x, **kwargs):
x_size = reduce(operator.mul, x.shape)
xp = get_array_module(x)
return x_size - xp.sum(xp.isnan(x), **kwargs)
class TensorReductionMixin(TensorOperandMixin):
__slots__ = ()
@classmethod
def _is_cum(cls):
return False
@classmethod
def _calc_order(cls, a, out):
return out.order if out is not None else a.order
@classmethod
def _is_sparse(cls, input_sparse, shape):
return False
def _call(self, a, out):
a = astensor(a)
if out is not None and not isinstance(out, Tensor):
raise TypeError(f'out should be Tensor object, got {type(out)} instead')
axis = getattr(self, 'axis', None)
keepdims = getattr(self, 'keepdims', None)
order = self._calc_order(a, out)
if self._is_cum():
if axis is None:
a, axis = a.ravel(), 0
setattr(self, '_axis', axis)
shape = a.shape
else:
axis = list(range(len(a.shape))) if axis is None else axis
if not isinstance(axis, Iterable):
axis = (validate_axis(a.ndim, axis),)
axis = set(axis)
shape = tuple(s if i not in axis else 1 for i, s in enumerate(a.shape)
if keepdims or i not in axis)
self.sparse = self._is_sparse(a.issparse(), shape)
t = self.new_tensor([a], shape, order=order)
if out is None:
return t
check_out_param(out, t, 'same_kind')
out_shape, out_dtype = out.shape, out.dtype
# if `out` is specified, use out's dtype and shape
if out_shape != t.shape:
if out.ndim > t.ndim:
raise ValueError('output has too many dimensions')
raise ValueError(f'output shape should be {t.shape}, got {out_shape}')
setattr(self, 'dtype', out_dtype)
out.data = t.data
return out
def _new_chunks(self, inputs, kws=None, **kw):
chunks = super()._new_chunks(inputs, kws=kws, **kw)
setattr(self, '_input', getattr(self, '_inputs')[0])
return chunks
def _new_tileables(self, inputs, kws=None, **kw):
tensors = super()._new_tileables(inputs, kws=kws, **kw)
setattr(self, '_input', getattr(self, '_inputs')[0])
return tensors
def __call__(self, a, out=None):
return self._call(a, out=out)
@staticmethod
def _reduced_shape(shape, axes):
return tuple(1 if i in axes else s for i, s in enumerate(shape))
@staticmethod
def _reduced_nsplits(nsplits, axes):
return tuple((1,) * len(c) if i in axes else c
for i, c in enumerate(nsplits))
@staticmethod
def _concatenate_shape(tensor, combine_block):
return tuple(builtins.sum(nsplit[i] for i in cb)
for nsplit, cb in zip(tensor.nsplits, combine_block))
@staticmethod
def _combine_split(ax, combine_size, chunk_shape):
if ax not in combine_size:
return tuple((i,) for i in range(chunk_shape[ax]))
else:
size = combine_size[ax]
shape = chunk_shape[ax]
index = tuple(range(shape))
return tuple(index[i:i + size] for i in range(0, shape, size))
def _get_op_kw(self):
return None
@classmethod
def get_axis(cls, axis):
return tuple(axis) if axis is not None else axis
@classmethod
def get_arg_axis(cls, axis, ndim):
return None if len(axis) == ndim or ndim == 1 else axis[0]
@classmethod
def _tree_reduction(cls, tensor, axis):
op = tensor.op
kw = getattr(op, '_get_op_kw')() or {}
keepdims = op.keepdims
combine_size = op.combine_size or options.combine_size
if isinstance(combine_size, dict):
combine_size = dict((ax, combine_size.get(ax)) for ax in axis)
else:
assert isinstance(combine_size, int)
n = builtins.max(int(combine_size ** (1.0 / (len(axis) or 1))), 2)
combine_size = dict((ax, n) for ax in axis)
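        # The number of reduction passes needed is ceil(log_{combine_size}(n_chunks))
        # for the most-split axis; the first (times - 1) passes run as combine stages
        # and the final pass aggregates.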
times = 1
for i, n in enumerate(tensor.chunk_shape):
if i in combine_size and combine_size[i] != 1:
times = int(builtins.max(times, ceil(log(n, combine_size[i]))))
for i in range(times - 1):
[tensor] = cls._partial_reduction(tensor, axis, op.dtype, True, combine_size, OperandStage.combine)
return cls._partial_reduction(tensor, axis, op.dtype, keepdims, combine_size, OperandStage.agg, kw)
@classmethod
def _partial_reduction(cls, tensor, axis, dtype, keepdims, combine_size, stage, kw=None):
from ..merge.concatenate import TensorConcatenate
kw = kw or {}
axes = sorted(combine_size.keys())
op_type = type(tensor.op)
combine_blocks = [cls._combine_split(i, combine_size, tensor.chunk_shape)
for i in range(tensor.ndim)]
combine_blocks_idxes = [range(len(blocks)) for blocks in combine_blocks]
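        # Each combine block groups adjacent chunk indices along the reduced axes; the
        # chunks of a group are concatenated and reduced into a single output chunk.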
chunks = []
for combine_block_idx, combine_block in zip(itertools.product(*combine_blocks_idxes),
itertools.product(*combine_blocks)):
chks = [tensor.cix[idx] for idx in itertools.product(*combine_block)]
if len(chks) > 1:
op = TensorConcatenate(axis=axes, dtype=chks[0].dtype)
chk = op.new_chunk(chks, shape=cls._concatenate_shape(tensor, combine_block),
order=tensor.order)
else:
chk = chks[0]
shape = tuple(s if i not in combine_size else 1
for i, s in enumerate(chk.shape) if keepdims or i not in combine_size)
agg_op = op_type(stage=stage, axis=axis, dtype=dtype, keepdims=keepdims, **kw)
chunk = agg_op.new_chunk([chk], shape=shape,
index=tuple(idx for i, idx in enumerate(combine_block_idx)
if keepdims or i not in combine_size),
order=tensor.order)
chunks.append(chunk)
nsplits = [
tuple(c.shape[i] for c in chunks if builtins.all(idx == 0 for j, idx in enumerate(c.index) if j != i))
for i in range(len(chunks[0].shape))]
shape = tuple(builtins.sum(nsplit) for nsplit in nsplits)
agg_op = op_type(stage=stage, axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size, **kw)
return agg_op.new_tensors([tensor], shape, order=tensor.order,
chunks=chunks, nsplits=nsplits)
@classmethod
def tile(cls, op):
in_tensor = op.inputs[0]
out_tensor = op.outputs[0]
axis = tuple(range(in_tensor.ndim)) if op.axis is None else op.axis
if isinstance(axis, int):
axis = (axis,)
axis = tuple(validate_axis(in_tensor.ndim, ax) for ax in axis)
if len(in_tensor.chunks) == 1:
c = in_tensor.chunks[0]
new_op = op.copy().reset_key()
setattr(new_op, '_axis', axis)
shape = list(cls._reduced_shape(c.shape, axis))
nsplits = list(cls._reduced_nsplits(in_tensor.nsplits, axis))
chunk_index = list(c.index)
if not op.keepdims and axis:
for ax in axis:
shape[ax] = None
nsplits[ax] = None
chunk_index[ax] = None
shape = tuple(s for s in shape if s is not None)
nsplits = tuple(ns for ns in nsplits if ns is not None)
chunk_index = tuple(i for i in chunk_index if i is not None)
chunks = new_op.new_chunks([c], shape=shape, index=chunk_index, order=out_tensor.order)
return op.copy().new_tensors(op.inputs, op.outputs[0].shape, order=out_tensor.order,
chunks=chunks, nsplits=nsplits)
chunks = []
kw = getattr(op, '_get_op_kw')() or {}
for c in in_tensor.chunks:
chunk_op = type(op)(stage=OperandStage.map, axis=axis, dtype=op.dtype, keepdims=True,
combine_size=op.combine_size, **kw)
chunks.append(chunk_op.new_chunk([c], shape=cls._reduced_shape(c.shape, axis),
order=out_tensor.order, index=c.index))
new_op = op.copy()
tensor = new_op.new_tensor(op.inputs, cls._reduced_shape(in_tensor.shape, axis),
order=out_tensor.order,
nsplits=cls._reduced_nsplits(in_tensor.nsplits, axis), chunks=chunks)
return cls._tree_reduction(tensor, axis)
@classmethod
def execute_agg(cls, ctx, op):
(input_chunk,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
axis = cls.get_axis(op.axis)
func_name = getattr(cls, '_func_name', None)
reduce_func = getattr(xp, func_name)
out = op.outputs[0]
with device(device_id):
if input_chunk.size == 0 and op.keepdims:
                # the input chunk is empty; when keepdims is True, return it unchanged
ret = input_chunk
elif "dtype" in inspect.getfullargspec(reduce_func).args:
ret = reduce_func(input_chunk, axis=axis,
dtype=op.dtype,
keepdims=bool(op.keepdims))
else:
ret = reduce_func(input_chunk, axis=axis,
keepdims=bool(op.keepdims))
if hasattr(ret, 'astype'):
# for non-object dtype
ret = ret.astype(op.dtype, order=out.order.value, copy=False)
ctx[out.key] = ret
@classmethod
def execute_one_chunk(cls, ctx, op):
cls.execute_agg(ctx, op)
@classmethod
def execute(cls, ctx, op):
if op.stage == OperandStage.map:
return cls.execute_map(ctx, op)
elif op.stage == OperandStage.combine:
return cls.execute_combine(ctx, op)
elif op.stage == OperandStage.agg:
return cls.execute_agg(ctx, op)
else:
return cls.execute_one_chunk(ctx, op)
class TensorArgReductionMixin(TensorReductionMixin):
__slots__ = ()
@staticmethod
def _get_arg_axis(axis, ndim):
if axis is None:
axis = tuple(range(ndim))
ravel = True
elif isinstance(axis, int):
axis = validate_axis(ndim, axis)
axis = (axis,)
ravel = ndim == 1
else:
raise TypeError("axis must be either `None` or int, "
f"got '{axis}'")
return axis, ravel
@staticmethod
def _get_offset(tensor, axis, chunk, ravel):
nsplits = tensor.nsplits
offset = tuple(builtins.sum(split[:idx]) for split, idx in zip(nsplits, chunk.index))
if not ravel:
offset = offset[axis[0]]
return offset
@classmethod
def _calc_order(cls, a, out):
return out.order if out is not None else TensorOrder.C_ORDER
@classmethod
def tile(cls, op):
in_tensor = op.inputs[0]
out_tensor = op.outputs[0]
axis, ravel = cls._get_arg_axis(op.axis, in_tensor.ndim)
chunks = []
for c in in_tensor.chunks:
offset = cls._get_offset(in_tensor, axis, c, ravel)
chunk_op = type(op)(stage=OperandStage.map, axis=axis, dtype=op.dtype,
offset=offset, total_shape=in_tensor.shape,
combine_size=op.combine_size)
chunk = chunk_op.new_chunk([c], shape=cls._reduced_shape(c.shape, axis),
index=c.index, order=out_tensor.order)
chunks.append(chunk)
new_op = op.copy()
tensor = new_op.new_tensor(op.inputs, cls._reduced_shape(in_tensor.shape, axis),
order=out_tensor.order,
nsplits=cls._reduced_nsplits(in_tensor.nsplits, axis), chunks=chunks)
return cls._tree_reduction(tensor, axis)
@classmethod
def execute_agg(cls, ctx, op):
axis = cls.get_arg_axis(op.axis, op.inputs[0].ndim)
(vals, arg), device_id, xp = as_same_device(
ctx[op.inputs[0].key], device=op.device, ret_extra=True)
func_name = getattr(cls, '_func_name')
arg_func = getattr(xp, func_name)
with device(device_id):
if xp.any(xp.isnan(vals)) and 'nan' in func_name:
raise ValueError("All NaN slice encountered")
if axis is None:
local_args = arg_func(vals, axis=axis)
arg = arg.ravel()[local_args]
else:
local_args = arg_func(vals, axis=axis)
inds = np.ogrid[tuple(map(slice, local_args.shape))]
if xp != np:
inds = [xp.asarray(it) for it in inds]
inds.insert(axis, local_args)
arg = arg[tuple(inds)]
ctx[op.outputs[0].key] = arg
@classmethod
def execute_map(cls, ctx, op):
arg_axis = cls.get_arg_axis(op.axis, op.inputs[0].ndim)
(in_chunk,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
func_name = getattr(cls, '_func_name')
agg_func_name = getattr(cls, '_agg_func_name')
arg_func = getattr(xp, func_name)
agg_func_name = getattr(xp, agg_func_name)
offset = op.offset
chunk = op.outputs[0]
with device(device_id):
vals = agg_func_name(in_chunk, axis=arg_axis)
if hasattr(vals, 'reshape'):
vals = vals.reshape(chunk.shape)
try:
arg = arg_func(in_chunk, axis=arg_axis)
if hasattr(arg, 'reshape'):
arg = arg.reshape(chunk.shape)
except ValueError:
# handle all NaN
arg = arg_func(xp.where(xp.isnan(in_chunk), np.inf, in_chunk),
axis=arg_axis).reshape(chunk.shape)
if arg_axis is None:
if xp == cp:
                    # copy to host to do the CPU computation, then copy back to the GPU,
                    # because unravel_index and ravel_multi_index are not implemented in cupy
in_chunk = in_chunk.get()
total_shape = op.total_shape
ind = np.unravel_index(arg.ravel()[0], in_chunk.shape)
total_ind = tuple(o + i for (o, i) in zip(offset, ind))
res = np.ravel_multi_index(total_ind, total_shape)
if xp == cp:
# copy back
with xp.cuda.Device(in_chunk.device.id):
arg[:] = xp.asarray(res)
else:
arg[:] = res
else:
arg += offset
ctx[op.outputs[0].key] = (vals, arg)
@classmethod
def execute_combine(cls, ctx, op):
axis = cls.get_arg_axis(op.axis, op.inputs[0].ndim)
(vals, arg), device_id, xp = as_same_device(
ctx[op.inputs[0].key], device=op.device, ret_extra=True)
func_name = getattr(cls, '_func_name')
arg_func = getattr(xp, func_name)
with device(device_id):
if axis is None:
local_args = arg_func(vals, axis=axis).reshape(op.outputs[0].shape)
vals = vals.ravel()[local_args]
arg = arg.ravel()[local_args]
else:
local_args = arg_func(vals, axis=axis)
inds = np.ogrid[tuple(map(slice, local_args.shape))]
if xp != np:
inds = [xp.asarray(it) for it in inds]
inds.insert(axis, local_args)
inds_tuple = tuple(inds)
vals = vals[inds_tuple].reshape(op.outputs[0].shape)
arg = arg[inds_tuple].reshape(op.outputs[0].shape)
ctx[op.outputs[0].key] = (vals, arg)
class TensorCumReductionMixin(TensorReductionMixin):
__slots__ = ()
@classmethod
def _is_cum(cls):
return True
@staticmethod
def _get_op_types():
raise NotImplementedError
@classmethod
def tile(cls, op):
from ..indexing.slice import TensorSlice
in_tensor = op.inputs[0]
out_tensor = op.outputs[0]
axis = op.axis
if not isinstance(axis, int):
            raise ValueError("axis must be an integer")
axis = validate_axis(in_tensor.ndim, axis)
if axis is None:
raise NotImplementedError
op_type, bin_op_type = getattr(op, '_get_op_types')()
chunks = []
for c in in_tensor.chunks:
chunk_op = op_type(axis=op.axis, dtype=op.dtype)
chunks.append(chunk_op.new_chunk([c], shape=c.shape,
index=c.index, order=out_tensor.order))
inter_tensor = copy.copy(in_tensor)
inter_tensor._chunks = chunks
slc = [slice(None) if i != axis else slice(-1, None)
for i in range(in_tensor.ndim)]
output_chunks = []
for chunk in chunks:
if chunk.index[axis] == 0:
output_chunks.append(chunk)
continue
to_cum_chunks = []
for i in range(chunk.index[axis]):
to_cum_index = chunk.index[:axis] + (i,) + chunk.index[axis + 1:]
shape = chunk.shape[:axis] + (1,) + chunk.shape[axis + 1:]
to_cum_chunk = inter_tensor.cix[to_cum_index]
slice_op = TensorSlice(slices=slc, dtype=chunk.dtype)
sliced_chunk = slice_op.new_chunk([to_cum_chunk], shape=shape,
index=to_cum_index, order=out_tensor.order)
to_cum_chunks.append(sliced_chunk)
to_cum_chunks.append(chunk)
bin_op = bin_op_type(args=to_cum_chunks, dtype=chunk.dtype)
output_chunk = bin_op.new_chunk(to_cum_chunks, shape=chunk.shape,
index=chunk.index, order=out_tensor.order)
output_chunks.append(output_chunk)
new_op = op.copy()
return new_op.new_tensors(op.inputs, in_tensor.shape, order=out_tensor.order,
chunks=output_chunks, nsplits=in_tensor.nsplits)
@classmethod
def execute(cls, ctx, op):
(x,), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
func_name = getattr(cls, '_func_name')
cum_func = getattr(xp, func_name)
if xp != np:
func = getattr(xp, cum_func.__name__)
else:
func = cum_func
with device(device_id):
ctx[op.outputs[0].key] = func(x, axis=op.axis, dtype=op.dtype)
class TensorReduction(TensorHasInput):
_input = KeyField('input')
_out = KeyField('out')
_axis = AnyField('axis') # can be None or int or tuple of ints, just infer the data
_keepdims = BoolField('keepdims')
_combine_size = AnyField('combine_size')
@property
def axis(self):
return getattr(self, '_axis', None)
@property
def keepdims(self):
return getattr(self, '_keepdims', None)
@property
def combine_size(self):
return getattr(self, '_combine_size', None)
def _rewrite_stage(self, stage):
if stage == OperandStage.map and not hasattr(self, 'execute_map'):
return OperandStage.agg
elif stage == OperandStage.combine and not hasattr(self, 'execute_combine'):
return OperandStage.agg
return stage
class TensorCumReduction(TensorHasInput):
_input = KeyField('input')
_axis = Int32Field('axis')
@property
def axis(self):
return getattr(self, '_axis', None)
|
[
"inspect.getfullargspec",
"copy.copy",
"numpy.ravel_multi_index",
"itertools.product",
"functools.reduce",
"math.log",
"builtins.sum"
] |
[((1376, 1405), 'functools.reduce', 'reduce', (['operator.mul', 'x.shape'], {}), '(operator.mul, x.shape)\n', (1382, 1405), False, 'from functools import reduce\n'), ((18610, 18630), 'copy.copy', 'copy.copy', (['in_tensor'], {}), '(in_tensor)\n', (18619, 18630), False, 'import copy\n'), ((6480, 6520), 'itertools.product', 'itertools.product', (['*combine_blocks_idxes'], {}), '(*combine_blocks_idxes)\n', (6497, 6520), False, 'import itertools\n'), ((6574, 6608), 'itertools.product', 'itertools.product', (['*combine_blocks'], {}), '(*combine_blocks)\n', (6591, 6608), False, 'import itertools\n'), ((4152, 4187), 'builtins.sum', 'builtins.sum', (['(nsplit[i] for i in cb)'], {}), '(nsplit[i] for i in cb)\n', (4164, 4187), False, 'import builtins\n'), ((7778, 7798), 'builtins.sum', 'builtins.sum', (['nsplit'], {}), '(nsplit)\n', (7790, 7798), False, 'import builtins\n'), ((12382, 12407), 'builtins.sum', 'builtins.sum', (['split[:idx]'], {}), '(split[:idx])\n', (12394, 12407), False, 'import builtins\n'), ((16217, 16261), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['total_ind', 'total_shape'], {}), '(total_ind, total_shape)\n', (16237, 16261), True, 'import numpy as np\n'), ((6658, 6691), 'itertools.product', 'itertools.product', (['*combine_block'], {}), '(*combine_block)\n', (6675, 6691), False, 'import itertools\n'), ((10742, 10777), 'inspect.getfullargspec', 'inspect.getfullargspec', (['reduce_func'], {}), '(reduce_func)\n', (10764, 10777), False, 'import inspect\n'), ((5635, 5658), 'math.log', 'log', (['n', 'combine_size[i]'], {}), '(n, combine_size[i])\n', (5638, 5658), False, 'from math import ceil, log\n')]
|
import torch
import torch.nn.functional as F
import numpy
from babyai.rl.utils import DictList
# dictionary that defines what head is required for each extra info used for auxiliary supervision
required_heads = {'seen_state': 'binary',
'see_door': 'binary',
'see_obj': 'binary',
'obj_in_instr': 'binary',
                  'in_front_of_what': 'multiclass9', # multi-class classifier with 9 possible classes
                  'visit_proportion': 'continuous01', # continuous regressor with outputs in [0, 1]
'bot_action': 'binary'
}
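# A minimal sketch (added, not part of the original file): helper that maps a head spec from
# required_heads to its per-timestep output dimension, mirroring the branching used in
# ExtraInfoCollector.__init__ below. The name head_output_dim is hypothetical.
def head_output_dim(head_spec):
    # binary and continuous heads predict a single number per timestep
    if head_spec == 'binary' or head_spec.startswith('continuous'):
        return 1
    # multiclassN heads predict a probability distribution over N classes
    if head_spec.startswith('multiclass'):
        return int(head_spec.replace('multiclass', ''))
    raise ValueError("{} not supported".format(head_spec))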
class ExtraInfoCollector:
'''
    This class, used in rl.algos.base, connects the extra information coming from the environment with the
    corresponding predictions made by the dedicated heads in the model, and reshapes both so that they are
    easy to use when evaluating losses
'''
def __init__(self, aux_info, shape, device):
self.aux_info = aux_info
self.shape = shape
self.device = device
self.collected_info = dict()
self.extra_predictions = dict()
for info in self.aux_info:
self.collected_info[info] = torch.zeros(*shape, device=self.device)
if required_heads[info] == 'binary' or required_heads[info].startswith('continuous'):
# we predict one number only
self.extra_predictions[info] = torch.zeros(*shape, 1, device=self.device)
elif required_heads[info].startswith('multiclass'):
# means that this is a multi-class classification and we need to predict the whole proba distr
n_classes = int(required_heads[info].replace('multiclass', ''))
self.extra_predictions[info] = torch.zeros(*shape, n_classes, device=self.device)
else:
raise ValueError("{} not supported".format(required_heads[info]))
def process(self, env_info):
# env_info is now a tuple of dicts
env_info = [{k: v for k, v in dic.items() if k in self.aux_info} for dic in env_info]
env_info = {k: [env_info[_][k] for _ in range(len(env_info))] for k in env_info[0].keys()}
# env_info is now a dict of lists
return env_info
def fill_dictionaries(self, index, env_info, extra_predictions):
for info in self.aux_info:
dtype = torch.long if required_heads[info].startswith('multiclass') else torch.float
self.collected_info[info][index] = torch.tensor(env_info[info], dtype=dtype, device=self.device)
self.extra_predictions[info][index] = extra_predictions[info]
def end_collection(self, exps):
collected_info = dict()
extra_predictions = dict()
for info in self.aux_info:
# T x P -> P x T -> P * T
collected_info[info] = self.collected_info[info].transpose(0, 1).reshape(-1)
if required_heads[info] == 'binary' or required_heads[info].startswith('continuous'):
# T x P x 1 -> P x T x 1 -> P * T
extra_predictions[info] = self.extra_predictions[info].transpose(0, 1).reshape(-1)
            elif required_heads[info].startswith('multiclass'):
                # T x P x k -> P x T x k -> (P * T) x k
                k = int(required_heads[info].replace('multiclass', ''))  # number of classes
                extra_predictions[info] = self.extra_predictions[info].transpose(0, 1).reshape(-1, k)
# convert the dicts to DictLists, and add them to the exps DictList.
exps.collected_info = DictList(collected_info)
exps.extra_predictions = DictList(extra_predictions)
return exps
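# Hedged usage sketch (added, not in the original file): the collector is typically driven by the
# rollout loop in rl.algos.base roughly as follows; names such as `envs_info`, `model_results` and
# the shape (num_frames_per_proc, num_procs) are illustrative placeholders.
#   collector = ExtraInfoCollector(aux_info, (num_frames_per_proc, num_procs), device)
#   for t in range(num_frames_per_proc):
#       env_info = collector.process(envs_info)  # tuple of dicts -> dict of lists
#       collector.fill_dictionaries(t, env_info, model_results['extra_predictions'])
#   exps = collector.end_collection(exps)        # attaches DictLists to the experience buffer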
class SupervisedLossUpdater:
'''
This class, used by PPO, allows the evaluation of the supervised loss when using extra information from the
environment. It also handles logging accuracies/L2 distances/etc...
'''
def __init__(self, aux_info, supervised_loss_coef, recurrence, device):
self.aux_info = aux_info
self.supervised_loss_coef = supervised_loss_coef
self.recurrence = recurrence
self.device = device
self.log_supervised_losses = []
self.log_supervised_accuracies = []
self.log_supervised_L2_losses = []
self.log_supervised_prevalences = []
self.batch_supervised_loss = 0
self.batch_supervised_accuracy = 0
self.batch_supervised_L2_loss = 0
self.batch_supervised_prevalence = 0
def init_epoch(self):
self.log_supervised_losses = []
self.log_supervised_accuracies = []
self.log_supervised_L2_losses = []
self.log_supervised_prevalences = []
def init_batch(self):
self.batch_supervised_loss = 0
self.batch_supervised_accuracy = 0
self.batch_supervised_L2_loss = 0
self.batch_supervised_prevalence = 0
def eval_subbatch(self, extra_predictions, sb):
supervised_loss = torch.tensor(0., device=self.device)
supervised_accuracy = torch.tensor(0., device=self.device)
supervised_L2_loss = torch.tensor(0., device=self.device)
supervised_prevalence = torch.tensor(0., device=self.device)
binary_classification_tasks = 0
classification_tasks = 0
regression_tasks = 0
for pos, info in enumerate(self.aux_info):
coef = self.supervised_loss_coef[pos]
pred = extra_predictions[info]
target = dict.__getitem__(sb.collected_info, info)
if required_heads[info] == 'binary':
binary_classification_tasks += 1
classification_tasks += 1
supervised_loss += coef * F.binary_cross_entropy_with_logits(pred.reshape(-1), target)
supervised_accuracy += ((pred.reshape(-1) > 0).float() == target).float().mean()
supervised_prevalence += target.mean()
elif required_heads[info].startswith('continuous'):
regression_tasks += 1
mse = F.mse_loss(pred.reshape(-1), target)
supervised_loss += coef * mse
supervised_L2_loss += mse
elif required_heads[info].startswith('multiclass'):
classification_tasks += 1
supervised_accuracy += (pred.argmax(1).float() == target).float().mean()
supervised_loss += coef * F.cross_entropy(pred, target.long())
else:
raise ValueError("{} not supported".format(required_heads[info]))
if binary_classification_tasks > 0:
supervised_prevalence /= binary_classification_tasks
else:
supervised_prevalence = torch.tensor(-1)
if classification_tasks > 0:
supervised_accuracy /= classification_tasks
else:
supervised_accuracy = torch.tensor(-1)
if regression_tasks > 0:
supervised_L2_loss /= regression_tasks
else:
supervised_L2_loss = torch.tensor(-1)
self.batch_supervised_loss += supervised_loss.item()
self.batch_supervised_accuracy += supervised_accuracy.item()
self.batch_supervised_L2_loss += supervised_L2_loss.item()
self.batch_supervised_prevalence += supervised_prevalence.item()
return supervised_loss
def update_batch_values(self):
self.batch_supervised_loss /= self.recurrence
self.batch_supervised_accuracy /= self.recurrence
self.batch_supervised_L2_loss /= self.recurrence
self.batch_supervised_prevalence /= self.recurrence
def update_epoch_logs(self):
self.log_supervised_losses.append(self.batch_supervised_loss)
self.log_supervised_accuracies.append(self.batch_supervised_accuracy)
self.log_supervised_L2_losses.append(self.batch_supervised_L2_loss)
self.log_supervised_prevalences.append(self.batch_supervised_prevalence)
def end_training(self, logs):
logs["supervised_loss"] = numpy.mean(self.log_supervised_losses)
logs["supervised_accuracy"] = numpy.mean(self.log_supervised_accuracies)
logs["supervised_L2_loss"] = numpy.mean(self.log_supervised_L2_losses)
logs["supervised_prevalence"] = numpy.mean(self.log_supervised_prevalences)
return logs
|
[
"torch.zeros",
"numpy.mean",
"babyai.rl.utils.DictList",
"torch.tensor"
] |
[((3547, 3571), 'babyai.rl.utils.DictList', 'DictList', (['collected_info'], {}), '(collected_info)\n', (3555, 3571), False, 'from babyai.rl.utils import DictList\n'), ((3605, 3632), 'babyai.rl.utils.DictList', 'DictList', (['extra_predictions'], {}), '(extra_predictions)\n', (3613, 3632), False, 'from babyai.rl.utils import DictList\n'), ((4934, 4971), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'self.device'}), '(0.0, device=self.device)\n', (4946, 4971), False, 'import torch\n'), ((5001, 5038), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'self.device'}), '(0.0, device=self.device)\n', (5013, 5038), False, 'import torch\n'), ((5067, 5104), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'self.device'}), '(0.0, device=self.device)\n', (5079, 5104), False, 'import torch\n'), ((5136, 5173), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'self.device'}), '(0.0, device=self.device)\n', (5148, 5173), False, 'import torch\n'), ((7960, 7998), 'numpy.mean', 'numpy.mean', (['self.log_supervised_losses'], {}), '(self.log_supervised_losses)\n', (7970, 7998), False, 'import numpy\n'), ((8037, 8079), 'numpy.mean', 'numpy.mean', (['self.log_supervised_accuracies'], {}), '(self.log_supervised_accuracies)\n', (8047, 8079), False, 'import numpy\n'), ((8117, 8158), 'numpy.mean', 'numpy.mean', (['self.log_supervised_L2_losses'], {}), '(self.log_supervised_L2_losses)\n', (8127, 8158), False, 'import numpy\n'), ((8199, 8242), 'numpy.mean', 'numpy.mean', (['self.log_supervised_prevalences'], {}), '(self.log_supervised_prevalences)\n', (8209, 8242), False, 'import numpy\n'), ((1208, 1247), 'torch.zeros', 'torch.zeros', (['*shape'], {'device': 'self.device'}), '(*shape, device=self.device)\n', (1219, 1247), False, 'import torch\n'), ((2519, 2580), 'torch.tensor', 'torch.tensor', (['env_info[info]'], {'dtype': 'dtype', 'device': 'self.device'}), '(env_info[info], dtype=dtype, device=self.device)\n', (2531, 2580), False, 'import torch\n'), ((6661, 6677), 'torch.tensor', 'torch.tensor', (['(-1)'], {}), '(-1)\n', (6673, 6677), False, 'import torch\n'), ((6819, 6835), 'torch.tensor', 'torch.tensor', (['(-1)'], {}), '(-1)\n', (6831, 6835), False, 'import torch\n'), ((6967, 6983), 'torch.tensor', 'torch.tensor', (['(-1)'], {}), '(-1)\n', (6979, 6983), False, 'import torch\n'), ((1438, 1480), 'torch.zeros', 'torch.zeros', (['*shape', '(1)'], {'device': 'self.device'}), '(*shape, 1, device=self.device)\n', (1449, 1480), False, 'import torch\n'), ((1783, 1833), 'torch.zeros', 'torch.zeros', (['*shape', 'n_classes'], {'device': 'self.device'}), '(*shape, n_classes, device=self.device)\n', (1794, 1833), False, 'import torch\n')]
|
import random as rd
import numpy as np
import networkx as nx
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn import metrics
from numpy import linalg as LA
import matplotlib.pyplot as plt
from sklearn.cluster import SpectralClustering
DG= nx.DiGraph()
G=nx.Graph()
Cluster=[14,15,16,17,18,19,20,21,22,26,122,125,56,57,58,64,65,67,68,71,98,47,49,50,59,61,93,94,42,7,8,9,10,12,13,23,24,25,28,29,30,31,33,34,35,36,37,38,39,40,41,43,44,48,60,90,123,62,63,92,95,99]
'''
Compute purity for clustering
'''
def purity(y_true,y_pred):
contigency_matrix=metrics.cluster.contingency_matrix(y_true,y_pred)
return np.sum(np.amax(contigency_matrix,axis=0))/np.sum(contigency_matrix)
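'''
Worked example of purity (added for illustration, not part of the original script)
'''
def _purity_example():
    # a clustering that matches the true labels up to relabelling is perfectly pure
    assert purity([0, 0, 1, 1], [1, 1, 0, 0]) == 1.0
    # every predicted cluster is half-and-half, so purity drops to 0.5
    assert purity([0, 0, 1, 1], [0, 1, 0, 1]) == 0.5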
'''
compute precision, recall and F score
for clustering
'''
def precision(y_true,y_pred):
tp=0.0
fp=0.0
n1=len(y_true)
for i in range(n1):
for j in range(i+1,n1):
if y_true[i]==y_true[j] and y_pred[i]==y_pred[j]:
tp+=1
if y_true[i]!=y_true[j] and y_pred[i]==y_pred[j]:
fp+=1
if tp==0 and fp==0:
return 0
else:
return tp/(tp+fp)
def recall(y_true,y_pred):
tp=0.0
fn=0.0
n1=len(y_true)
for i in range(n1):
for j in range(i+1,n1):
if y_true[i]==y_true[j] and y_pred[i]==y_pred[j]:
tp+=1
if y_true[i]==y_true[j] and y_pred[i]!=y_pred[j]:
fn+=1
if tp==0 and fn==0:
return 0
else:
return tp/(tp+fn)
def F_score(y_true,y_pred,beta):
P=precision(y_true,y_pred)
R=recall(y_true,y_pred)
if P==0 and R==0:
return 0
else:
return ((beta*beta+1)*P*R)/(beta*beta*P+R)
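'''
Worked pair-counting example (added for illustration): with y_true=[0,0,1,1] and y_pred=[0,0,0,1]
the pair (0,1) is a true positive, (0,2) and (1,2) are false positives and (2,3) is a false
negative, giving precision=1/3, recall=1/2 and F1=0.4
'''
def _pair_counting_example():
    assert abs(precision([0, 0, 1, 1], [0, 0, 0, 1]) - 1.0/3) < 1e-12
    assert abs(recall([0, 0, 1, 1], [0, 0, 0, 1]) - 0.5) < 1e-12
    assert abs(F_score([0, 0, 1, 1], [0, 0, 0, 1], 1) - 0.4) < 1e-12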
'''
test whether 3 nodes form a motif instance of M6
'''
def is_motif(i,j,k):
global DG
nodes=[i,j,k]
H=DG.subgraph(nodes)
M6=nx.DiGraph()
for u in range(3):
M6.add_node(u)
M6.add_edge(0,1)
M6.add_edge(0,2)
M6.add_edge(1,2)
M6.add_edge(2,1)
return nx.is_isomorphic(H,M6)
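'''
Added note: M6 as built above is the directed triangle with edges 0->1, 0->2 and the
reciprocal pair 1<->2, i.e. one source node pointing into a bidirectionally connected pair
'''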
'''
count the M6 motif instances that contain the edge (i, j)
'''
def count_motif(i,j):
global G
nb1=set(list(G[i]))
nb2=set(list(G[j]))
nb=list(nb1&nb2)
num=0
for k in nb:
if is_motif(i,j,k):
num+=1
return num
'''
initialize the network and build the motif adjacency matrix
'''
def initial_network(f1,f2):
global DG,Cluster,G
dic=dict()
n=len(Cluster)
for i in range(n):
dic[Cluster[i]]=i
DG1=nx.DiGraph()
nn=128
for i in range(nn):
names=f1.readline()
DG1.add_node(i,name=names,color=0)
for line in f2:
strli=line.split()
a=int(strli[0])
b=int(strli[1])
DG1.add_edge(a,b)
H=DG1.subgraph(Cluster)
for u in H.nodes():
DG.add_node(dic[u],name=H.nodes[u]['name'],color=0)
G.add_node(dic[u],name=H.nodes[u]['name'],color=0)
for e in H.edges():
DG.add_edge(dic[e[0]],dic[e[1]])
G.add_edge(dic[e[0]],dic[e[1]])
A=np.zeros((n, n))
for i in range(n):
for j in range(i+1,n):
num=count_motif(i,j)
A[i,j]=num
A[j,i]=num
return A
'''
regularize the motif adjacency matrix
'''
def regularize_matrix(A,tau):
global Cluster
n=len(Cluster)
T=np.ones((n,n))
A=A+(tau/n)*T
return A
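'''
Added note: regularize_matrix implements A_tau = A + (tau/n) * J, where J is the all-ones
matrix, so every entry of the motif adjacency matrix is shifted by the constant tau/n
'''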
'''
construct multihop matrix
'''
def multihop_matrix(A,k):
global G
print("hop number: "+str(k))
N=(A.shape)[0]
Ctmp=A
S=np.zeros((N,N))
S=S+A
for i in range(k-1):
Ctmp=np.matmul(Ctmp,A)
S=S+Ctmp
for i in range(N):
for j in range(N):
if A[i,j]!=0:
A[i,j]=S[i,j]
return A
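'''
Added note: multihop_matrix computes S = A + A^2 + ... + A^k and then copies S back only into
the entries where A was already non-zero, so multi-hop path weights are accumulated while the
original sparsity pattern of the motif adjacency matrix is preserved
'''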
'''
multihop spectral clustering
'''
def spectral_community_detect(A,groups):
global G
model = SpectralClustering(n_clusters=groups,eigen_solver='arpack',random_state=56,affinity='precomputed')
model.fit(A)
labels=model.labels_
return labels
'''
evaluate the community
'''
def evaluate_community(y_true,y_pred):
res1=purity(y_true,y_pred)
res2=normalized_mutual_info_score(y_true,y_pred,average_method='arithmetic')
res3=adjusted_rand_score(y_true,y_pred)
res4=F_score(y_true,y_pred,1)
return res1,res2,res3,res4
def main():
global G,DG
F1=open('bay-nodes.txt', 'r')
F2=open('bay-edges.txt','r')
Class1=[1,1,1,2,2,2,2,2,2,3,4,4,5,5,5,5,5,6,6,5,5,3,3,3,7,8,7,7,3,9,10,10,10,11,11,12,12,12,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,7,5,4,7,6,7,7,6]
Class2=[1,1,1,1,1,1,1,1,1,2,3,3,4,4,4,4,4,5,5,4,4,2,2,2,6,5,6,6,2,7,7,7,7,7,7,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,6,4,3,6,5,6,6,5]
A=initial_network(F1,F2)
tau=2.5
A=regularize_matrix(A,tau)
q=2
k=4
A=multihop_matrix(A,q)
pred=spectral_community_detect(A,k)
purity,nmi,ari,f1=evaluate_community(Class1,pred)
print("class1 Purity: "+str(purity))
print("class1 NMI: "+str(nmi))
print("class1 ARI: "+str(ari))
print("class1 F1: "+str(f1))
print("#########################")
purity,nmi,ari,f1=evaluate_community(Class2,pred)
print("class2 Purity: "+str(purity))
print("class2 NMI: "+str(nmi))
print("class2 ARI: "+str(ari))
print("class2 F1: "+str(f1))
main()
|
[
"sklearn.metrics.cluster.contingency_matrix",
"numpy.sum",
"sklearn.cluster.SpectralClustering",
"sklearn.metrics.cluster.adjusted_rand_score",
"numpy.zeros",
"numpy.ones",
"numpy.amax",
"networkx.Graph",
"numpy.matmul",
"networkx.is_isomorphic",
"networkx.DiGraph",
"sklearn.metrics.cluster.normalized_mutual_info_score"
] |
[((337, 349), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (347, 349), True, 'import networkx as nx\n'), ((353, 363), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (361, 363), True, 'import networkx as nx\n'), ((655, 705), 'sklearn.metrics.cluster.contingency_matrix', 'metrics.cluster.contingency_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (689, 705), False, 'from sklearn import metrics\n'), ((1980, 1992), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1990, 1992), True, 'import networkx as nx\n'), ((2141, 2164), 'networkx.is_isomorphic', 'nx.is_isomorphic', (['H', 'M6'], {}), '(H, M6)\n', (2157, 2164), True, 'import networkx as nx\n'), ((2648, 2660), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (2658, 2660), True, 'import networkx as nx\n'), ((3187, 3203), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (3195, 3203), True, 'import numpy as np\n'), ((3487, 3502), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (3494, 3502), True, 'import numpy as np\n'), ((3690, 3706), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (3698, 3706), True, 'import numpy as np\n'), ((4036, 4142), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'n_clusters': 'groups', 'eigen_solver': '"""arpack"""', 'random_state': '(56)', 'affinity': '"""precomputed"""'}), "(n_clusters=groups, eigen_solver='arpack', random_state=\n 56, affinity='precomputed')\n", (4054, 4142), False, 'from sklearn.cluster import SpectralClustering\n'), ((4328, 4401), 'sklearn.metrics.cluster.normalized_mutual_info_score', 'normalized_mutual_info_score', (['y_true', 'y_pred'], {'average_method': '"""arithmetic"""'}), "(y_true, y_pred, average_method='arithmetic')\n", (4356, 4401), False, 'from sklearn.metrics.cluster import normalized_mutual_info_score\n'), ((4410, 4445), 'sklearn.metrics.cluster.adjusted_rand_score', 'adjusted_rand_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (4429, 4445), False, 'from sklearn.metrics.cluster import adjusted_rand_score\n'), ((759, 784), 'numpy.sum', 'np.sum', (['contigency_matrix'], {}), '(contigency_matrix)\n', (765, 784), True, 'import numpy as np\n'), ((3757, 3775), 'numpy.matmul', 'np.matmul', (['Ctmp', 'A'], {}), '(Ctmp, A)\n', (3766, 3775), True, 'import numpy as np\n'), ((724, 758), 'numpy.amax', 'np.amax', (['contigency_matrix'], {'axis': '(0)'}), '(contigency_matrix, axis=0)\n', (731, 758), True, 'import numpy as np\n')]
|
#!/opt/anaconda3/envs/py37/bin/python
import numpy as np
import twd97
import sys
from cntr_kml import cntr_kml
from pyproj import Proj
import rasterio
fname = sys.argv[1]
img = rasterio.open(fname)
data=np.flip(img.read()[0,:,:],[0])
l,b,r,t=img.bounds[:]
LL=False
if (l+r)/2==img.lnglat()[0]:LL=True
x0,y0=img.xy(0,0)
nx,ny=img.width, img.height
dx,dy=(r-l)/nx,-(t-b)/ny
x = np.array([x0+dx*i for i in range(nx)])
y = np.array([y0+dy*i for i in range(ny)])
y.sort()
if LL:
lon, lat = np.meshgrid(x, y)
else:
x_g, y_g = np.meshgrid(x, y)
Xcent,Ycent=(x[0]+x[-1])/2, (y[0]+y[-1])/2
Latitude_Pole, Longitude_Pole=twd97.towgs84(Xcent, Ycent)
pnyc = Proj(proj='lcc', datum='NAD83', lat_1=10, lat_2=40,
lat_0=Latitude_Pole, lon_0=Longitude_Pole, x_0=0, y_0=0.0)
xgl,ygl=x_g-Xcent, y_g-Ycent
lon,lat=pnyc(xgl, ygl, inverse=True)
result=cntr_kml(data, lon, lat, fname)
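# Hedged usage note (added, not part of the original script): run as
#   python this_script.py input.tif
# The raster is read with rasterio and a lon/lat grid is built either directly (when the raster
# is already geographic) or by inverting a Lambert conformal conic projection centred on the
# raster, after which cntr_kml contours the data onto that grid.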
|
[
"rasterio.open",
"numpy.meshgrid",
"cntr_kml.cntr_kml",
"twd97.towgs84",
"pyproj.Proj"
] |
[((178, 198), 'rasterio.open', 'rasterio.open', (['fname'], {}), '(fname)\n', (191, 198), False, 'import rasterio\n'), ((855, 886), 'cntr_kml.cntr_kml', 'cntr_kml', (['data', 'lon', 'lat', 'fname'], {}), '(data, lon, lat, fname)\n', (863, 886), False, 'from cntr_kml import cntr_kml\n'), ((489, 506), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (500, 506), True, 'import numpy as np\n'), ((526, 543), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (537, 543), True, 'import numpy as np\n'), ((621, 648), 'twd97.towgs84', 'twd97.towgs84', (['Xcent', 'Ycent'], {}), '(Xcent, Ycent)\n', (634, 648), False, 'import twd97\n'), ((658, 772), 'pyproj.Proj', 'Proj', ([], {'proj': '"""lcc"""', 'datum': '"""NAD83"""', 'lat_1': '(10)', 'lat_2': '(40)', 'lat_0': 'Latitude_Pole', 'lon_0': 'Longitude_Pole', 'x_0': '(0)', 'y_0': '(0.0)'}), "(proj='lcc', datum='NAD83', lat_1=10, lat_2=40, lat_0=Latitude_Pole,\n lon_0=Longitude_Pole, x_0=0, y_0=0.0)\n", (662, 772), False, 'from pyproj import Proj\n')]
|
import numpy
from sympy import Rational as frac
from sympy import pi, sqrt
from ..helpers import article, fsd, pm, untangle
from ._helpers import Enr2Scheme
citation = article(
authors=["<NAME>", "<NAME>"],
title="Approximate integration formulas for certain spherically symmetric regions",
journal="Math. Comp.",
volume="17",
year="1963",
pages="105-135",
url="https://doi.org/10.1090/S0025-5718-1963-0161473-0",
)
def stroud_secrest_1(n):
data = [(frac(1, n + 1), sqrt(frac(1, 2)) * _nsimplex(n))]
points, weights = untangle(data)
weights *= sqrt(pi) ** n
return Enr2Scheme("Stroud-Secrest I", n, weights, points, 2, citation)
def stroud_secrest_2(n):
nu = sqrt(frac(n, 2))
data = [(frac(1, 2 * n), fsd(n, (nu, 1)))]
points, weights = untangle(data)
weights *= sqrt(pi) ** n
return Enr2Scheme("Stroud-Secrest II", n, weights, points, 3, citation)
def stroud_secrest_3(n):
nu = sqrt(frac(1, 2))
data = [(frac(1, 2 ** n), pm(n, nu))]
points, weights = untangle(data)
weights *= sqrt(pi) ** n
return Enr2Scheme("Stroud-Secrest III", n, weights, points, 3, citation)
def stroud_secrest_4(n):
nu = sqrt(frac(n + 2, 2))
xi = sqrt(frac(n + 2, 4))
A = frac(2, n + 2)
B = frac(4 - n, 2 * (n + 2) ** 2)
C = frac(1, (n + 2) ** 2)
data = [(A, numpy.full((1, n), 0)), (B, fsd(n, (nu, 1))), (C, fsd(n, (xi, 2)))]
points, weights = untangle(data)
weights *= sqrt(pi) ** n
return Enr2Scheme("Stroud-Secrest IV", n, weights, points, 5, citation)
def _nsimplex(n):
# construct the regular n-simplex points with 0 center
return numpy.array(
[
[-sqrt(frac(n + 1, (n + 1 - k) * (n - k))) for k in range(i)]
+ [sqrt(frac((n + 1) * (n - i), n + 1 - i))]
+ (n - i - 1) * [0]
for i in range(n)
]
+ [[-sqrt(frac(n + 1, (n + 1 - i) * (n - i))) for i in range(n)]]
)
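def _nsimplex_check(n=3):
    # Hedged sanity sketch (added, not in the original file): the regular n-simplex vertices
    # returned above are centred at the origin and all lie at the same distance from it.
    pts = _nsimplex(n).astype(float)
    assert numpy.allclose(pts.sum(axis=0), 0.0)
    radii = (pts ** 2).sum(axis=1)
    assert numpy.allclose(radii, radii[0])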
|
[
"numpy.full",
"sympy.sqrt",
"sympy.Rational"
] |
[((1252, 1266), 'sympy.Rational', 'frac', (['(2)', '(n + 2)'], {}), '(2, n + 2)\n', (1256, 1266), True, 'from sympy import Rational as frac\n'), ((1275, 1304), 'sympy.Rational', 'frac', (['(4 - n)', '(2 * (n + 2) ** 2)'], {}), '(4 - n, 2 * (n + 2) ** 2)\n', (1279, 1304), True, 'from sympy import Rational as frac\n'), ((1313, 1334), 'sympy.Rational', 'frac', (['(1)', '((n + 2) ** 2)'], {}), '(1, (n + 2) ** 2)\n', (1317, 1334), True, 'from sympy import Rational as frac\n'), ((588, 596), 'sympy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (592, 596), False, 'from sympy import pi, sqrt\n'), ((718, 728), 'sympy.Rational', 'frac', (['n', '(2)'], {}), '(n, 2)\n', (722, 728), True, 'from sympy import Rational as frac\n'), ((829, 837), 'sympy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (833, 837), False, 'from sympy import pi, sqrt\n'), ((960, 970), 'sympy.Rational', 'frac', (['(1)', '(2)'], {}), '(1, 2)\n', (964, 970), True, 'from sympy import Rational as frac\n'), ((1066, 1074), 'sympy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (1070, 1074), False, 'from sympy import pi, sqrt\n'), ((1198, 1212), 'sympy.Rational', 'frac', (['(n + 2)', '(2)'], {}), '(n + 2, 2)\n', (1202, 1212), True, 'from sympy import Rational as frac\n'), ((1228, 1242), 'sympy.Rational', 'frac', (['(n + 2)', '(4)'], {}), '(n + 2, 4)\n', (1232, 1242), True, 'from sympy import Rational as frac\n'), ((1472, 1480), 'sympy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (1476, 1480), False, 'from sympy import pi, sqrt\n'), ((486, 500), 'sympy.Rational', 'frac', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (490, 500), True, 'from sympy import Rational as frac\n'), ((743, 757), 'sympy.Rational', 'frac', (['(1)', '(2 * n)'], {}), '(1, 2 * n)\n', (747, 757), True, 'from sympy import Rational as frac\n'), ((985, 1000), 'sympy.Rational', 'frac', (['(1)', '(2 ** n)'], {}), '(1, 2 ** n)\n', (989, 1000), True, 'from sympy import Rational as frac\n'), ((1352, 1373), 'numpy.full', 'numpy.full', (['(1, n)', '(0)'], {}), '((1, n), 0)\n', (1362, 1373), False, 'import numpy\n'), ((507, 517), 'sympy.Rational', 'frac', (['(1)', '(2)'], {}), '(1, 2)\n', (511, 517), True, 'from sympy import Rational as frac\n'), ((1896, 1930), 'sympy.Rational', 'frac', (['(n + 1)', '((n + 1 - i) * (n - i))'], {}), '(n + 1, (n + 1 - i) * (n - i))\n', (1900, 1930), True, 'from sympy import Rational as frac\n'), ((1769, 1803), 'sympy.Rational', 'frac', (['((n + 1) * (n - i))', '(n + 1 - i)'], {}), '((n + 1) * (n - i), n + 1 - i)\n', (1773, 1803), True, 'from sympy import Rational as frac\n'), ((1694, 1728), 'sympy.Rational', 'frac', (['(n + 1)', '((n + 1 - k) * (n - k))'], {}), '(n + 1, (n + 1 - k) * (n - k))\n', (1698, 1728), True, 'from sympy import Rational as frac\n')]
|
import os
import time
import copy
import torch
import matplotlib
import torchvision
import torch.nn as nn
import numpy as np
import torch.optim as optim
import matplotlib.pyplot as plt
from pathlib import Path
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
import libs.dirs as dirs
from libs.utils import *
from models.trainer_class import TrainModel
def torch_imshow(gridInput, mean, std, title=None):
gridInput = gridInput.numpy().transpose((1,2,0))
gridInput = std*gridInput + mean
gridInput = np.clip(gridInput, 0, 1)
ax = plt.imshow(gridInput)
plt.title(title)
# plt.pause(0.01)
# plt.imsave("../images/testgrid.png", gridInput)
if __name__ == "__main__":
datasetPath = Path(dirs.dataset) / "torch/hymenoptera_data"
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
dataTransforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean, std),
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean, std),
])
}
# Dataset loaders for train and val sets
imageDatasets = {
x: datasets.ImageFolder(str(datasetPath / x), dataTransforms[x]) for x in ['train', 'val']
}
# Get an image batch of the training set
# inputs, classes = next(iter(dataloaders['train']))
# Make a grid and display it
# imgGrid = torchvision.utils.make_grid(inputs)
# torch_imshow(imgGrid, mean, std, title=[classNames[x] for x in classes])
# plt.show()
# Instantiate trainer object
trainer = TrainModel()
# device = torch.device('cuda:0')
modelFineTune = trainer.define_model_resnet18(finetune=True)
criterion = nn.CrossEntropyLoss()
# Set optimizer
optimizerFineTune = optim.SGD(modelFineTune.parameters(), lr=0.001, momentum=0.9)
# Scheduler for learning rate decay
expLrScheduler = lr_scheduler.StepLR(optimizerFineTune, step_size=7, gamma=0.1)
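    # Added note: StepLR multiplies the learning rate by gamma every step_size epochs, so with
    # lr=0.001, step_size=7 and gamma=0.1 the schedule is 1e-3 for epochs 0-6, 1e-4 for 7-13,
    # 1e-5 for 14-20, and so on.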
# Perform training
trainer.load_data(imageDatasets, num_examples_per_batch=4)
modelFineTune = trainer.train(modelFineTune, criterion, optimizerFineTune, expLrScheduler, num_epochs=25)
|
[
"matplotlib.pyplot.title",
"torch.optim.lr_scheduler.StepLR",
"torchvision.transforms.RandomHorizontalFlip",
"models.trainer_class.TrainModel",
"matplotlib.pyplot.imshow",
"torchvision.transforms.Normalize",
"torch.nn.CrossEntropyLoss",
"numpy.clip",
"pathlib.Path",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor"
] |
[((657, 681), 'numpy.clip', 'np.clip', (['gridInput', '(0)', '(1)'], {}), '(gridInput, 0, 1)\n', (664, 681), True, 'import numpy as np\n'), ((692, 713), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gridInput'], {}), '(gridInput)\n', (702, 713), True, 'import matplotlib.pyplot as plt\n'), ((718, 734), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (727, 734), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2016), 'models.trainer_class.TrainModel', 'TrainModel', ([], {}), '()\n', (2014, 2016), False, 'from models.trainer_class import TrainModel\n'), ((2142, 2163), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2161, 2163), True, 'import torch.nn as nn\n'), ((2333, 2395), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizerFineTune'], {'step_size': '(7)', 'gamma': '(0.1)'}), '(optimizerFineTune, step_size=7, gamma=0.1)\n', (2352, 2395), False, 'from torch.optim import lr_scheduler\n'), ((857, 875), 'pathlib.Path', 'Path', (['dirs.dataset'], {}), '(dirs.dataset)\n', (861, 875), False, 'from pathlib import Path\n'), ((1058, 1091), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (1086, 1091), False, 'from torchvision import datasets, models, transforms\n'), ((1113, 1146), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1144, 1146), False, 'from torchvision import datasets, models, transforms\n'), ((1168, 1189), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1187, 1189), False, 'from torchvision import datasets, models, transforms\n'), ((1211, 1242), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (1231, 1242), False, 'from torchvision import datasets, models, transforms\n'), ((1312, 1334), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1329, 1334), False, 'from torchvision import datasets, models, transforms\n'), ((1356, 1382), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1377, 1382), False, 'from torchvision import datasets, models, transforms\n'), ((1404, 1425), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1423, 1425), False, 'from torchvision import datasets, models, transforms\n'), ((1447, 1478), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mean', 'std'], {}), '(mean, std)\n', (1467, 1478), False, 'from torchvision import datasets, models, transforms\n')]
|
import colorsys
import copy
import os
import time
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
from nets.frcnn import FasterRCNN
from utils.utils import DecodeBox, get_new_img_size
#--------------------------------------------#
#   To run prediction with your own trained model you need to modify 2 parameters:
#   both model_path and classes_path must be changed!
#   If a shape mismatch occurs, make sure that NUM_CLASSES used during training
#   and the model_path and classes_path parameters have been updated accordingly.
#--------------------------------------------#
class FRCNN(object):
_defaults = {
"model_path" : 'model_data/voc_weights_resnet.pth',
"classes_path" : 'model_data/voc_classes.txt',
"confidence" : 0.5,
"iou" : 0.3,
"backbone" : "resnet50",
"cuda" : False,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
#---------------------------------------------------#
    #   Initialize Faster R-CNN
#---------------------------------------------------#
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
self.class_names = self._get_class()
self.generate()
self.mean = torch.Tensor([0, 0, 0, 0]).repeat(self.num_classes+1)[None]
self.std = torch.Tensor([0.1, 0.1, 0.2, 0.2]).repeat(self.num_classes+1)[None]
if self.cuda:
self.mean = self.mean.cuda()
self.std = self.std.cuda()
self.decodebox = DecodeBox(self.std, self.mean, self.num_classes)
#---------------------------------------------------#
    #   Get all the class names
#---------------------------------------------------#
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
#---------------------------------------------------#
    #   Load the model
#---------------------------------------------------#
def generate(self):
#-------------------------------#
        #   Compute the total number of classes
#-------------------------------#
self.num_classes = len(self.class_names)
#-------------------------------#
        #   Load the model and its weights
#-------------------------------#
self.model = FasterRCNN(self.num_classes,"predict",backbone=self.backbone).eval()
print('Loading weights into state dict...')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
state_dict = torch.load(self.model_path, map_location=device)
self.model.load_state_dict(state_dict)
if self.cuda:
# self.model = nn.DataParallel(self.model)
self.model = self.model.cuda()
print('{} model, anchors, and classes loaded.'.format(self.model_path))
        # Assign a different color to the boxes of each class
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
#---------------------------------------------------#
    #   Detect objects in an image
#---------------------------------------------------#
def detect_image(self, image):
#-------------------------------------#
        #   Convert to an RGB image so that grayscale images can also be predicted.
#-------------------------------------#
image = image.convert("RGB")
image_shape = np.array(np.shape(image)[0:2])
old_width, old_height = image_shape[1], image_shape[0]
old_image = copy.deepcopy(image)
#---------------------------------------------------------#
        #   Resize the original image so that its shorter side becomes 600.
#---------------------------------------------------------#
width,height = get_new_img_size(old_width, old_height)
image = image.resize([width,height], Image.BICUBIC)
#-----------------------------------------------------------#
        #   Preprocess the image and normalize it.
#-----------------------------------------------------------#
photo = np.transpose(np.array(image,dtype = np.float32)/255, (2, 0, 1))
with torch.no_grad():
images = torch.from_numpy(np.asarray([photo]))
if self.cuda:
images = images.cuda()
roi_cls_locs, roi_scores, rois, _ = self.model(images)
#-------------------------------------------------------------#
            #   Decode the region proposals with the classifier predictions to obtain the final boxes.
#-------------------------------------------------------------#
outputs = self.decodebox.forward(roi_cls_locs[0], roi_scores[0], rois, height = height, width = width, nms_iou = self.iou, score_thresh = self.confidence)
#---------------------------------------------------------#
            #   If no object is detected, return the original image.
#---------------------------------------------------------#
if len(outputs)==0:
return old_image
outputs = np.array(outputs)
bbox = outputs[:,:4]
label = outputs[:, 4]
conf = outputs[:, 5]
bbox[:, 0::2] = (bbox[:, 0::2]) / width * old_width
bbox[:, 1::2] = (bbox[:, 1::2]) / height * old_height
font = ImageFont.truetype(font='model_data/simhei.ttf',size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))
thickness = max((np.shape(old_image)[0] + np.shape(old_image)[1]) // old_width * 2, 1)
image = old_image
for i, c in enumerate(label):
predicted_class = self.class_names[int(c)]
score = conf[i]
left, top, right, bottom = bbox[i]
top = top - 5
left = left - 5
bottom = bottom + 5
right = right + 5
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32'))
right = min(np.shape(image)[1], np.floor(right + 0.5).astype('int32'))
            # Draw the bounding box
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
label = label.encode('utf-8')
print(label, top, left, bottom, right)
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[int(c)])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[int(c)])
draw.text(text_origin, str(label,'UTF-8'), fill=(0, 0, 0), font=font)
del draw
return image
def get_FPS(self, image, test_interval):
#-------------------------------------#
        #   Convert to an RGB image so that grayscale images can also be predicted.
#-------------------------------------#
image = image.convert("RGB")
image_shape = np.array(np.shape(image)[0:2])
old_width, old_height = image_shape[1], image_shape[0]
#---------------------------------------------------------#
        #   Resize the original image so that its shorter side becomes 600.
#---------------------------------------------------------#
width,height = get_new_img_size(old_width, old_height)
image = image.resize([width,height], Image.BICUBIC)
#-----------------------------------------------------------#
        #   Preprocess the image and normalize it.
#-----------------------------------------------------------#
photo = np.transpose(np.array(image,dtype = np.float32)/255, (2, 0, 1))
with torch.no_grad():
images = torch.from_numpy(np.asarray([photo]))
if self.cuda:
images = images.cuda()
roi_cls_locs, roi_scores, rois, _ = self.model(images)
#-------------------------------------------------------------#
            #   Decode the region proposals with the classifier predictions to obtain the final boxes.
#-------------------------------------------------------------#
outputs = self.decodebox.forward(roi_cls_locs[0], roi_scores[0], rois, height = height, width = width, nms_iou = self.iou, score_thresh = self.confidence)
#---------------------------------------------------------#
            #   If no object is detected, skip the box rescaling.
#---------------------------------------------------------#
if len(outputs)>0:
outputs = np.array(outputs)
bbox = outputs[:,:4]
label = outputs[:, 4]
conf = outputs[:, 5]
bbox[:, 0::2] = (bbox[:, 0::2]) / width * old_width
bbox[:, 1::2] = (bbox[:, 1::2]) / height * old_height
t1 = time.time()
for _ in range(test_interval):
with torch.no_grad():
roi_cls_locs, roi_scores, rois, _ = self.model(images)
#-------------------------------------------------------------#
                #   Decode the region proposals with the classifier predictions to obtain the final boxes.
#-------------------------------------------------------------#
outputs = self.decodebox.forward(roi_cls_locs[0], roi_scores[0], rois, height = height, width = width, nms_iou = self.iou, score_thresh = self.confidence)
#---------------------------------------------------------#
                #   If no object is detected, skip the box rescaling.
#---------------------------------------------------------#
if len(outputs)>0:
outputs = np.array(outputs)
bbox = outputs[:,:4]
label = outputs[:, 4]
conf = outputs[:, 5]
bbox[:, 0::2] = (bbox[:, 0::2]) / width * old_width
bbox[:, 1::2] = (bbox[:, 1::2]) / height * old_height
t2 = time.time()
tact_time = (t2 - t1) / test_interval
return tact_time
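# Hedged usage sketch (added, not part of the original file): typical prediction flow with the
# defaults above; the image path is a hypothetical placeholder.
#   from PIL import Image
#   frcnn = FRCNN()
#   image = Image.open("img/street.jpg")
#   r_image = frcnn.detect_image(image)
#   r_image.show()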
|
[
"copy.deepcopy",
"colorsys.hsv_to_rgb",
"torch.load",
"numpy.asarray",
"numpy.floor",
"utils.utils.get_new_img_size",
"utils.utils.DecodeBox",
"time.time",
"numpy.shape",
"torch.Tensor",
"numpy.array",
"nets.frcnn.FasterRCNN",
"torch.cuda.is_available",
"PIL.ImageDraw.Draw",
"torch.no_grad",
"os.path.expanduser"
] |
[((1528, 1576), 'utils.utils.DecodeBox', 'DecodeBox', (['self.std', 'self.mean', 'self.num_classes'], {}), '(self.std, self.mean, self.num_classes)\n', (1537, 1576), False, 'from utils.utils import DecodeBox, get_new_img_size\n'), ((1759, 1796), 'os.path.expanduser', 'os.path.expanduser', (['self.classes_path'], {}), '(self.classes_path)\n', (1777, 1796), False, 'import os\n'), ((2611, 2659), 'torch.load', 'torch.load', (['self.model_path'], {'map_location': 'device'}), '(self.model_path, map_location=device)\n', (2621, 2659), False, 'import torch\n'), ((3752, 3772), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (3765, 3772), False, 'import copy\n'), ((3984, 4023), 'utils.utils.get_new_img_size', 'get_new_img_size', (['old_width', 'old_height'], {}), '(old_width, old_height)\n', (4000, 4023), False, 'from utils.utils import DecodeBox, get_new_img_size\n'), ((7738, 7777), 'utils.utils.get_new_img_size', 'get_new_img_size', (['old_width', 'old_height'], {}), '(old_width, old_height)\n', (7754, 7777), False, 'from utils.utils import DecodeBox, get_new_img_size\n'), ((9207, 9218), 'time.time', 'time.time', ([], {}), '()\n', (9216, 9218), False, 'import time\n'), ((10317, 10328), 'time.time', 'time.time', ([], {}), '()\n', (10326, 10328), False, 'import time\n'), ((4342, 4357), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4355, 4357), False, 'import torch\n'), ((5180, 5197), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (5188, 5197), True, 'import numpy as np\n'), ((6377, 6398), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (6391, 6398), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((8096, 8111), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8109, 8111), False, 'import torch\n'), ((2391, 2454), 'nets.frcnn.FasterRCNN', 'FasterRCNN', (['self.num_classes', '"""predict"""'], {'backbone': 'self.backbone'}), "(self.num_classes, 'predict', backbone=self.backbone)\n", (2401, 2454), False, 'from nets.frcnn import FasterRCNN\n'), ((2552, 2577), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2575, 2577), False, 'import torch\n'), ((3647, 3662), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (3655, 3662), True, 'import numpy as np\n'), ((4277, 4310), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float32'}), '(image, dtype=np.float32)\n', (4285, 4310), True, 'import numpy as np\n'), ((4397, 4416), 'numpy.asarray', 'np.asarray', (['[photo]'], {}), '([photo])\n', (4407, 4416), True, 'import numpy as np\n'), ((6628, 6665), 'numpy.array', 'np.array', (['[left, top - label_size[1]]'], {}), '([left, top - label_size[1]])\n', (6636, 6665), True, 'import numpy as np\n'), ((6714, 6739), 'numpy.array', 'np.array', (['[left, top + 1]'], {}), '([left, top + 1])\n', (6722, 6739), True, 'import numpy as np\n'), ((7442, 7457), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (7450, 7457), True, 'import numpy as np\n'), ((8031, 8064), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float32'}), '(image, dtype=np.float32)\n', (8039, 8064), True, 'import numpy as np\n'), ((8151, 8170), 'numpy.asarray', 'np.asarray', (['[photo]'], {}), '([photo])\n', (8161, 8170), True, 'import numpy as np\n'), ((8904, 8921), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (8912, 8921), True, 'import numpy as np\n'), ((9275, 9290), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9288, 9290), False, 'import torch\n'), ((1241, 1267), 'torch.Tensor', 'torch.Tensor', (['[0, 0, 0, 0]'], {}), 
'([0, 0, 0, 0])\n', (1253, 1267), False, 'import torch\n'), ((1320, 1354), 'torch.Tensor', 'torch.Tensor', (['[0.1, 0.1, 0.2, 0.2]'], {}), '([0.1, 0.1, 0.2, 0.2])\n', (1332, 1354), False, 'import torch\n'), ((3098, 3121), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (3117, 3121), False, 'import colorsys\n'), ((6133, 6148), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6141, 6148), True, 'import numpy as np\n'), ((6217, 6232), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6225, 6232), True, 'import numpy as np\n'), ((9998, 10015), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (10006, 10015), True, 'import numpy as np\n'), ((6007, 6026), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (6015, 6026), True, 'import numpy as np\n'), ((6070, 6090), 'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (6078, 6090), True, 'import numpy as np\n'), ((6153, 6175), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (6161, 6175), True, 'import numpy as np\n'), ((6237, 6258), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (6245, 6258), True, 'import numpy as np\n'), ((5583, 5602), 'numpy.shape', 'np.shape', (['old_image'], {}), '(old_image)\n', (5591, 5602), True, 'import numpy as np\n'), ((5608, 5627), 'numpy.shape', 'np.shape', (['old_image'], {}), '(old_image)\n', (5616, 5627), True, 'import numpy as np\n'), ((5514, 5529), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5522, 5529), True, 'import numpy as np\n')]
|
from copy import copy
from typing import Optional
import numpy as np
import pandas as pd
from fedot.core.log import Log, default_log
from fedot.core.repository.tasks import Task, TaskTypesEnum
NAME_CLASS_STR = "<class 'str'>"
NAME_CLASS_INT = "<class 'int'>"
NAME_CLASS_FLOAT = "<class 'float'>"
NAME_CLASS_NONE = "<class 'NoneType'>"
FEDOT_STR_NAN = 'fedot_nan'
# If the number of unique values in a feature column is less than 13 - convert the column into string type
CATEGORICAL_UNIQUE_TH = 13
MAX_CATEGORIES_TH = 30
class TableTypesCorrector:
"""
    Class for checking types in input data. Also performs conversion for columns with type conflicts
"""
def __init__(self, log: Optional[Log] = None):
# Maximum allowed unique categories in categorical table (if more - transform it into float)
self.categorical_max_classes_th = MAX_CATEGORIES_TH
# Threshold to convert numerical into categorical column
self.numerical_min_uniques = CATEGORICAL_UNIQUE_TH
self.features_columns_info = {}
self.target_columns_info = {}
# Dictionary with information about converted during fitting columns
self.features_converted_columns = {}
self.target_converted_columns = {}
# Columns to delete due to types conflicts
self.columns_to_del = []
# Column ids for transformation due to number of unique values
self.numerical_into_str = []
self.categorical_into_float = []
# Indices of columns with filed string into numerical transformation
self.string_columns_transformation_failed = {}
# Is target column contains non-numerical cells during conversion
self.target_converting_has_errors = False
# Lists with column types for converting calculated on source input data
self.features_types = None
self.target_types = None
self.log = log or default_log(__name__)
def convert_data_for_fit(self, data: 'InputData'):
""" If column contain several data types - perform correction procedure """
# Convert features to have an ability to insert str into float table or vice versa
data.features = data.features.astype(object)
# Determine types for each column in features and target if it is necessary
self.features_columns_info = define_column_types(data.features)
self.target_columns_info = define_column_types(data.target)
# Correct types in features table
data.features = self.features_types_converting(features=data.features)
# Remain only correct columns
data.features = self.remove_incorrect_features(data.features, self.features_converted_columns)
# And in target(s)
data.target = self.target_types_converting(target=data.target, task=data.task)
data.supplementary_data.column_types = self.prepare_column_types_info(predictors=data.features,
target=data.target,
task=data.task)
# Launch conversion float and integer features into categorical
self._into_categorical_features_transformation_for_fit(data)
self._into_numeric_features_transformation_for_fit(data)
# Save info about features and target types
self.features_types = copy(data.supplementary_data.column_types['features'])
self.target_types = copy(data.supplementary_data.column_types['target'])
self._retain_columns_info_without_types_conflicts(data)
return data
def convert_data_for_predict(self, data: 'InputData'):
""" Prepare data for predict stage. Include only column types transformation """
# Ordering is important because after removing incorrect features - indices are obsolete
data.features = data.features.astype(object)
data.features = self.remove_incorrect_features(data.features, self.features_converted_columns)
data.features = apply_type_transformation(data.features, self.features_types, self.log)
data.target = apply_type_transformation(data.target, self.target_types, self.log)
data.supplementary_data.column_types = self.prepare_column_types_info(predictors=data.features,
target=data.target,
task=data.task)
# Convert column types
self._into_categorical_features_transformation_for_predict(data)
self._into_numeric_features_transformation_for_predict(data)
self._retain_columns_info_without_types_conflicts(data)
return data
def remove_incorrect_features(self, table: np.array, converted_columns: dict):
"""
        Remove from the table the columns whose type conflicts could not be resolved
        :param table: tabular dataset based on which the new dataset will be generated
        :param converted_columns: dictionary with actions applied to the table
"""
if not converted_columns:
return table
self.columns_to_del = [column_id for column_id, new_type_name in converted_columns.items() if
new_type_name == 'removed']
if not self.columns_to_del:
# There are no columns to delete
return table
# Remove all "bad" columns
table = np.delete(table, self.columns_to_del, 1)
return table
def features_types_converting(self, features: np.array) -> np.array:
""" Convert all elements in the data in every feature column into one type
:param features: tabular features array
"""
features_with_mixed_types = find_mixed_types_columns(self.features_columns_info)
if not features_with_mixed_types:
return features
# There are mixed-types columns in features table - convert them
for mixed_column_id in features_with_mixed_types:
column_info = self.features_columns_info[mixed_column_id]
if column_info.get('str_number') > 0:
# There are string elements in the array
mixed_column = features[:, mixed_column_id]
updated_column, new_type_name = self._convert_feature_into_one_type(mixed_column, column_info,
mixed_column_id)
# Store information about converted columns
self.features_converted_columns.update({mixed_column_id: new_type_name})
if updated_column is not None:
features[:, mixed_column_id] = updated_column
return features
def target_types_converting(self, target: np.array, task: Task) -> np.array:
""" Convert all elements in every target column into one type
:param target: tabular target array
:param task: task to solve
"""
target_with_mixed_types = find_mixed_types_columns(self.target_columns_info)
if not target_with_mixed_types:
return target
# There are mixed-types columns in features table - convert them
for mixed_column_id in target_with_mixed_types:
column_info = self.target_columns_info[mixed_column_id]
if column_info.get('str_number') > 0:
# There are string elements in the array
mixed_column = target[:, mixed_column_id]
updated_column, new_type_name = self._convert_target_into_one_type(mixed_column, column_info,
mixed_column_id, task)
# Store information about converted columns
self.target_converted_columns.update({mixed_column_id: new_type_name})
if updated_column is not None:
target[:, mixed_column_id] = updated_column
return target
def prepare_column_types_info(self, predictors: np.array, target: np.array = None,
task: Task = None) -> dict:
""" Prepare information about columns in a form of dictionary
Dictionary has two keys: 'target' and 'features'
"""
if not self.features_columns_info:
# Information about column types is empty - there is a need to launch algorithm to collect info
self.features_columns_info = define_column_types(predictors)
predictors = self.features_types_converting(features=predictors)
if not self.target_columns_info and task.task_type is not TaskTypesEnum.ts_forecasting:
self.target_columns_info = define_column_types(target)
target = self.target_types_converting(target=target, task=task)
features_types = _generate_list_with_types(self.features_columns_info, self.features_converted_columns)
self._check_columns_vs_types_number(predictors, features_types)
if target is None or task.task_type is TaskTypesEnum.ts_forecasting:
return {'features': features_types}
else:
target_types = _generate_list_with_types(self.target_columns_info, self.target_converted_columns)
self._check_columns_vs_types_number(target, target_types)
return {'features': features_types, 'target': target_types}
def _retain_columns_info_without_types_conflicts(self, data: 'InputData'):
""" Update information in supplementary info - retain info only about remained columns.
Such columns have no conflicts with types converting.
"""
if len(self.string_columns_transformation_failed) > 0:
self.log.warn(f'Columns with indices {list(self.string_columns_transformation_failed.keys())} were '
f'removed during mixed types column converting due to conflicts.')
data.features = self.remove_incorrect_features(data.features, self.string_columns_transformation_failed)
remained_column_types = []
for i, col in enumerate(data.supplementary_data.column_types['features']):
if i not in self.string_columns_transformation_failed:
remained_column_types.append(col)
data.supplementary_data.column_types['features'] = remained_column_types
def _check_columns_vs_types_number(self, table: np.array, column_types: list):
# Check if columns number correct
n_rows, n_cols = table.shape
if n_cols != len(column_types):
# There is an incorrect types calculation
self.log.warn('Columns number and types numbers do not match.')
def _convert_feature_into_one_type(self, mixed_column: np.array, column_info: dict, mixed_column_id: int):
""" Determine new type for current feature column based on the string ratio. And then convert column into it.
:param mixed_column: one-dimensional array with several data types
:param column_info: dictionary with information about types in the column
:param mixed_column_id: index of column in dataset
"""
if len(column_info['types']) == 2 and NAME_CLASS_NONE in column_info['types']:
# Column contain only one data type and nans
filtered_types = [x for x in column_info['types'] if x != NAME_CLASS_NONE]
return mixed_column, filtered_types[0]
string_objects_number = column_info['str_number']
all_elements_number = string_objects_number + column_info['int_number'] + column_info['float_number']
string_ratio = string_objects_number / all_elements_number
if string_ratio > 0.5:
suggested_type = str
else:
suggested_type = _obtain_new_column_type(column_info)
try:
mixed_column = mixed_column.astype(suggested_type)
# If there were nans in the column - paste nan
if column_info['nan_number'] > 0:
mixed_column = mixed_column.astype(object)
mixed_column[column_info['nan_ids']] = np.nan
del column_info['nan_ids']
return mixed_column, str(suggested_type)
except ValueError:
# Cannot convert string objects into int or float (for example 'a' into int)
prefix = f'Feature column with index {mixed_column_id} contains ' \
f'following data types: {column_info["types"]}.'
self.log.warn(f'{prefix} String cannot be converted into {suggested_type}. Drop column.')
return None, 'removed'
def _convert_target_into_one_type(self, mixed_column: np.array, column_info: dict, mixed_column_id: int,
task: Task) -> [np.array, str]:
""" Convert target columns into one type based on column proportions of object and task """
if task.task_type is TaskTypesEnum.classification:
# For classification labels are string if at least one element is a string
suggested_type = str
else:
suggested_type = _obtain_new_column_type(column_info)
try:
mixed_column = mixed_column.astype(suggested_type)
return mixed_column, str(suggested_type)
except ValueError:
# Cannot convert string objects into int or float (for example 'a' into int)
target_column = pd.Series(mixed_column)
converted_column = pd.to_numeric(target_column, errors='coerce')
prefix = f'Target column with index {mixed_column_id} contains ' \
f'following data types: {column_info["types"]}.'
log_message = f'{prefix} String cannot be converted into {suggested_type}. Ignore non converted values.'
self.log.debug(log_message)
self.target_converting_has_errors = True
return converted_column.values, str(suggested_type)
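    # Added note: pd.to_numeric(..., errors='coerce') used above turns every value that cannot be
    # parsed as a number into NaN (e.g. ['1', '2', 'a'] -> [1.0, 2.0, NaN]), which is why
    # non-convertible target entries are silently ignored instead of raising an error.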
def _into_categorical_features_transformation_for_fit(self, data: 'InputData'):
"""
        Perform automated categorical feature determination. If a feature column contains
        int or float values with only a few unique values (fewer than 13), it is converted into a categorical column
"""
n_rows, n_cols = data.features.shape
for column_id in range(n_cols):
# For every int/float column perform check
column_type = data.supplementary_data.column_types['features'][column_id]
if 'int' in column_type or 'float' in column_type:
numerical_column = pd.Series(data.features[:, column_id])
# Calculate number of unique values except nans
unique_numbers = len(numerical_column.dropna().unique())
if 2 < unique_numbers < self.numerical_min_uniques:
# Column need to be transformed into categorical (string) one
self.numerical_into_str.append(column_id)
# Convert into string
converted_array = convert_num_column_into_string_array(numerical_column)
# Store converted column into features table
data.features[:, column_id] = converted_array
# Update information about column types (in-place)
features_types = data.supplementary_data.column_types['features']
features_types[column_id] = NAME_CLASS_STR
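    # Added note: with the thresholds above, an int/float column whose number of non-NaN unique
    # values lies strictly between 2 and CATEGORICAL_UNIQUE_TH (13) is re-labelled as a
    # categorical string column; binary and high-cardinality numeric columns are left numeric.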
def _into_categorical_features_transformation_for_predict(self, data: 'InputData'):
""" Apply conversion into categorical string column for every signed column """
if not self.numerical_into_str:
# There is no transformation for current table
return data
n_rows, n_cols = data.features.shape
for column_id in range(n_cols):
if column_id in self.numerical_into_str:
numerical_column = pd.Series(data.features[:, column_id])
# Column must be converted into categorical
converted_array = convert_num_column_into_string_array(numerical_column)
data.features[:, column_id] = converted_array
# Update information about column types (in-place)
features_types = data.supplementary_data.column_types['features']
features_types[column_id] = NAME_CLASS_STR
def _into_numeric_features_transformation_for_fit(self, data: 'InputData'):
"""
Automatically determine categorical features which should be converted into float
"""
n_rows, n_cols = data.features.shape
for column_id in range(n_cols):
# For every string column perform converting if necessary
column_type = data.supplementary_data.column_types['features'][column_id]
if 'str' in column_type:
string_column = pd.Series(data.features[:, column_id])
unique_numbers = len(string_column.dropna().unique())
if unique_numbers > self.categorical_max_classes_th:
# Number of nans in the column
nans_number = string_column.isna().sum()
                    # The column is probably not truly categorical but numerical with an incorrectly defined type
converted_column = pd.to_numeric(string_column, errors='coerce')
                    # Count nans introduced by the conversion
result_nans_number = converted_column.isna().sum()
failed_objects_number = result_nans_number - nans_number
non_nan_all_objects_number = n_rows - nans_number
failed_ratio = failed_objects_number / non_nan_all_objects_number
                    # If every object is truly a string, the whole column converts into nan
is_column_contain_numerical_objects = failed_ratio != 1
if failed_ratio < 0.5:
# The majority of objects can be converted into numerical
data.features[:, column_id] = converted_column.values
# Update information about column types (in-place)
self.categorical_into_float.append(column_id)
features_types = data.supplementary_data.column_types['features']
features_types[column_id] = NAME_CLASS_FLOAT
elif failed_ratio >= 0.5 and is_column_contain_numerical_objects:
# Probably numerical column contains '?' or 'x' as nans equivalents
# Add columns to remove list
self.string_columns_transformation_failed.update({column_id: 'removed'})
def _into_numeric_features_transformation_for_predict(self, data: 'InputData'):
""" Apply conversion into float string column for every signed column """
if not self.categorical_into_float:
# There is no transformation for current table
return data
n_rows, n_cols = data.features.shape
for column_id in range(n_cols):
if column_id in self.categorical_into_float and column_id not in self.string_columns_transformation_failed:
string_column = pd.Series(data.features[:, column_id])
# Column must be converted into float from categorical
converted_column = pd.to_numeric(string_column, errors='coerce')
data.features[:, column_id] = converted_column.values
# Update information about column types (in-place)
features_types = data.supplementary_data.column_types['features']
features_types[column_id] = NAME_CLASS_FLOAT
def define_column_types(table: np.array):
""" Prepare information about types per columns. For each column store unique
types, which column contains. If column with mixed type contain str object
additional field 'str_ids' with indices of string objects is prepared
"""
# TODO: current processing is relatively computationally expensive - probably refactor needed
def type_ignoring_nans(item):
""" Return type of element in the array. If item is np.nan - return NoneType """
current_type = type(item)
if current_type is float and np.isnan(item):
            # Check whether the current element is nan (np.nan is of float type)
return type(None)
return current_type
if table is None:
return {}
n_rows, n_columns = table.shape
columns_info = {}
for column_id in range(n_columns):
current_column = table[:, column_id]
# Check every element in numpy array - it can take a long time!
column_types = list(map(type_ignoring_nans, current_column))
# Store only unique values
set_column_types = set(column_types)
# Convert types into string names
column_types_names = list(map(str, set_column_types))
if len(column_types_names) > 1:
# There are several types in one column
types_names = np.array(column_types, dtype=str)
            # Count the number of string objects in the column
str_number = len(np.argwhere(types_names == NAME_CLASS_STR))
int_number = len(np.argwhere(types_names == NAME_CLASS_INT))
float_number = len(np.argwhere(types_names == NAME_CLASS_FLOAT))
            # Store information about nans in the column
nan_ids = np.ravel(np.argwhere(types_names == NAME_CLASS_NONE))
nan_number = len(nan_ids)
columns_info.update({column_id: {'types': column_types_names,
'str_number': str_number,
'int_number': int_number,
'float_number': float_number,
'nan_number': nan_number,
'nan_ids': nan_ids}})
else:
            # There is only one type in the column
columns_info.update({column_id: {'types': column_types_names}})
return columns_info
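# A hypothetical illustration, not part of the original module: what the per-column
# type information returned by define_column_types looks like for a tiny mixed table.
# The helper name and the sample values below are invented for demonstration only.
def _demo_define_column_types():
    import numpy as np
    table = np.array([[1, 'a', 1.5],
                      [2, 'b', np.nan],
                      [3, 'c', 2.5]], dtype=object)
    info = define_column_types(table)
    # Columns 0 and 1 hold a single type each, so they get a one-entry 'types' list;
    # column 2 mixes float and nan, so its entry also carries 'nan_number' and 'nan_ids'
    return info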
def find_mixed_types_columns(columns_info: dict):
""" Search for columns with several types in them """
columns_with_mixed_types = []
for column_id, information in columns_info.items():
column_types = information['types']
if len(column_types) > 1:
columns_with_mixed_types.append(column_id)
return columns_with_mixed_types
def apply_type_transformation(table: np.array, column_types: list, log: Log):
"""
    Transform the columns of the dataset into the desired types. Performed
    at the predict stage, when the column types have already been determined
    during fit
"""
def type_by_name(current_type_name: str):
""" Return type by it's name """
if 'int' in current_type_name:
return int
elif 'str' in current_type_name:
return str
else:
return float
if table is None:
        # Occurs at the predict stage when there is no target info
return None
n_rows, n_cols = table.shape
for column_id in range(n_cols):
current_column = table[:, column_id]
current_type = type_by_name(column_types[column_id])
try:
table[:, column_id] = current_column.astype(current_type)
except ValueError as ex:
log.debug(f'Cannot convert column with id {column_id} into type {current_type} due to {ex}')
message = str(ex)
if 'NaN' not in message:
# Try to convert column from string into float
unseen_label = message.split("\'")[1]
if ',' in unseen_label:
# Most likely case: '20,000' must be converted into '20.000'
err = f'Column {column_id} contains both "." and ",". Standardize it.'
raise ValueError(err)
else:
# Most likely case: ['a', '1.5'] -> [np.nan, 1.5]
label_ids = np.ravel(np.argwhere(current_column == unseen_label))
current_column[label_ids] = np.nan
table[:, column_id] = current_column.astype(float)
return table
def convert_num_column_into_string_array(numerical_column: pd.Series) -> np.array:
""" Convert pandas column into numpy one-dimensional array """
# Convert into string
converted_column = numerical_column.astype(str)
converted_array = converted_column.values
# If there are nans - insert them
nan_ids = np.ravel(np.argwhere(converted_array == 'nan'))
if len(nan_ids) > 0:
converted_array = converted_array.astype(object)
converted_array[nan_ids] = np.nan
return converted_array
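# A minimal sketch (hypothetical, not in the original code) of how
# convert_num_column_into_string_array treats missing values: numbers become strings,
# while nan cells are kept as np.nan instead of the literal string 'nan'.
def _demo_convert_num_column():
    import numpy as np
    import pandas as pd
    column = pd.Series([1.0, np.nan, 3.5])
    # Expected result: array(['1.0', nan, '3.5'], dtype=object)
    return convert_num_column_into_string_array(column)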
def _obtain_new_column_type(column_info):
""" Suggest in or float type based on the presence of nan and float values """
if column_info['float_number'] > 0 or column_info['nan_number'] > 0:
        # Even if only one of the types is float, all elements should be converted into float
return float
else:
        # It is possible to convert the numerical values into integer type
return int
def _generate_list_with_types(columns_types_info: dict, converted_columns: dict) -> list:
""" Create list with types for all remained columns
:param columns_types_info: dictionary with initial column types
:param converted_columns: dictionary with transformed column types
"""
updated_column_types = []
for column_id, column_info in columns_types_info.items():
column_types = column_info['types']
if len(column_types) == 1:
            # Column initially contains only one type
updated_column_types.append(column_types[0])
elif len(column_types) == 2 and NAME_CLASS_NONE in column_types:
# Column with one type and nans
filtered_types = [x for x in column_types if x != NAME_CLASS_NONE]
updated_column_types.append(filtered_types[0])
else:
if any('str' in column_type_name for column_type_name in column_types):
# Mixed-types column with string
new_column_type = converted_columns[column_id]
if new_column_type != 'removed':
updated_column_types.append(new_column_type)
else:
# Mixed-types with float and integer
updated_column_types.append(NAME_CLASS_FLOAT)
return updated_column_types
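# A hypothetical usage sketch (function name and dictionaries invented for illustration):
# combining the output of define_column_types with the per-column conversion results to
# obtain the final list of column types.
def _demo_generate_list_with_types():
    columns_types_info = {0: {'types': [NAME_CLASS_INT]},
                          1: {'types': [NAME_CLASS_STR, NAME_CLASS_INT]}}
    converted_columns = {1: NAME_CLASS_STR}
    # Column 0 keeps its single type; column 1 was mixed with strings, so the converted
    # type recorded during fit is used. Expected result: [NAME_CLASS_INT, NAME_CLASS_STR]
    return _generate_list_with_types(columns_types_info, converted_columns)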
|
[
"copy.copy",
"numpy.isnan",
"numpy.array",
"pandas.Series",
"numpy.argwhere",
"fedot.core.log.default_log",
"numpy.delete",
"pandas.to_numeric"
] |
[((3397, 3451), 'copy.copy', 'copy', (["data.supplementary_data.column_types['features']"], {}), "(data.supplementary_data.column_types['features'])\n", (3401, 3451), False, 'from copy import copy\n'), ((3480, 3532), 'copy.copy', 'copy', (["data.supplementary_data.column_types['target']"], {}), "(data.supplementary_data.column_types['target'])\n", (3484, 3532), False, 'from copy import copy\n'), ((5482, 5522), 'numpy.delete', 'np.delete', (['table', 'self.columns_to_del', '(1)'], {}), '(table, self.columns_to_del, 1)\n', (5491, 5522), True, 'import numpy as np\n'), ((24740, 24777), 'numpy.argwhere', 'np.argwhere', (["(converted_array == 'nan')"], {}), "(converted_array == 'nan')\n", (24751, 24777), True, 'import numpy as np\n'), ((1903, 1924), 'fedot.core.log.default_log', 'default_log', (['__name__'], {}), '(__name__)\n', (1914, 1924), False, 'from fedot.core.log import Log, default_log\n'), ((20357, 20371), 'numpy.isnan', 'np.isnan', (['item'], {}), '(item)\n', (20365, 20371), True, 'import numpy as np\n'), ((21139, 21172), 'numpy.array', 'np.array', (['column_types'], {'dtype': 'str'}), '(column_types, dtype=str)\n', (21147, 21172), True, 'import numpy as np\n'), ((13500, 13523), 'pandas.Series', 'pd.Series', (['mixed_column'], {}), '(mixed_column)\n', (13509, 13523), True, 'import pandas as pd\n'), ((13555, 13600), 'pandas.to_numeric', 'pd.to_numeric', (['target_column'], {'errors': '"""coerce"""'}), "(target_column, errors='coerce')\n", (13568, 13600), True, 'import pandas as pd\n'), ((14613, 14651), 'pandas.Series', 'pd.Series', (['data.features[:, column_id]'], {}), '(data.features[:, column_id])\n', (14622, 14651), True, 'import pandas as pd\n'), ((15966, 16004), 'pandas.Series', 'pd.Series', (['data.features[:, column_id]'], {}), '(data.features[:, column_id])\n', (15975, 16004), True, 'import pandas as pd\n'), ((16930, 16968), 'pandas.Series', 'pd.Series', (['data.features[:, column_id]'], {}), '(data.features[:, column_id])\n', (16939, 16968), True, 'import pandas as pd\n'), ((19303, 19341), 'pandas.Series', 'pd.Series', (['data.features[:, column_id]'], {}), '(data.features[:, column_id])\n', (19312, 19341), True, 'import pandas as pd\n'), ((19449, 19494), 'pandas.to_numeric', 'pd.to_numeric', (['string_column'], {'errors': '"""coerce"""'}), "(string_column, errors='coerce')\n", (19462, 19494), True, 'import pandas as pd\n'), ((21266, 21308), 'numpy.argwhere', 'np.argwhere', (['(types_names == NAME_CLASS_STR)'], {}), '(types_names == NAME_CLASS_STR)\n', (21277, 21308), True, 'import numpy as np\n'), ((21339, 21381), 'numpy.argwhere', 'np.argwhere', (['(types_names == NAME_CLASS_INT)'], {}), '(types_names == NAME_CLASS_INT)\n', (21350, 21381), True, 'import numpy as np\n'), ((21414, 21458), 'numpy.argwhere', 'np.argwhere', (['(types_names == NAME_CLASS_FLOAT)'], {}), '(types_names == NAME_CLASS_FLOAT)\n', (21425, 21458), True, 'import numpy as np\n'), ((21549, 21592), 'numpy.argwhere', 'np.argwhere', (['(types_names == NAME_CLASS_NONE)'], {}), '(types_names == NAME_CLASS_NONE)\n', (21560, 21592), True, 'import numpy as np\n'), ((17375, 17420), 'pandas.to_numeric', 'pd.to_numeric', (['string_column'], {'errors': '"""coerce"""'}), "(string_column, errors='coerce')\n", (17388, 17420), True, 'import pandas as pd\n'), ((24213, 24256), 'numpy.argwhere', 'np.argwhere', (['(current_column == unseen_label)'], {}), '(current_column == unseen_label)\n', (24224, 24256), True, 'import numpy as np\n')]
|
"""rv_bis_corr.
Author: <NAME>
Calculate and plot RV vs BIS correlation
"""
import numpy as np
import statsmodels.api as sm
from scipy.stats import pearsonr
import scipy.stats as st
import matplotlib.pyplot as plt
def rv_bis_corr(data, confidence=0.05, name='last'):
"""Calculate RV vs BIS correlation and plot it.
Parameters
----------
data : dict
        A dictionary containing the datasets. Each dataset must be an array
        of shape (m, 5) whose columns are, in order: t, x, xerr, y, yerr.
    confidence : float
        The significance level (alpha) used for the confidence band.
name : str, optional
Target name for saving the plot.
"""
# Linear Model fitting
tlow = np.inf
tup = -np.inf
x = np.array([])
y = np.array([])
for key in data.keys():
tl = data[key][:, 0].min()
tu = data[key][:, 0].max()
if tl < tlow:
tlow = tl
if tu > tup:
tup = tu
x = np.concatenate((x, data[key][:, 1]))
y = np.concatenate((y, data[key][:, 3]))
r, p_val = pearsonr(x, y)
X = sm.add_constant(x)
model = sm.OLS(y, X)
fitted = model.fit()
error_kwargs = {'lw': .75, 'zorder': 0}
# Confidence interval calculation
y_hat = fitted.predict(X)
y_err = y - y_hat
x_mean = X.T[1].mean()
n = len(x)
dof = n - fitted.df_model - 1 # Degrees of freedom
# 2 tailed t-stat calculation
t = st.t.ppf(1 - confidence / 2, df=dof)
s_err = np.sum(np.power(y_err, 2))
markers = ['o', 'v', '^', '>', '<', '8', 's', 'p', 'H', 'D', '*', 'd']
f, ax = plt.subplots(figsize=(20, 10))
ims = []
for i, key in enumerate(data.keys()):
x = data[key][:, 1]
y = data[key][:, 3]
xerr = data[key][:, 2]
yerr = data[key][:, 4]
ti = data[key][:, 0]
im = ax.scatter(
x, y, marker=markers[i], edgecolors='k', c=ti, cmap='cool_r', s=180
)
ims.append(im)
ax.errorbar(
x, y, xerr=xerr, yerr=yerr, marker=None,
linestyle='', ecolor='k', **error_kwargs
)
for im in ims:
im.set_clim(tlow, tup)
xmin, xmax = ax.get_xlim()
x_pred = np.linspace(xmin, xmax, 1000)
x_pred2 = sm.add_constant(x_pred)
y_pred = fitted.predict(x_pred2)
conf = t * np.sqrt((s_err / (n - 2)) *
(1. / n + (np.power((x_pred - x_mean), 2) /
((np.sum(np.power(x_pred, 2))) - n *
(np.power(x_mean, 2))))))
upper = y_pred + abs(conf)
lower = y_pred - abs(conf)
cb = f.colorbar(ims[-1], pad=.005)
lab = 'Pearson r: {:.3f}'.format(r)
ax.plot(x_pred, y_pred, '-', color='midnightblue', linewidth=2, label=lab)
ax.fill_between(x_pred, lower, upper, color='#888888', alpha=0.4)
ax.set_xlim(xmin, xmax)
ax.set_xlabel('RV (km s$^{-1}$)', fontsize=30)
ax.set_ylabel('Bisector Velocity Span (km s$^{-1}$)', fontsize=30)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(28)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(28)
cb.ax.tick_params(labelsize=28)
cb.set_label('JD - 2450000', rotation=270, labelpad=25, fontsize=30)
fname = '{}_bisector_rv.pdf'.format(name)
plt.legend(loc=0, prop={'size': 28})
plt.savefig(fname, bbox_inches='tight')
return fitted
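# A hypothetical usage sketch, not part of the original module: calling rv_bis_corr on
# synthetic measurements. The instrument key and all values are invented; each array
# follows the (m, 5) layout t, x, xerr, y, yerr expected by the function.
def _demo_rv_bis_corr():
    np.random.seed(0)
    t = np.linspace(3000., 3500., 40)
    rv = np.random.normal(0.0, 0.05, 40)
    bis = -0.3 * rv + np.random.normal(0.0, 0.01, 40)
    dataset = np.column_stack([t, rv, np.full(40, 0.005), bis, np.full(40, 0.005)])
    data = {'HARPS': dataset}
    return rv_bis_corr(data, confidence=0.05, name='demo')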
|
[
"numpy.concatenate",
"statsmodels.api.OLS",
"numpy.power",
"matplotlib.pyplot.legend",
"scipy.stats.pearsonr",
"numpy.array",
"numpy.linspace",
"statsmodels.api.add_constant",
"scipy.stats.t.ppf",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((716, 728), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (724, 728), True, 'import numpy as np\n'), ((737, 749), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (745, 749), True, 'import numpy as np\n'), ((1048, 1062), 'scipy.stats.pearsonr', 'pearsonr', (['x', 'y'], {}), '(x, y)\n', (1056, 1062), False, 'from scipy.stats import pearsonr\n'), ((1072, 1090), 'statsmodels.api.add_constant', 'sm.add_constant', (['x'], {}), '(x)\n', (1087, 1090), True, 'import statsmodels.api as sm\n'), ((1103, 1115), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (1109, 1115), True, 'import statsmodels.api as sm\n'), ((1418, 1454), 'scipy.stats.t.ppf', 'st.t.ppf', (['(1 - confidence / 2)'], {'df': 'dof'}), '(1 - confidence / 2, df=dof)\n', (1426, 1454), True, 'import scipy.stats as st\n'), ((1583, 1613), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1595, 1613), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2216), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(1000)'], {}), '(xmin, xmax, 1000)\n', (2198, 2216), True, 'import numpy as np\n'), ((2231, 2254), 'statsmodels.api.add_constant', 'sm.add_constant', (['x_pred'], {}), '(x_pred)\n', (2246, 2254), True, 'import statsmodels.api as sm\n'), ((3295, 3331), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)', 'prop': "{'size': 28}"}), "(loc=0, prop={'size': 28})\n", (3305, 3331), True, 'import matplotlib.pyplot as plt\n'), ((3336, 3375), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, bbox_inches='tight')\n", (3347, 3375), True, 'import matplotlib.pyplot as plt\n'), ((946, 982), 'numpy.concatenate', 'np.concatenate', (['(x, data[key][:, 1])'], {}), '((x, data[key][:, 1]))\n', (960, 982), True, 'import numpy as np\n'), ((995, 1031), 'numpy.concatenate', 'np.concatenate', (['(y, data[key][:, 3])'], {}), '((y, data[key][:, 3]))\n', (1009, 1031), True, 'import numpy as np\n'), ((1474, 1492), 'numpy.power', 'np.power', (['y_err', '(2)'], {}), '(y_err, 2)\n', (1482, 1492), True, 'import numpy as np\n'), ((2370, 2398), 'numpy.power', 'np.power', (['(x_pred - x_mean)', '(2)'], {}), '(x_pred - x_mean, 2)\n', (2378, 2398), True, 'import numpy as np\n'), ((2446, 2465), 'numpy.power', 'np.power', (['x_pred', '(2)'], {}), '(x_pred, 2)\n', (2454, 2465), True, 'import numpy as np\n'), ((2510, 2529), 'numpy.power', 'np.power', (['x_mean', '(2)'], {}), '(x_mean, 2)\n', (2518, 2529), True, 'import numpy as np\n')]
|
import torch
import functools
from torch.optim import Adam
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torchvision.datasets import MNIST
import tqdm
import numpy as np
from .model import ScoreNet
# @title Set up the SDE
device = None
def marginal_prob_std(t, sigma):
"""Compute the mean and standard deviation of $p_{0t}(x(t) | x(0))$.
Args:
t: A vector of time steps.
sigma: The $\sigma$ in our SDE.
Returns:
The standard deviation.
"""
t = torch.tensor(t, device=device)
return torch.sqrt((sigma ** (2 * t) - 1.) / 2. / np.log(sigma))
def diffusion_coeff(t, sigma):
"""Compute the diffusion coefficient of our SDE.
Args:
t: A vector of time steps.
sigma: The $\sigma$ in our SDE.
Returns:
The vector of diffusion coefficients.
"""
return torch.tensor(sigma ** t, device=device)
sigma = 25.0 # @param {'type':'number'}
marginal_prob_std_fn = functools.partial(marginal_prob_std, sigma=sigma)
diffusion_coeff_fn = functools.partial(diffusion_coeff, sigma=sigma)
#@title Define the loss function
def loss_fn(model, x, marginal_prob_std, eps=1e-5):
"""The loss function for training score-based generative models.
Args:
model: A PyTorch model instance that represents a
time-dependent score-based model.
x: A mini-batch of training data.
marginal_prob_std: A function that gives the standard deviation of
the perturbation kernel.
eps: A tolerance value for numerical stability.
"""
random_t = torch.rand(x.shape[0], device=x.device) * (1. - eps) + eps
z = torch.randn_like(x)
std = marginal_prob_std(random_t)
perturbed_x = x + z * std[:, None, None, None]
score = model(perturbed_x, random_t)
loss = torch.mean(torch.sum((score * std[:, None, None, None] + z)**2, dim=(1,2,3)))
return loss
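# A hypothetical sanity-check sketch, not in the original code: evaluating the
# denoising score-matching loss on a random batch with an untrained ScoreNet.
# The batch shape (8, 1, 28, 28) assumes MNIST-sized inputs.
def _demo_loss_on_random_batch():
    model = ScoreNet(marginal_prob_std=marginal_prob_std_fn)
    x = torch.rand(8, 1, 28, 28)
    return loss_fn(model, x, marginal_prob_std_fn)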
#@title Training
def train_sde(*, data_loader, device_, n_epochs = 50, lr = 1e-4):
device = device_
score_model = torch.nn.DataParallel(ScoreNet(marginal_prob_std=marginal_prob_std_fn))
score_model = score_model.to(device)
optimizer = Adam(score_model.parameters(), lr=lr)
tqdm_epoch = tqdm.trange(n_epochs)
for epoch in tqdm_epoch:
avg_loss = 0.
num_items = 0
for x, y in data_loader:
x = x.to(device)
loss = loss_fn(score_model, x, marginal_prob_std_fn)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item() * x.shape[0]
num_items += x.shape[0]
# Print the averaged training loss so far.
tqdm_epoch.set_description('Average Loss: {:5f}'.format(avg_loss / num_items))
# Update the checkpoint after each epoch of training.
torch.save(score_model.state_dict(), 'ckpt.pth')
return score_model
# @title Define the Euler-Maruyama sampler
## The number of sampling steps.
num_steps = 500 # @param {'type':'integer'}
def Euler_Maruyama_sampler(score_model,
marginal_prob_std=marginal_prob_std_fn,
diffusion_coeff=diffusion_coeff_fn,
batch_size=64,
num_steps=num_steps,
device='cuda',
eps=1e-3):
"""Generate samples from score-based models with the Euler-Maruyama solver.
Args:
score_model: A PyTorch model that represents the time-dependent score-based model.
marginal_prob_std: A function that gives the standard deviation of
the perturbation kernel.
diffusion_coeff: A function that gives the diffusion coefficient of the SDE.
    batch_size: The number of samples to generate by calling this function once.
num_steps: The number of sampling steps.
Equivalent to the number of discretized time steps.
device: 'cuda' for running on GPUs, and 'cpu' for running on CPUs.
eps: The smallest time step for numerical stability.
Returns:
Samples.
"""
t = torch.ones(batch_size, device=device)
init_x = torch.randn(batch_size, 1, 28, 28, device=device) \
* marginal_prob_std(t)[:, None, None, None]
time_steps = torch.linspace(1., eps, num_steps, device=device)
step_size = time_steps[0] - time_steps[1]
x = init_x
with torch.no_grad():
for time_step in tqdm.tqdm(time_steps):
batch_time_step = torch.ones(batch_size, device=device) * time_step
g = diffusion_coeff(batch_time_step)
mean_x = x + (g ** 2)[:, None, None, None] * score_model(x, batch_time_step) * step_size
x = mean_x + torch.sqrt(step_size) * g[:, None, None, None] * torch.randn_like(x)
# Do not include any noise in the last sampling step.
return mean_x
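# A hypothetical end-to-end sketch, not part of the original module: wiring the MNIST
# imports above into training and sampling. The dataset root, batch size and single
# training epoch are illustrative assumptions only.
def _demo_train_and_sample():
    dataset = MNIST('.', train=True, transform=transforms.ToTensor(), download=True)
    data_loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=0)
    dev = 'cuda' if torch.cuda.is_available() else 'cpu'
    score_model = train_sde(data_loader=data_loader, device_=dev, n_epochs=1, lr=1e-4)
    samples = Euler_Maruyama_sampler(score_model, batch_size=16, device=dev)
    return samples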
|
[
"functools.partial",
"torch.ones",
"tqdm.tqdm",
"numpy.log",
"torch.randn_like",
"tqdm.trange",
"torch.sqrt",
"torch.randn",
"torch.rand",
"torch.linspace",
"torch.no_grad",
"torch.sum",
"torch.tensor"
] |
[((981, 1030), 'functools.partial', 'functools.partial', (['marginal_prob_std'], {'sigma': 'sigma'}), '(marginal_prob_std, sigma=sigma)\n', (998, 1030), False, 'import functools\n'), ((1052, 1099), 'functools.partial', 'functools.partial', (['diffusion_coeff'], {'sigma': 'sigma'}), '(diffusion_coeff, sigma=sigma)\n', (1069, 1099), False, 'import functools\n'), ((531, 561), 'torch.tensor', 'torch.tensor', (['t'], {'device': 'device'}), '(t, device=device)\n', (543, 561), False, 'import torch\n'), ((875, 914), 'torch.tensor', 'torch.tensor', (['(sigma ** t)'], {'device': 'device'}), '(sigma ** t, device=device)\n', (887, 914), False, 'import torch\n'), ((1671, 1690), 'torch.randn_like', 'torch.randn_like', (['x'], {}), '(x)\n', (1687, 1690), False, 'import torch\n'), ((2263, 2284), 'tqdm.trange', 'tqdm.trange', (['n_epochs'], {}), '(n_epochs)\n', (2274, 2284), False, 'import tqdm\n'), ((4180, 4217), 'torch.ones', 'torch.ones', (['batch_size'], {'device': 'device'}), '(batch_size, device=device)\n', (4190, 4217), False, 'import torch\n'), ((4357, 4407), 'torch.linspace', 'torch.linspace', (['(1.0)', 'eps', 'num_steps'], {'device': 'device'}), '(1.0, eps, num_steps, device=device)\n', (4371, 4407), False, 'import torch\n'), ((1835, 1904), 'torch.sum', 'torch.sum', (['((score * std[:, None, None, None] + z) ** 2)'], {'dim': '(1, 2, 3)'}), '((score * std[:, None, None, None] + z) ** 2, dim=(1, 2, 3))\n', (1844, 1904), False, 'import torch\n'), ((4231, 4280), 'torch.randn', 'torch.randn', (['batch_size', '(1)', '(28)', '(28)'], {'device': 'device'}), '(batch_size, 1, 28, 28, device=device)\n', (4242, 4280), False, 'import torch\n'), ((4477, 4492), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4490, 4492), False, 'import torch\n'), ((4519, 4540), 'tqdm.tqdm', 'tqdm.tqdm', (['time_steps'], {}), '(time_steps)\n', (4528, 4540), False, 'import tqdm\n'), ((615, 628), 'numpy.log', 'np.log', (['sigma'], {}), '(sigma)\n', (621, 628), True, 'import numpy as np\n'), ((1606, 1645), 'torch.rand', 'torch.rand', (['x.shape[0]'], {'device': 'x.device'}), '(x.shape[0], device=x.device)\n', (1616, 1645), False, 'import torch\n'), ((4572, 4609), 'torch.ones', 'torch.ones', (['batch_size'], {'device': 'device'}), '(batch_size, device=device)\n', (4582, 4609), False, 'import torch\n'), ((4846, 4865), 'torch.randn_like', 'torch.randn_like', (['x'], {}), '(x)\n', (4862, 4865), False, 'import torch\n'), ((4797, 4818), 'torch.sqrt', 'torch.sqrt', (['step_size'], {}), '(step_size)\n', (4807, 4818), False, 'import torch\n')]
|
"""Bayesian Optimization sampler : Defined only for continuous domains.
For discrete inputs define another sampler"""
from verifai.samplers.domain_sampler import DomainSampler
import numpy as np
class BayesOptSampler(DomainSampler):
def __init__(self, domain, BO_params):
try:
import GPyOpt
except ModuleNotFoundError:
import sys
sys.exit('BayesOptSampler requires GPyOpt to be installed')
super().__init__(domain)
self.dimension = domain.standardizedDimension
if not self.dimension:
raise RuntimeError(f'{self.__class__.__name__} supports only'
' continuous standardizable Domains')
self.f = BO_params.f
self.init_num = BO_params.init_num
self.bounds = []
for i in range(self.dimension):
self.bounds.append({'name':'x_'+str(i), 'type': 'continuous',
'domain': (0,1)})
self.X = None
self.Y = None
self.BO = None
def nextSample(self):
import GPyOpt # do this here to avoid slow import when unused
if self.X is None or len(self.X) < self.init_num:
print("Doing random sampling")
sample = np.random.uniform(0,1, self.dimension)
if self.X is None:
self.X= np.atleast_2d(sample)
sample = self.domain.unstandardize(sample)
self.Y = np.atleast_2d(self.f(sample))
else:
self.X = np.vstack((self.X, np.atleast_2d(sample)))
sample = self.domain.unstandardize(sample)
self.Y = np.vstack((self.Y, np.atleast_2d(self.f(sample))))
return sample
print("Doing BO")
self.BO = GPyOpt.methods.BayesianOptimization(
f=lambda sample: self.f(self.domain.unstandardize(tuple(sample[0]))),
domain=self.bounds, X=self.X, Y=self.Y, normalize_Y=False)
self.BO.run_optimization(1)
self.X = np.vstack((self.X,np.atleast_2d(self.BO.X[-1])))
self.Y = np.vstack((self.Y, np.atleast_2d(self.BO.Y[-1])))
return self.domain.unstandardize(tuple(self.X[-1]))
|
[
"numpy.random.uniform",
"sys.exit",
"numpy.atleast_2d"
] |
[((1261, 1300), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.dimension'], {}), '(0, 1, self.dimension)\n', (1278, 1300), True, 'import numpy as np\n'), ((390, 449), 'sys.exit', 'sys.exit', (['"""BayesOptSampler requires GPyOpt to be installed"""'], {}), "('BayesOptSampler requires GPyOpt to be installed')\n", (398, 449), False, 'import sys\n'), ((1355, 1376), 'numpy.atleast_2d', 'np.atleast_2d', (['sample'], {}), '(sample)\n', (1368, 1376), True, 'import numpy as np\n'), ((2044, 2072), 'numpy.atleast_2d', 'np.atleast_2d', (['self.BO.X[-1]'], {}), '(self.BO.X[-1])\n', (2057, 2072), True, 'import numpy as np\n'), ((2111, 2139), 'numpy.atleast_2d', 'np.atleast_2d', (['self.BO.Y[-1]'], {}), '(self.BO.Y[-1])\n', (2124, 2139), True, 'import numpy as np\n'), ((1553, 1574), 'numpy.atleast_2d', 'np.atleast_2d', (['sample'], {}), '(sample)\n', (1566, 1574), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
# -*- coding: utf8 -*-
'''
Copyright 2018 University of Liège
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
interfaceData.py
Matrix and vector representation of interface data.
Authors : <NAME>, <NAME>, <NAME>
'''
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import numpy as np
import scipy as sp
import sys
import ccupydo
np.set_printoptions(threshold=sys.maxsize)
# ----------------------------------------------------------------------
# FlexInterfaceData class
# ----------------------------------------------------------------------
class FlexInterfaceData(ccupydo.CFlexInterfaceData):
"""
Description
"""
def __init__(self, val_nPoint, val_nDim, mpiComm=None):
"""
Des.
"""
self.mpiComm = mpiComm
ccupydo.CFlexInterfaceData.__init__(self, val_nPoint, val_nDim, mpiComm)
#self.mpiComm = mpiComm
#self.nPoint = val_nPoint
#self.nDim = val_nDim
#self.dataContainer = []
#if mpiComm != None:
# for iDim in range(self.nDim):
# from petsc4py import PETSc
# data = PETSc.Vec().create(self.mpiComm)
# data.setType('mpi')
# data.setSizes(self.nPoint)
# data.set(0.0)
# self.dataContainer.append(data)
# self.myid = self.mpiComm.Get_rank()
# self.mpiSize = self.mpiComm.Get_size()
# startIndex , stopIndex = self.dataContainer[0].getOwnershipRange()
# #!!! stopIndex is 1 more than the true index !!!
# #startIndex , stopIndex = self.getOwnershipRange()
# self.indexing = self.mpiComm.allgather((startIndex , stopIndex))
#else:
# for iDim in range(self.nDim):
# data = np.zeros(self.nPoint, dtype=float)
# self.dataContainer.append(data)
# self.myid = 0
# self.mpiSize = 1
def __setitem__(self, index, values):
"""
Des.
"""
if type(values) != list:
raise TypeError("FlexInterfaceData.__setitem__ needs list as argument !")
if len(values) != self.nDim:
raise IndexError("Length of values does not match nDim !")
else:
for iDim in range(self.nDim):
self.setValue(iDim, index, values[iDim])
def __add__(self, dataToAdd):
"""
Des.
"""
if type(dataToAdd) == type(self):
if self.nDim != dataToAdd.nDim:
raise IndexError("Dimensions do not match for + operator !")
if self.nPoint != dataToAdd.nPoint:
raise IndexError("Lengthes do not match for + operator !")
newData = FlexInterfaceData(self.nPoint, self.nDim, self.comm)
self.copy(newData)
newData.add(dataToAdd)
return newData
def __radd__(self, dataToAdd):
"""
Des.
"""
newData = self + dataToAdd
return newData
def __iadd__(self, dataToAdd):
"""
Des.
"""
if type(dataToAdd) == type(self):
if self.nDim != dataToAdd.nDim:
raise IndexError("Dimensions do not match for + operator !")
if self.nPoint != dataToAdd.nPoint:
raise IndexError("Lengthes do not match for + operator !")
self.add(dataToAdd)
return self
def __sub__(self, dataToSub):
"""
Des.
"""
if type(dataToSub) == type(self):
if self.nDim != dataToSub.nDim:
raise IndexError("Dimensions do not match for + operator !")
if self.nPoint != dataToSub.nPoint:
raise IndexError("Lengthes do not match for + operator !")
newData = FlexInterfaceData(self.nPoint, self.nDim, self.comm)
self.copy(newData)
newData.sub(dataToSub)
return newData
def __rsub__(self, dataToSub):
"""
Des.
"""
if type(dataToSub) == type(self):
if self.nDim != dataToSub.nDim:
raise IndexError("Dimensions do not match for + operator !")
if self.nPoint != dataToSub.nPoint:
raise IndexError("Lengthes do not match for + operator !")
newData = -1*self + dataToSub
return newData
def __isub__(self, dataToSub):
"""
Des.
"""
if type(dataToSub) == type(self):
if self.nDim != dataToSub.nDim:
raise IndexError("Dimensions do not match for + operator !")
if self.nPoint != dataToSub.nPoint:
raise IndexError("Lengthes do not match for + operator !")
self.sub(dataToSub)
return self
def __mul__(self, mulVal):
"""
Des.
"""
newData = FlexInterfaceData(self.nPoint, self.nDim, self.comm)
self.copy(newData)
newData.scale(mulVal)
return newData
def __rmul__(self, mulVal):
"""
Des
"""
newData = self*mulVal
return newData
def __imul__(self, mulVal):
"""
Des.
"""
self.scale(mulVal)
return self
def dot(self, dataToDot):
dotList = []
if self.mpiComm != None:
dotList = ccupydo.CFlexInterfaceData.dot(self, dataToDot)
else:
for iDim in range(self.nDim):
myData = self.getData(iDim)
dotData = dataToDot.getData(iDim)
val_dot = myData.dot(dotData)
dotList.append(val_dot)
return dotList
def sum(self):
sumList = []
if self.mpiComm != None:
sumList = ccupydo.CFlexInterfaceData.sum(self)
else:
for iDim in range(self.nDim):
myData = self.getData(iDim)
val_sum = myData.sum()
sumList.append(val_sum)
return sumList
def norm(self):
normList = []
if self.mpiComm != None:
normList = ccupydo.CFlexInterfaceData.norm(self)
else:
for iDim in range(self.nDim):
myData = self.getData(iDim)
val_norm = np.linalg.norm(myData)
normList.append(val_norm)
return normList
# ----------------------------------------------------------------------
# InterfaceMatrix class
# ----------------------------------------------------------------------
class InterfaceMatrix(ccupydo.CInterfaceMatrix):
"""
Define a matrix based on fluid-structure interface data.
Designed for parallel computations (also works in serial).
Inherited public members :
-createDense()
-createSparse()
-createSparseFullAlloc()
-setValue()
-assemble()
-getMat()
"""
def __init__(self, sizes, mpiComm=None):
"""
Overloaded constructor
"""
ccupydo.CInterfaceMatrix.__init__(self, sizes[0],sizes[1])
self.sizes = sizes
self.mpiComm = mpiComm
def mult(self, Data , DataOut):
"""
Performs interface matrix-data multiplication.
"""
if self.mpiComm != None:
ccupydo.CInterfaceMatrix.mult(self, Data, DataOut)
else:
PyH = self.getMat();
dim = Data.getDim()
for iDim in range(dim):
np.dot(PyH, Data.getData(iDim), DataOut.getData(iDim))
|
[
"ccupydo.CFlexInterfaceData.dot",
"ccupydo.CInterfaceMatrix.mult",
"numpy.set_printoptions",
"ccupydo.CFlexInterfaceData.__init__",
"ccupydo.CInterfaceMatrix.__init__",
"numpy.linalg.norm",
"ccupydo.CFlexInterfaceData.norm",
"ccupydo.CFlexInterfaceData.sum"
] |
[((947, 989), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (966, 989), True, 'import numpy as np\n'), ((1397, 1469), 'ccupydo.CFlexInterfaceData.__init__', 'ccupydo.CFlexInterfaceData.__init__', (['self', 'val_nPoint', 'val_nDim', 'mpiComm'], {}), '(self, val_nPoint, val_nDim, mpiComm)\n', (1432, 1469), False, 'import ccupydo\n'), ((7623, 7682), 'ccupydo.CInterfaceMatrix.__init__', 'ccupydo.CInterfaceMatrix.__init__', (['self', 'sizes[0]', 'sizes[1]'], {}), '(self, sizes[0], sizes[1])\n', (7656, 7682), False, 'import ccupydo\n'), ((5954, 6001), 'ccupydo.CFlexInterfaceData.dot', 'ccupydo.CFlexInterfaceData.dot', (['self', 'dataToDot'], {}), '(self, dataToDot)\n', (5984, 6001), False, 'import ccupydo\n'), ((6372, 6408), 'ccupydo.CFlexInterfaceData.sum', 'ccupydo.CFlexInterfaceData.sum', (['self'], {}), '(self)\n', (6402, 6408), False, 'import ccupydo\n'), ((6725, 6762), 'ccupydo.CFlexInterfaceData.norm', 'ccupydo.CFlexInterfaceData.norm', (['self'], {}), '(self)\n', (6756, 6762), False, 'import ccupydo\n'), ((7903, 7953), 'ccupydo.CInterfaceMatrix.mult', 'ccupydo.CInterfaceMatrix.mult', (['self', 'Data', 'DataOut'], {}), '(self, Data, DataOut)\n', (7932, 7953), False, 'import ccupydo\n'), ((6890, 6912), 'numpy.linalg.norm', 'np.linalg.norm', (['myData'], {}), '(myData)\n', (6904, 6912), True, 'import numpy as np\n')]
|
from __future__ import division
from future.utils import iteritems, itervalues
from builtins import map, zip
import numpy as np
import itertools
import collections
import operator
import copy
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
from matplotlib import cm
from warnings import warn
from scipy.special import logsumexp
from pyhsmm.basic.abstractions import Model, ModelGibbsSampling, \
ModelEM, ModelMAPEM, ModelMeanField, ModelMeanFieldSVI, ModelParallelTempering
from pyhsmm.internals import hmm_states, hsmm_states, hsmm_inb_states, \
initial_state, transitions
from pyhsmm.util.general import list_split
from pyhsmm.util.profiling import line_profiled
from pybasicbayes.util.stats import atleast_2d
from pybasicbayes.distributions.gaussian import Gaussian
from pyhsmm.util.plot import annotate_heatmap, heatmap
################
# HMM Mixins #
################
class _HMMBase(Model):
_states_class = hmm_states.HMMStatesPython
_trans_class = transitions.HMMTransitions
_trans_conc_class = transitions.HMMTransitionsConc
_init_state_class = initial_state.HMMInitialState
def __init__(self,
obs_distns,
trans_distn=None,
alpha=None,alpha_a_0=None,alpha_b_0=None,trans_matrix=None,
init_state_distn=None,init_state_concentration=None,pi_0=None):
self.obs_distns = obs_distns
self.states_list = []
if trans_distn is not None:
self.trans_distn = trans_distn
elif not None in (alpha_a_0,alpha_b_0):
self.trans_distn = self._trans_conc_class(
num_states=len(obs_distns),
alpha_a_0=alpha_a_0,alpha_b_0=alpha_b_0,
trans_matrix=trans_matrix)
else:
self.trans_distn = self._trans_class(
num_states=len(obs_distns),alpha=alpha,trans_matrix=trans_matrix)
if init_state_distn is not None:
if init_state_distn == 'uniform':
self.init_state_distn = initial_state.UniformInitialState(model=self)
else:
self.init_state_distn = init_state_distn
else:
self.init_state_distn = self._init_state_class(
model=self,
init_state_concentration=init_state_concentration,
pi_0=pi_0)
self._clear_caches()
def plot_trans_distn(self, states_list):
fig, ax = plt.subplots()
tmat = self.trans_distn.trans_matrix
im, cbar = heatmap(tmat, states_list, states_list, ax=ax,
cmap="Blues", cbarlabel="Transition probability")
texts = annotate_heatmap(im, valfmt="{x:.2f} ")
fig.tight_layout()
plt.show()
return fig
def add_data(self,data,stateseq=None,fixed_stateseq=False,**kwargs):
self.states_list.append(
self._states_class(
model=self,data=data,
stateseq=stateseq, fixed_stateseq=fixed_stateseq,
**kwargs))
return self.states_list[-1]
def generate(self,T,keep=True):
s = self._states_class(model=self,T=T,initialize_from_prior=True)
data = self._generate_obs(s)
if keep:
self.states_list.append(s)
return data, s.stateseq
def _generate_obs(self,s):
if s.data is None:
# generating brand new data sequence
counts = np.bincount(s.stateseq,minlength=self.num_states)
obs = [iter(o.rvs(count)) for o, count in zip(s.obs_distns,counts)]
s.data = np.squeeze(np.vstack([next(obs[state]) for state in s.stateseq]))
else:
# filling in missing data
data = s.data
nan_idx, = np.where(np.isnan(atleast_2d(data)).any(1))
counts = np.bincount(s.stateseq[nan_idx],minlength=self.num_states)
obs = [iter(o.rvs(count)) for o, count in zip(s.obs_distns,counts)]
for idx, state in zip(nan_idx, s.stateseq[nan_idx]):
data[idx] = next(obs[state])
return s.data
def log_likelihood(self,data=None,**kwargs):
if data is not None:
if isinstance(data,np.ndarray):
self.add_data(data=data,generate=False,**kwargs)
return self.states_list.pop().log_likelihood()
else:
assert isinstance(data,list)
loglike = 0.
for d in data:
self.add_data(data=d,generate=False,**kwargs)
loglike += self.states_list.pop().log_likelihood()
return loglike
else:
return sum(s.log_likelihood() for s in self.states_list)
def predict(self,seed_data,timesteps,**kwargs):
padshape = (timesteps, seed_data.shape[1]) if seed_data.ndim == 2 else timesteps
full_data = np.concatenate((seed_data,np.nan*np.ones(padshape)))
self.add_data(full_data,**kwargs)
s = self.states_list.pop()
s.resample() # fills in states
return self._generate_obs(s), s.stateseq # fills in nan obs
def predictive_likelihoods(self,test_data,forecast_horizons,num_procs=None,**kwargs):
assert all(k > 0 for k in forecast_horizons)
self.add_data(data=test_data,**kwargs)
s = self.states_list.pop()
alphal = s.messages_forwards_log()
cmaxes = alphal.max(axis=1)
scaled_alphal = np.exp(alphal - cmaxes[:,None])
if not num_procs:
prev_k = 0
outs = []
for k in forecast_horizons:
step = k - prev_k
cmaxes = cmaxes[:-step]
scaled_alphal = scaled_alphal[:-step].dot(np.linalg.matrix_power(s.trans_matrix,step))
future_likelihoods = logsumexp(
np.log(scaled_alphal) + cmaxes[:,None] + s.aBl[k:],axis=1)
past_likelihoods = logsumexp(alphal[:-k],axis=1)
outs.append(future_likelihoods - past_likelihoods)
prev_k = k
else:
from joblib import Parallel, delayed
from . import parallel
parallel.cmaxes = cmaxes
parallel.alphal = alphal
parallel.scaled_alphal = scaled_alphal
parallel.trans_matrix = s.trans_matrix
parallel.aBl = s.aBl
outs = Parallel(n_jobs=num_procs,backend='multiprocessing')\
(delayed(parallel._get_predictive_likelihoods)(k)
for k in forecast_horizons)
return outs
@property
def stateseqs(self):
return [s.stateseq for s in self.states_list]
@property
def stateseqs_norep(self):
return [s.stateseq_norep for s in self.states_list]
@property
def durations(self):
return [s.durations for s in self.states_list]
@property
def datas(self):
return [s.data for s in self.states_list]
@property
def num_states(self):
return len(self.obs_distns)
@property
def num_parameters(self):
return sum(o.num_parameters() for o in self.obs_distns) \
+ self.num_states**2 - self.num_states
@property
def used_states(self):
'a list of the used states in the order they appear'
c = itertools.count()
canonical_ids = collections.defaultdict(lambda: next(c))
for s in self.states_list:
for state in s.stateseq:
canonical_ids[state]
return list(map(operator.itemgetter(0),
sorted(canonical_ids.items(),key=operator.itemgetter(1))))
@property
def state_usages(self):
if len(self.states_list) > 0:
state_usages = sum(np.bincount(s.stateseq,minlength=self.num_states)
for s in self.states_list)
return state_usages / state_usages.sum()
else:
return np.ones(self.num_states)
### predicting
def heldout_viterbi(self,data,**kwargs):
self.add_data(data=data,stateseq=np.zeros(len(data)),**kwargs)
s = self.states_list.pop()
s.Viterbi()
return s.stateseq
def heldout_state_marginals(self,data,**kwargs):
self.add_data(data=data,stateseq=np.zeros(len(data)),**kwargs)
s = self.states_list.pop()
s.E_step()
return s.expected_states
def _resample_from_mf(self):
self.trans_distn._resample_from_mf()
self.init_state_distn._resample_from_mf()
for o in self.obs_distns:
o._resample_from_mf()
### caching
def _clear_caches(self):
for s in self.states_list:
s.clear_caches()
def __getstate__(self):
self._clear_caches()
return self.__dict__.copy()
### plotting
_fig_sz = 6
def make_figure(self, fig_size=[12,6],**kwargs):
        # Both branches previously created an identical figure, so collapse them
        fig = plt.figure(figsize=fig_size, **kwargs)
        return fig
def plot(self,fig=None,plot_slice=slice(None),update=False,draw=True, fig_size=[12,6]):
update = update and (fig is not None)
fig = fig if fig else self.make_figure(fig_size=fig_size)
feature_ax, stateseq_axs = self._get_axes(fig)
try:
sp1_artists = self.plot_observations(feature_ax,plot_slice=plot_slice,update=update)
except IndexError:
sp1_artists = []
assert len(stateseq_axs) == len(self.states_list)
sp2_artists = \
[artist for s,ax,data in zip(self.states_list,stateseq_axs,self.datas)
for artist in self.plot_stateseq(s,ax,plot_slice,update=update,draw=False)]
if draw: plt.draw()
return sp1_artists + sp2_artists
def _get_axes(self,fig):
        # TODO is attaching these plots to the figure a good idea? why not save them
# here and reuse them if we recognize the figure being passed in
sz = self._fig_sz
if hasattr(fig,'_feature_ax') and hasattr(fig,'_stateseq_axs'):
return fig._feature_ax, fig._stateseq_axs
else:
if len(self.states_list) <= 2:
gs = GridSpec(sz+len(self.states_list),1)
feature_ax = plt.subplot(gs[:sz,:])
stateseq_axs = [plt.subplot(gs[sz+idx]) for idx in range(len(self.states_list))]
else:
gs = GridSpec(1,2)
sgs = GridSpecFromSubplotSpec(len(self.states_list),1,subplot_spec=gs[1])
feature_ax = plt.subplot(gs[0])
stateseq_axs = [plt.subplot(sgs[idx]) for idx in range(len(self.states_list))]
for ax in stateseq_axs:
ax.grid('off')
fig._feature_ax, fig._stateseq_axs = feature_ax, stateseq_axs
return feature_ax, stateseq_axs
def plot_observations(self,ax=None,color=None,plot_slice=slice(None),update=False):
ax = ax if ax else plt.gca()
state_colors = self._get_colors(color)
scatter_artists = self._plot_2d_data_scatter(ax,state_colors,plot_slice,update)
param_artists = self._plot_2d_obs_params(ax,state_colors,update)
return scatter_artists + param_artists
def _plot_2d_data_scatter(self,ax=None,state_colors=None,plot_slice=slice(None),update=False):
# TODO this is a special-case hack. breaks for 1D obs. only looks at
# first two components of ND obs.
# should only do this if the obs collection has a 2D_feature method
ax = ax if ax else plt.gca()
state_colors = state_colors if state_colors else self._get_colors()
artists = []
for s, data in zip(self.states_list,self.datas):
data = data[plot_slice]
colorseq = [state_colors[state] for state in s.stateseq[plot_slice]]
if update and hasattr(s,'_data_scatter'):
s._data_scatter.set_offsets(data[:,:2])
s._data_scatter.set_color(colorseq)
else:
s._data_scatter = ax.scatter(data[:,0],data[:,1],c=colorseq,s=5)
artists.append(s._data_scatter)
return artists
def _plot_2d_obs_params(self,ax=None,state_colors=None,update=False):
if not all(hasattr(o,'plot') for o in self.obs_distns):
return []
keepaxis = ax is not None
ax = ax if ax else plt.gca()
axis = ax.axis()
state_colors = state_colors if state_colors else self._get_colors()
usages = self.state_usages
artists = []
for state, (o, w) in enumerate(zip(self.obs_distns,usages)):
if o.D > 2:
if isinstance(o, Gaussian):
o = Gaussian(o.mu[:2], o.sigma[:2, :2])
else:
warn("High-dimensional distribution may not plot correctly in 2D")
artists.extend(
o.plot(
color=state_colors[state], label='%d' % state,
alpha=min(0.25,1.-(1.-w)**2)/0.25,
ax=ax, update=update,draw=False))
if keepaxis: ax.axis(axis)
return artists
def _get_colors(self,color=None,scalars=False,color_method=None):
color_method = color_method if color_method else 'usage'
if color is None:
cmap = cm.get_cmap('tab20')
if color_method == 'usage':
freqs = self.state_usages
used_states = sorted(self.used_states, key=lambda x: freqs[x], reverse=True)
elif color_method == 'order':
used_states = self.used_states
else:
raise ValueError("color_method must be 'usage' or 'order'")
unused_states = [idx for idx in range(self.num_states) if idx not in used_states]
#colorseq = np.random.RandomState(0).permutation(np.linspace(0,1,self.num_states))
colorseq = np.linspace(0, 1, self.num_states)
colors = dict((idx, v if scalars else cmap(v)) for idx, v in zip(sorted(self.used_states), colorseq))
#colors = dict((idx, v if scalars else cmap(v)) for idx, v in zip(used_states,colorseq))
for state in unused_states:
colors[state] = cmap(1.)
return colors
elif isinstance(color,dict):
return color
else:
return dict((idx,color) for idx in range(self.num_states))
def plot_stateseq(self,s,ax=None,plot_slice=slice(None),update=False,draw=True):
s = self.states_list[s] if isinstance(s,int) else s
ax = ax if ax else plt.gca()
state_colors = self._get_colors(scalars=True)
self._plot_stateseq_pcolor(s,ax,state_colors,plot_slice,update)
try:
data_values_artist = self._plot_stateseq_data_values(s,ax,state_colors,plot_slice,update)
except Exception:
data_values_artist = None
if draw: plt.draw()
return [data_values_artist]
def _plot_stateseq_pcolor(self,s,ax=None,state_colors=None,
plot_slice=slice(None),update=False,color_method=None):
from pyhsmm.util.general import rle
s = self.states_list[s] if isinstance(s,int) else s
ax = ax if ax else plt.gca()
state_colors = state_colors if state_colors \
else self._get_colors(scalars=True,color_method=color_method)
if update and hasattr(s,'_pcolor_im') and s._pcolor_im in ax.images:
s._pcolor_im.remove()
data = s.data[plot_slice]
stateseq = s.stateseq[plot_slice]
stateseq_norep, durations = rle(stateseq)
datamin, datamax = data.min(), data.max()
x, y = np.hstack((0,durations.cumsum())), np.array([datamin,datamax])
C = np.atleast_2d([state_colors[state] for state in stateseq_norep])
s._pcolor_im = ax.pcolormesh(x,y,C,vmin=0,vmax=1,alpha=0.9, cmap="tab20")
ax.set_ylim((datamin,datamax))
ax.set_xlim((0,len(stateseq)))
ax.set_yticks([])
ax.set_xticks([])
def _plot_stateseq_data_values(self,s,ax,state_colors,plot_slice,update):
from matplotlib.collections import LineCollection
from pyhsmm.util.general import AR_striding, rle
data = s.data[plot_slice]
stateseq = s.stateseq[plot_slice]
colorseq = np.tile(np.array([state_colors[state] for state in stateseq[:-1]]),data.shape[1])
if update and hasattr(s,'_data_lc'):
s._data_lc.set_array(colorseq)
else:
ts = np.arange(len(stateseq))
segments = np.vstack(
[AR_striding(np.hstack((ts[:,None], scalarseq[:,None])),1).reshape(-1,2,2)
for scalarseq in data.T])
lc = s._data_lc = LineCollection(segments)
lc.set_array(colorseq)
lc.set_linewidth(0.5)
ax.add_collection(lc)
return s._data_lc
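# A hypothetical usage sketch, not part of the original library code. It assumes the
# concrete ``HMM`` class assembled from these mixins later in this module, and reuses the
# Gaussian observation distribution imported above; all hyperparameters are illustrative.
def _demo_hmm_gibbs():
    obs_hypparams = dict(mu_0=np.zeros(2), sigma_0=np.eye(2), kappa_0=0.25, nu_0=4)
    obs_distns = [Gaussian(**obs_hypparams) for _ in range(4)]
    model = HMM(alpha=6., init_state_concentration=1., obs_distns=obs_distns)
    data, true_stateseq = model.generate(100, keep=False)
    model.add_data(data)
    for _ in range(10):
        model.resample_model()
    return model.stateseqs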
class _HMMGibbsSampling(_HMMBase,ModelGibbsSampling):
@line_profiled
def resample_model(self,num_procs=0):
self.resample_parameters()
self.resample_states(num_procs=num_procs)
@line_profiled
def resample_parameters(self):
self.resample_obs_distns()
self.resample_trans_distn()
self.resample_init_state_distn()
def resample_obs_distns(self):
for state, distn in enumerate(self.obs_distns):
distn.resample([s.data[s.stateseq == state] for s in self.states_list])
self._clear_caches()
@line_profiled
def resample_trans_distn(self):
self.trans_distn.resample([s.stateseq for s in self.states_list])
self._clear_caches()
def resample_init_state_distn(self):
self.init_state_distn.resample([s.stateseq[0] for s in self.states_list])
self._clear_caches()
def resample_states(self,num_procs=0):
if num_procs == 0:
for s in self.states_list:
s.resample()
else:
self._joblib_resample_states(self.states_list,num_procs)
def copy_sample(self):
new = copy.copy(self)
new.obs_distns = [o.copy_sample() for o in self.obs_distns]
new.trans_distn = self.trans_distn.copy_sample()
new.init_state_distn = self.init_state_distn.copy_sample(new)
new.states_list = [s.copy_sample(new) for s in self.states_list]
return new
### joblib parallel legacy here
def _joblib_resample_states(self,states_list,num_procs):
from joblib import Parallel, delayed
from . import parallel
# warn('joblib is segfaulting on OS X only, not sure why')
if len(states_list) > 0:
joblib_args = list_split(
[self._get_joblib_pair(s) for s in states_list],
num_procs)
parallel.model = self
parallel.args = joblib_args
raw_stateseqs = Parallel(n_jobs=num_procs,backend='multiprocessing')\
(delayed(parallel._get_sampled_stateseq)(idx)
for idx in range(len(joblib_args)))
for s, (stateseq, log_likelihood) in zip(
[s for grp in list_split(states_list,num_procs) for s in grp],
[seq for grp in raw_stateseqs for seq in grp]):
s.stateseq, s._normalizer = stateseq, log_likelihood
def _get_joblib_pair(self,states_obj):
return (states_obj.data,states_obj._kwargs)
class _HMMMeanField(_HMMBase,ModelMeanField):
def meanfield_coordinate_descent_step(self,compute_vlb=True,num_procs=0):
# we want to update the states factor last to make the VLB
# computation efficient, but to update the parameters first we have to
# ensure everything in states_list has expected statistics computed
self._meanfield_update_states_list(
[s for s in self.states_list if not hasattr(s, 'expected_states')],
num_procs)
self.meanfield_update_parameters()
self.meanfield_update_states(num_procs)
if compute_vlb:
return self.vlb(states_last_updated=True)
def meanfield_update_parameters(self):
self.meanfield_update_obs_distns()
self.meanfield_update_trans_distn()
self.meanfield_update_init_state_distn()
def meanfield_update_obs_distns(self):
for state, o in enumerate(self.obs_distns):
o.meanfieldupdate(
[s.data for s in self.states_list],
[s.expected_states[:,state] for s in self.states_list])
self._clear_caches()
def meanfield_update_trans_distn(self):
self.trans_distn.meanfieldupdate(
[s.expected_transcounts for s in self.states_list])
self._clear_caches()
def meanfield_update_init_state_distn(self):
self.init_state_distn.meanfieldupdate(
[s.expected_states[0] for s in self.states_list])
self._clear_caches()
def meanfield_update_states(self,num_procs=0):
self._meanfield_update_states_list(self.states_list,num_procs=num_procs)
def _meanfield_update_states_list(self,states_list,num_procs=0):
if num_procs == 0:
for s in states_list:
s.meanfieldupdate()
else:
self._joblib_meanfield_update_states(states_list,num_procs)
def vlb(self, states_last_updated=False):
vlb = 0.
vlb += sum(s.get_vlb(states_last_updated) for s in self.states_list)
vlb += self.trans_distn.get_vlb()
vlb += self.init_state_distn.get_vlb()
vlb += sum(o.get_vlb() for o in self.obs_distns)
return vlb
### joblib parallel legacy
def _joblib_meanfield_update_states(self,states_list,num_procs):
if len(states_list) > 0:
from joblib import Parallel, delayed
from . import parallel
joblib_args = list_split(
[self._get_joblib_pair(s) for s in states_list],
num_procs)
parallel.model = self
parallel.args = joblib_args
allstats = Parallel(n_jobs=num_procs,backend='multiprocessing')\
(delayed(parallel._get_stats)(idx) for idx in range(len(joblib_args)))
for s, stats in zip(
[s for grp in list_split(states_list) for s in grp],
[s for grp in allstats for s in grp]):
s.all_expected_stats = stats
def _get_joblib_pair(self,states_obj):
return (states_obj.data,states_obj._kwargs)
class _HMMSVI(_HMMBase,ModelMeanFieldSVI):
# NOTE: classes with this mixin should also have the _HMMMeanField mixin for
# joblib/multiprocessing legacy to work
def meanfield_sgdstep(self,minibatch,prob,stepsize,num_procs=0,**kwargs):
## compute the local mean field step for the minibatch
mb_states_list = self._get_mb_states_list(minibatch,**kwargs)
if num_procs == 0:
for s in mb_states_list:
s.meanfieldupdate()
else:
self._joblib_meanfield_update_states(mb_states_list,num_procs)
## take a global step on the parameters
self._meanfield_sgdstep_parameters(mb_states_list,prob,stepsize)
def _get_mb_states_list(self,minibatch,**kwargs):
minibatch = minibatch if isinstance(minibatch,list) else [minibatch]
mb_states_list = []
for mb in minibatch:
self.add_data(mb,generate=False,**kwargs)
mb_states_list.append(self.states_list.pop())
return mb_states_list
def _meanfield_sgdstep_parameters(self,mb_states_list,prob,stepsize):
self._meanfield_sgdstep_obs_distns(mb_states_list,prob,stepsize)
self._meanfield_sgdstep_trans_distn(mb_states_list,prob,stepsize)
self._meanfield_sgdstep_init_state_distn(mb_states_list,prob,stepsize)
def _meanfield_sgdstep_obs_distns(self,mb_states_list,prob,stepsize):
for state, o in enumerate(self.obs_distns):
o.meanfield_sgdstep(
[s.data for s in mb_states_list],
[s.expected_states[:,state] for s in mb_states_list],
prob,stepsize)
def _meanfield_sgdstep_trans_distn(self,mb_states_list,prob,stepsize):
self.trans_distn.meanfield_sgdstep(
[s.expected_transcounts for s in mb_states_list],
prob,stepsize)
def _meanfield_sgdstep_init_state_distn(self,mb_states_list,prob,stepsize):
self.init_state_distn.meanfield_sgdstep(
[s.expected_states[0] for s in mb_states_list],
prob,stepsize)
class _HMMEM(_HMMBase,ModelEM, ModelMAPEM):
def EM_step(self):
assert len(self.states_list) > 0, 'Must have data to run EM'
self._clear_caches()
self._E_step()
self._M_step()
def MAP_EM_step(self):
assert len(self.states_list) > 0, 'Must have data to run BEM'
self._clear_caches()
self._E_step()
self._BM_step()
def _BM_step(self):
self._BM_step_obs_distns()
self._BM_step_init_state_distn()
self._BM_step_trans_distn()
def _BM_step_obs_distns(self):
for state, distn in enumerate(self.obs_distns):
distn.MAP([s.data for s in self.states_list],
[s.expected_states[:,state] for s in self.states_list])
def _BM_step_init_state_distn(self):
self.init_state_distn.MAP(
expected_states_list=[s.expected_states[0] for s in self.states_list])
def _BM_step_trans_distn(self):
self.trans_distn.MAP(
expected_transcounts=[s.expected_transcounts for s in self.states_list])
def _E_step(self):
for s in self.states_list:
s.E_step()
def _M_step(self):
self._M_step_obs_distns()
self._M_step_init_state_distn()
self._M_step_trans_distn()
def _M_step_obs_distns(self):
for state, distn in enumerate(self.obs_distns):
distn.max_likelihood([s.data for s in self.states_list],
[s.expected_states[:,state] for s in self.states_list])
def _M_step_init_state_distn(self):
self.init_state_distn.max_likelihood(
expected_states_list=[s.expected_states[0] for s in self.states_list])
def _M_step_trans_distn(self):
self.trans_distn.max_likelihood(
expected_transcounts=[s.expected_transcounts for s in self.states_list])
def BIC(self,data=None):
'''
BIC on the passed data. If passed data is None (default), calculates BIC
on the model's assigned data
'''
# NOTE: in principle this method computes the BIC only after finding the
# maximum likelihood parameters (or, of course, an EM fixed-point as an
# approximation!)
assert data is not None or len(self.states_list) > 0, 'Must have data to get BIC'
if data is None:
return -2*sum(self.log_likelihood(s.data).sum() for s in self.states_list) + \
self.num_parameters() * np.log(
sum(s.data.shape[0] for s in self.states_list))
else:
return -2*self.log_likelihood(data) + self.num_parameters() * np.log(data.shape[0])
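# Note (added for clarity): the expression above is the standard Bayesian information
# criterion, BIC = -2 * log(L_hat) + k * log(n), with k = self.num_parameters() and n the
# total number of observations (summed over states_list, or data.shape[0] when data is given).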
class _HMMViterbiEM(_HMMBase,ModelMAPEM):
def Viterbi_EM_fit(self, tol=0.1, maxiter=20):
return self.MAP_EM_fit(tol, maxiter)
def Viterbi_EM_step(self):
assert len(self.states_list) > 0, 'Must have data to run Viterbi EM'
self._clear_caches()
self._Viterbi_E_step()
self._Viterbi_M_step()
def _Viterbi_E_step(self):
for s in self.states_list:
s.Viterbi()
def _Viterbi_M_step(self):
self._Viterbi_M_step_obs_distns()
self._Viterbi_M_step_init_state_distn()
self._Viterbi_M_step_trans_distn()
def _Viterbi_M_step_obs_distns(self):
for state, distn in enumerate(self.obs_distns):
distn.max_likelihood([s.data[s.stateseq == state] for s in self.states_list])
def _Viterbi_M_step_init_state_distn(self):
self.init_state_distn.max_likelihood(
samples=np.array([s.stateseq[0] for s in self.states_list]))
def _Viterbi_M_step_trans_distn(self):
self.trans_distn.max_likelihood([s.stateseq for s in self.states_list])
MAP_EM_step = Viterbi_EM_step # for the ModelMAPEM interface
class _WeakLimitHDPMixin(object):
def __init__(self,
obs_distns,
trans_distn=None,alpha=None,alpha_a_0=None,alpha_b_0=None,
gamma=None,gamma_a_0=None,gamma_b_0=None,trans_matrix=None,
**kwargs):
if trans_distn is not None:
trans_distn = trans_distn
elif not None in (alpha_a_0,alpha_b_0):
trans_distn = self._trans_conc_class(
num_states=len(obs_distns),
alpha_a_0=alpha_a_0,alpha_b_0=alpha_b_0,
gamma_a_0=gamma_a_0,gamma_b_0=gamma_b_0,
trans_matrix=trans_matrix)
else:
trans_distn = self._trans_class(
num_states=len(obs_distns),alpha=alpha,gamma=gamma,
trans_matrix=trans_matrix)
super(_WeakLimitHDPMixin,self).__init__(
obs_distns=obs_distns,trans_distn=trans_distn,**kwargs)
class _HMMPossibleChangepointsMixin(object):
_states_class = hmm_states.HMMStatesPossibleChangepoints
def add_data(self,data,changepoints=None,**kwargs):
return super(_HMMPossibleChangepointsMixin,self).add_data(
data=data,changepoints=changepoints,**kwargs)
def _get_mb_states_list(self,minibatch,changepoints=None,**kwargs):
if changepoints is not None:
if not isinstance(minibatch,(list,tuple)):
assert isinstance(minibatch,np.ndarray)
assert isinstance(changepoints,list) and isinstance(changepoints[0],tuple)
minibatch = [minibatch]
changepoints = [changepoints]
else:
assert isinstance(changepoints,(list,tuple)) \
and isinstance(changepoints[0],(list,tuple)) \
and isinstance(changepoints[0][0],tuple)
assert len(minibatch) == len(changepoints)
changepoints = changepoints if changepoints is not None \
else [None]*len(minibatch)
mb_states_list = []
for data, changes in zip(minibatch,changepoints):
self.add_data(data,changepoints=changes,generate=False,**kwargs)
mb_states_list.append(self.states_list.pop())
return mb_states_list
def log_likelihood(self,data=None,changepoints=None,**kwargs):
if data is not None:
if isinstance(data,np.ndarray):
assert isinstance(changepoints,list) or changepoints is None
self.add_data(data=data,changepoints=changepoints,
generate=False,**kwargs)
return self.states_list.pop().log_likelihood()
else:
assert isinstance(data,list) and (changepoints is None
or isinstance(changepoints,list) and len(changepoints) == len(data))
changepoints = changepoints if changepoints is not None \
else [None]*len(data)
loglike = 0.
for d, c in zip(data,changepoints):
self.add_data(data=d,changepoints=c,generate=False,**kwargs)
loglike += self.states_list.pop().log_likelihood()
return loglike
else:
return sum(s.log_likelihood() for s in self.states_list)
class _HMMParallelTempering(_HMMBase,ModelParallelTempering):
@property
def temperature(self):
return self._temperature if hasattr(self,'_temperature') else 1.
@temperature.setter
def temperature(self,T):
self._temperature = T
self._clear_caches()
def swap_sample_with(self,other):
self.obs_distns, other.obs_distns = other.obs_distns, self.obs_distns
self.trans_distn, other.trans_distn = other.trans_distn, self.trans_distn
self.init_state_distn, other.init_state_distn = \
other.init_state_distn, self.init_state_distn
self.init_state_distn.model = self
other.init_state_distn.model = other
for s1, s2 in zip(self.states_list, other.states_list):
s1.stateseq, s2.stateseq = s2.stateseq, s1.stateseq
self._clear_caches()
@property
def energy(self):
energy = 0.
for s in self.states_list:
for state, datum in zip(s.stateseq,s.data):
energy += self.obs_distns[state].energy(datum)
return energy
################
# HMM models #
################
class HMMPython(_HMMGibbsSampling,_HMMSVI,_HMMMeanField,_HMMEM,
_HMMViterbiEM,_HMMParallelTempering):
pass
class HMM(HMMPython):
_states_class = hmm_states.HMMStatesEigen
class WeakLimitHDPHMMPython(_WeakLimitHDPMixin,HMMPython):
# NOTE: shouldn't really inherit EM or ViterbiEM, but it's convenient!
_trans_class = transitions.WeakLimitHDPHMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHMMTransitionsConc
class WeakLimitHDPHMM(_WeakLimitHDPMixin,HMM):
_trans_class = transitions.WeakLimitHDPHMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHMMTransitionsConc
class DATruncHDPHMMPython(_WeakLimitHDPMixin,HMMPython):
# NOTE: weak limit mixin is poorly named; we just want its init method
_trans_class = transitions.DATruncHDPHMMTransitions
_trans_conc_class = None
class DATruncHDPHMM(_WeakLimitHDPMixin,HMM):
_trans_class = transitions.DATruncHDPHMMTransitions
_trans_conc_class = None
class WeakLimitStickyHDPHMM(WeakLimitHDPHMM):
# TODO concentration resampling, too!
def __init__(self,obs_distns,
kappa=None,alpha=None,gamma=None,trans_matrix=None,
alpha_a_0=None,alpha_b_0=None,gamma_a_0=None,gamma_b_0=None,
**kwargs):
assert (None not in (alpha,gamma)) ^ \
(None not in (alpha_a_0,alpha_b_0,gamma_a_0,gamma_b_0))
if None not in (alpha,gamma):
trans_distn = transitions.WeakLimitStickyHDPHMMTransitions(
num_states=len(obs_distns),
kappa=kappa,alpha=alpha,gamma=gamma,trans_matrix=trans_matrix)
else:
trans_distn = transitions.WeakLimitStickyHDPHMMTransitionsConc(
num_states=len(obs_distns),
kappa=kappa,
alpha_a_0=alpha_a_0,alpha_b_0=alpha_b_0,
gamma_a_0=gamma_a_0,gamma_b_0=gamma_b_0,
trans_matrix=trans_matrix)
super(WeakLimitStickyHDPHMM,self).__init__(
obs_distns=obs_distns,trans_distn=trans_distn,**kwargs)
class HMMPossibleChangepoints(_HMMPossibleChangepointsMixin,HMM):
pass
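# Hedged usage sketch (not part of the original source): fitting one of the HMM classes
# above with the EM mixin; `model` and `data` are hypothetical (the model is assumed to
# have been constructed with a list of observation distributions).
def _example_em_fit(model, data, num_iters=50):
    model.add_data(data)       # attach a states object to model.states_list
    for _ in range(num_iters):
        model.EM_step()        # E step over all states objects, then M step on the parameters
    return model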
#################
# HSMM Mixins #
#################
class _HSMMBase(_HMMBase):
_states_class = hsmm_states.HSMMStatesPython
_trans_class = transitions.HSMMTransitions
_trans_conc_class = transitions.HSMMTransitionsConc
# _init_steady_state_class = initial_state.HSMMSteadyState # TODO
def __init__(self,dur_distns,**kwargs):
self.dur_distns = dur_distns
super(_HSMMBase,self).__init__(**kwargs)
def add_data(self,data,stateseq=None,trunc=None,
right_censoring=True,left_censoring=False,**kwargs):
self.states_list.append(self._states_class(
model=self,
data=np.asarray(data),
stateseq=stateseq,
right_censoring=right_censoring,
left_censoring=left_censoring,
trunc=trunc,
**kwargs))
return self.states_list[-1]
def summary(self, state, state_list):
print('Model summary for state '+str(state_list[state]))
print('-----------------------------------------')
print(' Duration model')
print(' '+self.dur_distns[state].toString())
print('')
print(' Emission model')
print(' '+self.obs_distns[state].toString())
def total_summary(self, state_list):
print('Complete Model Summary')
print('###############################')
for state in range(len(state_list)):
self.summary(state, state_list)
print('')
self.plot_trans_distn(state_list)
@property
def num_parameters(self):
return sum(o.num_parameters() for o in self.obs_distns) \
+ sum(d.num_parameters() for d in self.dur_distns) \
+ self.num_states**2 - self.num_states
def plot_durations(self,colors=None,states_objs=None):
if colors is None:
colors = self._get_colors()
if states_objs is None:
states_objs = self.states_list
used_states = self.used_states
for state,d in enumerate(self.dur_distns):
if state in used_states:
d.plot(color=colors[state],
data=[s.durations[s.stateseq_norep == state]
for s in states_objs])
plt.title('Durations')
# def plot(self,color=None):
# plt.gcf() #.set_size_inches((10,10))
# colors = self._get_colors(self.states_list)
#
# num_subfig_cols = len(self.states_list)
# for subfig_idx,s in enumerate(self.states_list):
# plt.subplot(3,num_subfig_cols,1+subfig_idx)
# self.plot_observations(colors=colors,states_objs=[s])
#
# plt.subplot(3,num_subfig_cols,1+num_subfig_cols+subfig_idx)
# s.plot(colors_dict=colors)
#
# plt.subplot(3,num_subfig_cols,1+2*num_subfig_cols+subfig_idx)
# self.plot_durations(colors=colors,states_objs=[s])
class _HSMMGibbsSampling(_HSMMBase,_HMMGibbsSampling):
@line_profiled
def resample_parameters(self,**kwargs):
self.resample_dur_distns()
super(_HSMMGibbsSampling,self).resample_parameters(**kwargs)
def resample_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.resample_with_censoring_and_truncation(
data=
[s.durations_censored[s.untrunc_slice][s.stateseq_norep[s.untrunc_slice] == state]
for s in self.states_list],
censored_data=
[s.durations_censored[s.trunc_slice][s.stateseq_norep[s.trunc_slice] == state]
for s in self.states_list])
self._clear_caches()
def copy_sample(self):
new = super(_HSMMGibbsSampling,self).copy_sample()
new.dur_distns = [d.copy_sample() for d in self.dur_distns]
return new
class _HSMMEM(_HSMMBase,_HMMEM):
def _M_step(self):
super(_HSMMEM,self)._M_step()
self._M_step_dur_distns()
def _M_step_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.max_likelihood(
[np.arange(1,s.expected_durations[state].shape[0]+1)
for s in self.states_list],
[s.expected_durations[state] for s in self.states_list])
class _HSMMMeanField(_HSMMBase,_HMMMeanField):
def meanfield_update_parameters(self):
super(_HSMMMeanField,self).meanfield_update_parameters()
self.meanfield_update_dur_distns()
def meanfield_update_dur_distns(self):
for state, d in enumerate(self.dur_distns):
d.meanfieldupdate(
[np.arange(1,s.expected_durations[state].shape[0]+1)
for s in self.states_list],
[s.expected_durations[state] for s in self.states_list])
def vlb(self, **kwargs):
vlb = super(_HSMMMeanField,self).vlb(**kwargs)
vlb += sum(d.get_vlb() for d in self.dur_distns)
return vlb
class _HSMMSVI(_HSMMBase,_HMMSVI):
def _meanfield_sgdstep_parameters(self,mb_states_list,prob,stepsize):
super(_HSMMSVI,self)._meanfield_sgdstep_parameters(mb_states_list,prob,stepsize)
self._meanfield_sgdstep_dur_distns(mb_states_list,prob,stepsize)
def _meanfield_sgdstep_dur_distns(self,mb_states_list,prob,stepsize):
for state, d in enumerate(self.dur_distns):
d.meanfield_sgdstep(
[np.arange(1,s.expected_durations[state].shape[0]+1)
for s in mb_states_list],
[s.expected_durations[state] for s in mb_states_list],
prob,stepsize)
class _HSMMINBEMMixin(_HMMEM,ModelEM):
def EM_step(self):
super(_HSMMINBEMMixin,self).EM_step()
for state, distn in enumerate(self.dur_distns):
distn.max_likelihood(data=None,stats=(
sum(s.expected_dur_ns[state] for s in self.states_list),
sum(s.expected_dur_tots[state] for s in self.states_list)))
class _HSMMViterbiEM(_HSMMBase,_HMMViterbiEM):
def Viterbi_EM_step(self):
super(_HSMMViterbiEM,self).Viterbi_EM_step()
self._Viterbi_M_step_dur_distns()
def _Viterbi_M_step_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.max_likelihood(
[s.durations[s.stateseq_norep == state] for s in self.states_list])
def _Viterbi_M_step_trans_distn(self):
self.trans_distn.max_likelihood([s.stateseq_norep for s in self.states_list])
class _HSMMPossibleChangepointsMixin(_HMMPossibleChangepointsMixin):
_states_class = hsmm_states.HSMMStatesPossibleChangepoints
class _HSMMParallelTempering(_HSMMBase,_HMMParallelTempering):
def swap_sample_with(self,other):
self.dur_distns, other.dur_distns = other.dur_distns, self.dur_distns
super(_HSMMParallelTempering,self).swap_sample_with(other)
class _DelayedMixin(object):
def resample_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.resample_with_censoring_and_truncation(
data=
[s.durations_censored[s.untrunc_slice][s.stateseq_norep[s.untrunc_slice] == state]
- s.delays[state] for s in self.states_list],
censored_data=
[s.durations_censored[s.trunc_slice][s.stateseq_norep[s.trunc_slice] == state]
- s.delays[state] for s in self.states_list])
self._clear_caches()
#################
# HSMM Models #
#################
class HSMMPython(_HSMMGibbsSampling,_HSMMSVI,_HSMMMeanField,
_HSMMViterbiEM,_HSMMEM,_HSMMParallelTempering):
_trans_class = transitions.HSMMTransitions
_trans_conc_class = transitions.HSMMTransitionsConc
class HSMM(HSMMPython):
_states_class = hsmm_states.HSMMStatesEigen
class GeoHSMM(HSMMPython):
_states_class = hsmm_states.GeoHSMMStates
class DelayedGeoHSMM(_DelayedMixin,HSMMPython):
_states_class = hsmm_states.DelayedGeoHSMMStates
class WeakLimitHDPHSMMPython(_WeakLimitHDPMixin,HSMMPython):
# NOTE: shouldn't technically inherit EM or ViterbiEM, but it's convenient
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
class WeakLimitHDPHSMM(_WeakLimitHDPMixin,HSMM):
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
class WeakLimitGeoHDPHSMM(WeakLimitHDPHSMM):
_states_class = hsmm_states.GeoHSMMStates
def _M_step_dur_distns(self):
warn('untested!')
for state, distn in enumerate(self.dur_distns):
distn.max_likelihood(
stats=(
sum(s._expected_ns[state] for s in self.states_list),
sum(s._expected_tots[state] for s in self.states_list),
))
class WeakLimitDelayedGeoHSMM(_DelayedMixin,WeakLimitHDPHSMM):
_states_class = hsmm_states.DelayedGeoHSMMStates
class DATruncHDPHSMM(_WeakLimitHDPMixin,HSMM):
# NOTE: weak limit mixin is poorly named; we just want its init method
_trans_class = transitions.DATruncHDPHSMMTransitions
_trans_conc_class = None
class HSMMIntNegBin(_HSMMGibbsSampling,_HSMMMeanField,_HSMMSVI,_HSMMViterbiEM,
_HSMMParallelTempering):
_trans_class = transitions.HSMMTransitions
_trans_conc_class = transitions.HSMMTransitionsConc
_states_class = hsmm_inb_states.HSMMStatesIntegerNegativeBinomial
def _resample_from_mf(self):
super(HSMMIntNegBin,self)._resample_from_mf()
for d in self.dur_distns:
d._resample_from_mf()
def _vlb(self):
return 0. # TODO
def predictive_likelihoods(self,test_data,forecast_horizons,**kwargs):
self.add_data(data=test_data,**kwargs)
s = self.states_list.pop()
alphal = s.hmm_messages_forwards_log()
cmaxes = alphal.max(axis=1)
scaled_alphal = np.exp(alphal - cmaxes[:,None])
prev_k = 0
outs = []
for k in forecast_horizons:
step = k - prev_k
cmaxes = cmaxes[:-step]
scaled_alphal = scaled_alphal[:-step].dot(np.linalg.matrix_power(s.hmm_trans_matrix,step))
future_likelihoods = logsumexp(
np.log(scaled_alphal) + cmaxes[:,None] + s.hmm_aBl[k:],axis=1)
past_likelihoods = logsumexp(alphal[:-k],axis=1)
outs.append(future_likelihoods - past_likelihoods)
prev_k = k
return outs
class WeakLimitHDPHSMMIntNegBin(_WeakLimitHDPMixin,HSMMIntNegBin):
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
class HSMMIntNegBinVariant(_HSMMGibbsSampling,_HSMMINBEMMixin,_HSMMViterbiEM,
_HSMMParallelTempering):
_trans_class = transitions.HSMMTransitions
_trans_conc_class = transitions.HSMMTransitionsConc
_states_class = hsmm_inb_states.HSMMStatesIntegerNegativeBinomialVariant
class WeakLimitHDPHSMMIntNegBinVariant(_WeakLimitHDPMixin,HSMMIntNegBinVariant):
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
class GeoHSMMPossibleChangepoints(_HSMMPossibleChangepointsMixin,GeoHSMM):
pass
class HSMMPossibleChangepoints(_HSMMPossibleChangepointsMixin,HSMMPython):
pass
class WeakLimitHDPHSMMPossibleChangepoints(_HSMMPossibleChangepointsMixin,WeakLimitHDPHSMM):
pass
class WeakLimitHDPHSMMDelayedIntNegBin(_DelayedMixin,_WeakLimitHDPMixin,HSMMIntNegBin):
_states_class = hsmm_inb_states.HSMMStatesDelayedIntegerNegativeBinomial
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
def __init__(self,dur_distns,delay=0,**kwargs):
for d in dur_distns:
d.delay = delay
super(WeakLimitHDPHSMMDelayedIntNegBin,self).__init__(dur_distns=dur_distns,**kwargs)
class WeakLimitHDPHSMMTruncatedIntNegBin(_WeakLimitHDPMixin,HSMMIntNegBin):
_states_class = hsmm_inb_states.HSMMStatesTruncatedIntegerNegativeBinomial
_trans_class = transitions.WeakLimitHDPHSMMTransitions
_trans_conc_class = transitions.WeakLimitHDPHSMMTransitionsConc
def __init__(self,dur_distns,delay=0,**kwargs):
for d in dur_distns:
d.delay = delay
super(WeakLimitHDPHSMMTruncatedIntNegBin,self).__init__(dur_distns=dur_distns,**kwargs)
def resample_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.resample_with_censoring_and_truncation(
# regular data
data =
[s.durations_censored[s.untrunc_slice][s.stateseq_norep[s.untrunc_slice] == state]
for s in self.states_list],
# right censoring due to HSMM states
censored_data =
[s.durations_censored[s.trunc_slice][s.stateseq_norep[s.trunc_slice] == state]
for s in self.states_list],
# left truncation level
left_truncation_level = distn.delay,
)
self._clear_caches()
##########
# meta #
##########
class _SeparateTransMixin(object):
def __init__(self,*args,**kwargs):
super(_SeparateTransMixin,self).__init__(*args,**kwargs)
make_factory = (lambda distn: lambda: copy.deepcopy(distn))
self.trans_distns = collections.defaultdict(make_factory(self.trans_distn))
self._trans_distn_prototype = self.trans_distn
del self.trans_distn
self.init_state_distns = collections.defaultdict(make_factory(self.init_state_distn))
self._init_state_distn_prototype = self.init_state_distn
del self.init_state_distn
def __getstate__(self):
dct = self.__dict__.copy()
dct['trans_distns'] = dict(self.trans_distns.items())
dct['init_state_distns'] = dict(self.init_state_distns.items())
return dct
def __setstate__(self,dct):
self.__dict__.update(dct)
self.trans_distns = collections.defaultdict(
lambda: copy.deepcopy(self._trans_distn_prototype))
self.init_state_distns = collections.defaultdict(
lambda: copy.deepcopy(self._init_state_distn_prototype))
self.trans_distns.update(dct['trans_distns'])
self.init_state_distns.update(dct['init_state_distns'])
### parallel tempering
def swap_sample_with(self,other):
self.trans_distns, other.trans_distns = self.trans_distns, other.trans_distns
self.init_state_distns, other.init_state_distns = \
other.init_state_distns, self.init_state_distns
for d1, d2 in zip(self.init_state_distns.values(),other.init_state_distns.values()):
d1.model = self
d2.model = other
super(_SeparateTransMixin,self).swap_sample_with(other)
### Gibbs sampling
def resample_trans_distn(self):
for group_id, trans_distn in iteritems(self.trans_distns):
trans_distn.resample([s.stateseq for s in self.states_list
if hash(s.group_id) == hash(group_id)])
self._clear_caches()
def resample_init_state_distn(self):
for group_id, init_state_distn in iteritems(self.init_state_distns):
init_state_distn.resample([s.stateseq[0] for s in self.states_list
if hash(s.group_id) == hash(group_id)])
self._clear_caches()
### Mean field
def meanfield_update_trans_distn(self):
for group_id, trans_distn in iteritems(self.trans_distns):
states_list = [s for s in self.states_list if hash(s.group_id) == hash(group_id)]
if len(states_list) > 0:
trans_distn.meanfieldupdate([s.expected_transcounts for s in states_list])
def meanfield_update_init_state_distn(self):
for group_id, init_state_distn in iteritems(self.init_state_distns):
states_list = [s for s in self.states_list if hash(s.group_id) == hash(group_id)]
if len(states_list) > 0:
init_state_distn.meanfieldupdate([s.expected_states[0] for s in states_list])
def _vlb(self):
vlb = 0.
vlb += sum(s.get_vlb() for s in self.states_list)
vlb += sum(trans_distn.get_vlb()
for trans_distn in itervalues(self.trans_distns))
vlb += sum(init_state_distn.get_vlb()
for init_state_distn in itervalues(self.init_state_distns))
vlb += sum(o.get_vlb() for o in self.obs_distns)
return vlb
### SVI
def _meanfield_sgdstep_trans_distn(self,mb_states_list,prob,stepsize):
for group_id, trans_distn in iteritems(self.trans_distns):
trans_distn.meanfield_sgdstep(
[s.expected_transcounts for s in mb_states_list
if hash(s.group_id) == hash(group_id)],
prob,stepsize)
def _meanfield_sgdstep_init_state_distn(self,mb_states_list,prob,stepsize):
for group_id, init_state_distn in iteritems(self.init_state_distns):
init_state_distn.meanfield_sgdstep(
[s.expected_states[0] for s in mb_states_list
if hash(s.group_id) == hash(group_id)],
prob,stepsize)
### EM
def EM_step(self):
raise NotImplementedError
### Viterbi
def Viterbi_EM_step(self):
raise NotImplementedError
class HMMSeparateTrans(_SeparateTransMixin,HMM):
_states_class = hmm_states.HMMStatesEigenSeparateTrans
class WeakLimitHDPHMMSeparateTrans(_SeparateTransMixin,WeakLimitHDPHMM):
_states_class = hmm_states.HMMStatesEigenSeparateTrans
class WeakLimitStickyHDPHMMSeparateTrans(_SeparateTransMixin,WeakLimitStickyHDPHMM):
_states_class = hmm_states.HMMStatesEigenSeparateTrans
class WeakLimitHDPHSMMSeparateTrans(_SeparateTransMixin,WeakLimitHDPHSMM):
_states_class = hsmm_states.HSMMStatesSeparateTrans
class HSMMPossibleChangepointsSeparateTrans(
_SeparateTransMixin,
HSMMPossibleChangepoints):
_states_class = hsmm_states.HSMMStatesPossibleChangepointsSeparateTrans
class WeakLimitHDPHSMMPossibleChangepointsSeparateTrans(
_SeparateTransMixin,
WeakLimitHDPHSMMPossibleChangepoints):
_states_class = hsmm_states.HSMMStatesPossibleChangepointsSeparateTrans
# class WeakLimitHDPHSMMPossibleChangepointsSeparateTrans(
# _SeparateTransMixin,
# WeakLimitHDPHSMMPossibleChangepoints):
# _states_class = hsmm_states.HSMMStatesPossibleChangepointsSeparateTrans
class WeakLimitHDPHSMMIntNegBinSeparateTrans(
_SeparateTransMixin,
WeakLimitHDPHSMMIntNegBin):
_states_class = hsmm_inb_states.HSMMStatesIntegerNegativeBinomialSeparateTrans
class WeakLimitHDPHSMMDelayedIntNegBinSeparateTrans(
_SeparateTransMixin,
WeakLimitHDPHSMMDelayedIntNegBin):
_states_class = hsmm_inb_states.HSMMStatesDelayedIntegerNegativeBinomialSeparateTrans
# TODO is this method needed?
def resample_dur_distns(self):
for state, distn in enumerate(self.dur_distns):
distn.resample_with_censoring_and_truncation(
data=
[s.durations_censored[s.untrunc_slice][s.stateseq_norep[s.untrunc_slice] == state]
- s.delays[state] for s in self.states_list],
censored_data=
[s.durations_censored[s.trunc_slice][s.stateseq_norep[s.trunc_slice] == state]
- s.delays[state] for s in self.states_list])
self._clear_caches()
class WeakLimitHDPHSMMTruncatedIntNegBinSeparateTrans(
_SeparateTransMixin,
WeakLimitHDPHSMMTruncatedIntNegBin):
_states_class = hsmm_inb_states.HSMMStatesTruncatedIntegerNegativeBinomialSeparateTrans
|
[
"matplotlib.pyplot.title",
"matplotlib.cm.get_cmap",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.exp",
"scipy.special.logsumexp",
"matplotlib.pyplot.gca",
"numpy.atleast_2d",
"pybasicbayes.util.stats.atleast_2d",
"joblib.Parallel",
"matplotlib.pyplot.draw",
"numpy.linspace",
"numpy.bincount",
"pyhsmm.util.general.rle",
"future.utils.iteritems",
"matplotlib.pyplot.subplots",
"matplotlib.collections.LineCollection",
"copy.deepcopy",
"matplotlib.pyplot.show",
"future.utils.itervalues",
"numpy.asarray",
"itertools.count",
"numpy.hstack",
"pyhsmm.util.general.list_split",
"numpy.linalg.matrix_power",
"joblib.delayed",
"matplotlib.pyplot.subplot",
"numpy.log",
"pyhsmm.util.plot.heatmap",
"copy.copy",
"pybasicbayes.distributions.gaussian.Gaussian",
"numpy.array",
"pyhsmm.util.plot.annotate_heatmap",
"builtins.zip",
"pyhsmm.internals.initial_state.UniformInitialState",
"warnings.warn",
"matplotlib.gridspec.GridSpec",
"operator.itemgetter"
] |
[((2496, 2510), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2508, 2510), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2678), 'pyhsmm.util.plot.heatmap', 'heatmap', (['tmat', 'states_list', 'states_list'], {'ax': 'ax', 'cmap': '"""Blues"""', 'cbarlabel': '"""Transition probability"""'}), "(tmat, states_list, states_list, ax=ax, cmap='Blues', cbarlabel=\n 'Transition probability')\n", (2584, 2678), False, 'from pyhsmm.util.plot import annotate_heatmap, heatmap\n'), ((2717, 2756), 'pyhsmm.util.plot.annotate_heatmap', 'annotate_heatmap', (['im'], {'valfmt': '"""{x:.2f} """'}), "(im, valfmt='{x:.2f} ')\n", (2733, 2756), False, 'from pyhsmm.util.plot import annotate_heatmap, heatmap\n'), ((2793, 2803), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2801, 2803), True, 'import matplotlib.pyplot as plt\n'), ((5522, 5554), 'numpy.exp', 'np.exp', (['(alphal - cmaxes[:, None])'], {}), '(alphal - cmaxes[:, None])\n', (5528, 5554), True, 'import numpy as np\n'), ((7399, 7416), 'itertools.count', 'itertools.count', ([], {}), '()\n', (7414, 7416), False, 'import itertools\n'), ((11808, 11841), 'builtins.zip', 'zip', (['self.states_list', 'self.datas'], {}), '(self.states_list, self.datas)\n', (11811, 11841), False, 'from builtins import map, zip\n'), ((15743, 15756), 'pyhsmm.util.general.rle', 'rle', (['stateseq'], {}), '(stateseq)\n', (15746, 15756), False, 'from pyhsmm.util.general import AR_striding, rle\n'), ((15898, 15962), 'numpy.atleast_2d', 'np.atleast_2d', (['[state_colors[state] for state in stateseq_norep]'], {}), '([state_colors[state] for state in stateseq_norep])\n', (15911, 15962), True, 'import numpy as np\n'), ((18200, 18215), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (18209, 18215), False, 'import copy\n'), ((30659, 30687), 'builtins.zip', 'zip', (['minibatch', 'changepoints'], {}), '(minibatch, changepoints)\n', (30662, 30687), False, 'from builtins import map, zip\n'), ((32619, 32659), 'builtins.zip', 'zip', (['self.states_list', 'other.states_list'], {}), '(self.states_list, other.states_list)\n', (32622, 32659), False, 'from builtins import map, zip\n'), ((37465, 37487), 'matplotlib.pyplot.title', 'plt.title', (['"""Durations"""'], {}), "('Durations')\n", (37474, 37487), True, 'import matplotlib.pyplot as plt\n'), ((43791, 43808), 'warnings.warn', 'warn', (['"""untested!"""'], {}), "('untested!')\n", (43795, 43808), False, 'from warnings import warn\n'), ((45196, 45228), 'numpy.exp', 'np.exp', (['(alphal - cmaxes[:, None])'], {}), '(alphal - cmaxes[:, None])\n', (45202, 45228), True, 'import numpy as np\n'), ((50302, 50330), 'future.utils.iteritems', 'iteritems', (['self.trans_distns'], {}), '(self.trans_distns)\n', (50311, 50330), False, 'from future.utils import iteritems, itervalues\n'), ((50572, 50605), 'future.utils.iteritems', 'iteritems', (['self.init_state_distns'], {}), '(self.init_state_distns)\n', (50581, 50605), False, 'from future.utils import iteritems, itervalues\n'), ((50873, 50901), 'future.utils.iteritems', 'iteritems', (['self.trans_distns'], {}), '(self.trans_distns)\n', (50882, 50901), False, 'from future.utils import iteritems, itervalues\n'), ((51217, 51250), 'future.utils.iteritems', 'iteritems', (['self.init_state_distns'], {}), '(self.init_state_distns)\n', (51226, 51250), False, 'from future.utils import iteritems, itervalues\n'), ((52004, 52032), 'future.utils.iteritems', 'iteritems', (['self.trans_distns'], {}), '(self.trans_distns)\n', (52013, 52032), False, 'from future.utils import iteritems, 
itervalues\n'), ((52367, 52400), 'future.utils.iteritems', 'iteritems', (['self.init_state_distns'], {}), '(self.init_state_distns)\n', (52376, 52400), False, 'from future.utils import iteritems, itervalues\n'), ((3511, 3561), 'numpy.bincount', 'np.bincount', (['s.stateseq'], {'minlength': 'self.num_states'}), '(s.stateseq, minlength=self.num_states)\n', (3522, 3561), True, 'import numpy as np\n'), ((3894, 3953), 'numpy.bincount', 'np.bincount', (['s.stateseq[nan_idx]'], {'minlength': 'self.num_states'}), '(s.stateseq[nan_idx], minlength=self.num_states)\n', (3905, 3953), True, 'import numpy as np\n'), ((4063, 4096), 'builtins.zip', 'zip', (['nan_idx', 's.stateseq[nan_idx]'], {}), '(nan_idx, s.stateseq[nan_idx])\n', (4066, 4096), False, 'from builtins import map, zip\n'), ((8009, 8033), 'numpy.ones', 'np.ones', (['self.num_states'], {}), '(self.num_states)\n', (8016, 8033), True, 'import numpy as np\n'), ((9012, 9050), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'fig_size'}), '(figsize=fig_size, **kwargs)\n', (9022, 9050), True, 'import matplotlib.pyplot as plt\n'), ((9082, 9120), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'fig_size'}), '(figsize=fig_size, **kwargs)\n', (9092, 9120), True, 'import matplotlib.pyplot as plt\n'), ((9843, 9853), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (9851, 9853), True, 'import matplotlib.pyplot as plt\n'), ((11090, 11099), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11097, 11099), True, 'import matplotlib.pyplot as plt\n'), ((11677, 11686), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11684, 11686), True, 'import matplotlib.pyplot as plt\n'), ((12512, 12521), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12519, 12521), True, 'import matplotlib.pyplot as plt\n'), ((12720, 12748), 'builtins.zip', 'zip', (['self.obs_distns', 'usages'], {}), '(self.obs_distns, usages)\n', (12723, 12748), False, 'from builtins import map, zip\n'), ((13456, 13476), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (13467, 13476), False, 'from matplotlib import cm\n'), ((14049, 14083), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.num_states'], {}), '(0, 1, self.num_states)\n', (14060, 14083), True, 'import numpy as np\n'), ((14728, 14737), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14735, 14737), True, 'import matplotlib.pyplot as plt\n'), ((15062, 15072), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (15070, 15072), True, 'import matplotlib.pyplot as plt\n'), ((15375, 15384), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15382, 15384), True, 'import matplotlib.pyplot as plt\n'), ((15858, 15886), 'numpy.array', 'np.array', (['[datamin, datamax]'], {}), '([datamin, datamax])\n', (15866, 15886), True, 'import numpy as np\n'), ((16475, 16533), 'numpy.array', 'np.array', (['[state_colors[state] for state in stateseq[:-1]]'], {}), '([state_colors[state] for state in stateseq[:-1]])\n', (16483, 16533), True, 'import numpy as np\n'), ((16895, 16919), 'matplotlib.collections.LineCollection', 'LineCollection', (['segments'], {}), '(segments)\n', (16909, 16919), False, 'from matplotlib.collections import LineCollection\n'), ((32879, 32902), 'builtins.zip', 'zip', (['s.stateseq', 's.data'], {}), '(s.stateseq, s.data)\n', (32882, 32902), False, 'from builtins import map, zip\n'), ((45630, 45660), 'scipy.special.logsumexp', 'logsumexp', (['alphal[:-k]'], {'axis': '(1)'}), '(alphal[:-k], axis=1)\n', (45639, 45660), False, 'from scipy.special import 
logsumexp\n'), ((2073, 2118), 'pyhsmm.internals.initial_state.UniformInitialState', 'initial_state.UniformInitialState', ([], {'model': 'self'}), '(model=self)\n', (2106, 2118), False, 'from pyhsmm.internals import hmm_states, hsmm_states, hsmm_inb_states, initial_state, transitions\n'), ((6006, 6036), 'scipy.special.logsumexp', 'logsumexp', (['alphal[:-k]'], {'axis': '(1)'}), '(alphal[:-k], axis=1)\n', (6015, 6036), False, 'from scipy.special import logsumexp\n'), ((6459, 6512), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_procs', 'backend': '"""multiprocessing"""'}), "(n_jobs=num_procs, backend='multiprocessing')\n", (6467, 6512), False, 'from joblib import Parallel, delayed\n'), ((7615, 7637), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (7634, 7637), False, 'import operator\n'), ((9687, 9734), 'builtins.zip', 'zip', (['self.states_list', 'stateseq_axs', 'self.datas'], {}), '(self.states_list, stateseq_axs, self.datas)\n', (9690, 9734), False, 'from builtins import map, zip\n'), ((10380, 10403), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:sz, :]'], {}), '(gs[:sz, :])\n', (10391, 10403), True, 'import matplotlib.pyplot as plt\n'), ((10539, 10553), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (10547, 10553), False, 'from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec\n'), ((10673, 10691), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (10684, 10691), True, 'import matplotlib.pyplot as plt\n'), ((19022, 19075), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_procs', 'backend': '"""multiprocessing"""'}), "(n_jobs=num_procs, backend='multiprocessing')\n", (19030, 19075), False, 'from joblib import Parallel, delayed\n'), ((22220, 22273), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_procs', 'backend': '"""multiprocessing"""'}), "(n_jobs=num_procs, backend='multiprocessing')\n", (22228, 22273), False, 'from joblib import Parallel, delayed\n'), ((28334, 28385), 'numpy.array', 'np.array', (['[s.stateseq[0] for s in self.states_list]'], {}), '([s.stateseq[0] for s in self.states_list])\n', (28342, 28385), True, 'import numpy as np\n'), ((31606, 31629), 'builtins.zip', 'zip', (['data', 'changepoints'], {}), '(data, changepoints)\n', (31609, 31629), False, 'from builtins import map, zip\n'), ((45422, 45470), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['s.hmm_trans_matrix', 'step'], {}), '(s.hmm_trans_matrix, step)\n', (45444, 45470), True, 'import numpy as np\n'), ((48674, 48694), 'copy.deepcopy', 'copy.deepcopy', (['distn'], {}), '(distn)\n', (48687, 48694), False, 'import copy\n'), ((49420, 49462), 'copy.deepcopy', 'copy.deepcopy', (['self._trans_distn_prototype'], {}), '(self._trans_distn_prototype)\n', (49433, 49462), False, 'import copy\n'), ((49546, 49593), 'copy.deepcopy', 'copy.deepcopy', (['self._init_state_distn_prototype'], {}), '(self._init_state_distn_prototype)\n', (49559, 49593), False, 'import copy\n'), ((3615, 3640), 'builtins.zip', 'zip', (['s.obs_distns', 'counts'], {}), '(s.obs_distns, counts)\n', (3618, 3640), False, 'from builtins import map, zip\n'), ((4007, 4032), 'builtins.zip', 'zip', (['s.obs_distns', 'counts'], {}), '(s.obs_distns, counts)\n', (4010, 4032), False, 'from builtins import map, zip\n'), ((4986, 5003), 'numpy.ones', 'np.ones', (['padshape'], {}), '(padshape)\n', (4993, 5003), True, 'import numpy as np\n'), ((5798, 5842), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['s.trans_matrix', 'step'], {}), '(s.trans_matrix, 
step)\n', (5820, 5842), True, 'import numpy as np\n'), ((7826, 7876), 'numpy.bincount', 'np.bincount', (['s.stateseq'], {'minlength': 'self.num_states'}), '(s.stateseq, minlength=self.num_states)\n', (7837, 7876), True, 'import numpy as np\n'), ((10435, 10460), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[sz + idx]'], {}), '(gs[sz + idx])\n', (10446, 10460), True, 'import matplotlib.pyplot as plt\n'), ((10724, 10745), 'matplotlib.pyplot.subplot', 'plt.subplot', (['sgs[idx]'], {}), '(sgs[idx])\n', (10735, 10745), True, 'import matplotlib.pyplot as plt\n'), ((12842, 12877), 'pybasicbayes.distributions.gaussian.Gaussian', 'Gaussian', (['o.mu[:2]', 'o.sigma[:2, :2]'], {}), '(o.mu[:2], o.sigma[:2, :2])\n', (12850, 12877), False, 'from pybasicbayes.distributions.gaussian import Gaussian\n'), ((12920, 12986), 'warnings.warn', 'warn', (['"""High-dimensional distribution may not plot correctly in 2D"""'], {}), "('High-dimensional distribution may not plot correctly in 2D')\n", (12924, 12986), False, 'from warnings import warn\n'), ((27408, 27429), 'numpy.log', 'np.log', (['data.shape[0]'], {}), '(data.shape[0])\n', (27414, 27429), True, 'import numpy as np\n'), ((35851, 35867), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (35861, 35867), True, 'import numpy as np\n'), ((39304, 39358), 'numpy.arange', 'np.arange', (['(1)', '(s.expected_durations[state].shape[0] + 1)'], {}), '(1, s.expected_durations[state].shape[0] + 1)\n', (39313, 39358), True, 'import numpy as np\n'), ((39833, 39887), 'numpy.arange', 'np.arange', (['(1)', '(s.expected_durations[state].shape[0] + 1)'], {}), '(1, s.expected_durations[state].shape[0] + 1)\n', (39842, 39887), True, 'import numpy as np\n'), ((40629, 40683), 'numpy.arange', 'np.arange', (['(1)', '(s.expected_durations[state].shape[0] + 1)'], {}), '(1, s.expected_durations[state].shape[0] + 1)\n', (40638, 40683), True, 'import numpy as np\n'), ((51649, 51678), 'future.utils.itervalues', 'itervalues', (['self.trans_distns'], {}), '(self.trans_distns)\n', (51659, 51678), False, 'from future.utils import iteritems, itervalues\n'), ((51766, 51800), 'future.utils.itervalues', 'itervalues', (['self.init_state_distns'], {}), '(self.init_state_distns)\n', (51776, 51800), False, 'from future.utils import iteritems, itervalues\n'), ((6534, 6579), 'joblib.delayed', 'delayed', (['parallel._get_predictive_likelihoods'], {}), '(parallel._get_predictive_likelihoods)\n', (6541, 6579), False, 'from joblib import Parallel, delayed\n'), ((7688, 7710), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (7707, 7710), False, 'import operator\n'), ((19097, 19136), 'joblib.delayed', 'delayed', (['parallel._get_sampled_stateseq'], {}), '(parallel._get_sampled_stateseq)\n', (19104, 19136), False, 'from joblib import Parallel, delayed\n'), ((19295, 19329), 'pyhsmm.util.general.list_split', 'list_split', (['states_list', 'num_procs'], {}), '(states_list, num_procs)\n', (19305, 19329), False, 'from pyhsmm.util.general import list_split\n'), ((22295, 22323), 'joblib.delayed', 'delayed', (['parallel._get_stats'], {}), '(parallel._get_stats)\n', (22302, 22323), False, 'from joblib import Parallel, delayed\n'), ((22433, 22456), 'pyhsmm.util.general.list_split', 'list_split', (['states_list'], {}), '(states_list)\n', (22443, 22456), False, 'from pyhsmm.util.general import list_split\n'), ((45536, 45557), 'numpy.log', 'np.log', (['scaled_alphal'], {}), '(scaled_alphal)\n', (45542, 45557), True, 'import numpy as np\n'), ((3847, 3863), 
'pybasicbayes.util.stats.atleast_2d', 'atleast_2d', (['data'], {}), '(data)\n', (3857, 3863), False, 'from pybasicbayes.util.stats import atleast_2d\n'), ((5912, 5933), 'numpy.log', 'np.log', (['scaled_alphal'], {}), '(scaled_alphal)\n', (5918, 5933), True, 'import numpy as np\n'), ((16757, 16801), 'numpy.hstack', 'np.hstack', (['(ts[:, None], scalarseq[:, None])'], {}), '((ts[:, None], scalarseq[:, None]))\n', (16766, 16801), True, 'import numpy as np\n')]
|
import os
import tensorflow as tf
import numpy as np
import quaternion
import datetime
import time
def test_linspace():
# tf ops must take float arguments
# better to use np.linspace instead
x = tf.linspace(0., 3., 4)
print("linspace", x)
def test_gather():
coords = tf.tile(tf.expand_dims(tf.linspace(0., 10., 11), 1), (1, 3))
# print(coords)
indices = tf.cast(tf.linspace(0., 10., 6), tf.int32)
extracted = tf.gather(coords, indices)
# print(extracted)
assert (np.isclose(extracted[:, 0].numpy(), indices.numpy()).all())
print("!!! test_gather passed")
def test_pad():
img = tf.ones((4, 5, 3), dtype=tf.float32)
# print("original channel 0", img[:, :, 0])
paddings = tf.constant([[1, 1], [1, 1], [0, 0]])
pad = tf.pad(img, paddings, "CONSTANT")
# print("paddings", paddings)
# print("padded shape:", pad.shape)
print("padded channel 0", pad[:, :, 1])
def test_rotation_vector():
quat = quaternion.from_float_array(np.array([np.cos(np.pi/3), 0, np.sin(np.pi/3), 0]))
print("quaterion angle pi*2/3 about y-axis", quat)
rvec = quaternion.as_rotation_vector(quat)
print("rotation vector:", rvec)
assert (np.isclose(np.linalg.norm(rvec), np.pi*2/3))
print("!!! test_rotation_vector passed")
def test_time():
nowtime = datetime.datetime.now()
print("nowtime", nowtime)
print("formatted time", nowtime.strftime("%m%d_%H%M%S"))
print("asctime", time.asctime())
def test_casting():
data = "1.1"
try:
data = int(data)
except Exception as e:
print(e)
print(type(e))
print(str(e))
def test():
np.set_printoptions(precision=3, suppress=True)
test_linspace()
test_gather()
test_pad()
test_rotation_vector()
test_time()
test_casting()
if __name__ == "__main__":
test()
|
[
"time.asctime",
"tensorflow.ones",
"numpy.set_printoptions",
"tensorflow.linspace",
"tensorflow.gather",
"tensorflow.pad",
"tensorflow.constant",
"numpy.sin",
"numpy.linalg.norm",
"quaternion.as_rotation_vector",
"numpy.cos",
"datetime.datetime.now"
] |
[((206, 230), 'tensorflow.linspace', 'tf.linspace', (['(0.0)', '(3.0)', '(4)'], {}), '(0.0, 3.0, 4)\n', (217, 230), True, 'import tensorflow as tf\n'), ((442, 468), 'tensorflow.gather', 'tf.gather', (['coords', 'indices'], {}), '(coords, indices)\n', (451, 468), True, 'import tensorflow as tf\n'), ((628, 664), 'tensorflow.ones', 'tf.ones', (['(4, 5, 3)'], {'dtype': 'tf.float32'}), '((4, 5, 3), dtype=tf.float32)\n', (635, 664), True, 'import tensorflow as tf\n'), ((728, 765), 'tensorflow.constant', 'tf.constant', (['[[1, 1], [1, 1], [0, 0]]'], {}), '([[1, 1], [1, 1], [0, 0]])\n', (739, 765), True, 'import tensorflow as tf\n'), ((776, 809), 'tensorflow.pad', 'tf.pad', (['img', 'paddings', '"""CONSTANT"""'], {}), "(img, paddings, 'CONSTANT')\n", (782, 809), True, 'import tensorflow as tf\n'), ((1115, 1150), 'quaternion.as_rotation_vector', 'quaternion.as_rotation_vector', (['quat'], {}), '(quat)\n', (1144, 1150), False, 'import quaternion\n'), ((1322, 1345), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1343, 1345), False, 'import datetime\n'), ((1654, 1701), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (1673, 1701), True, 'import numpy as np\n'), ((391, 416), 'tensorflow.linspace', 'tf.linspace', (['(0.0)', '(10.0)', '(6)'], {}), '(0.0, 10.0, 6)\n', (402, 416), True, 'import tensorflow as tf\n'), ((1210, 1230), 'numpy.linalg.norm', 'np.linalg.norm', (['rvec'], {}), '(rvec)\n', (1224, 1230), True, 'import numpy as np\n'), ((1458, 1472), 'time.asctime', 'time.asctime', ([], {}), '()\n', (1470, 1472), False, 'import time\n'), ((311, 337), 'tensorflow.linspace', 'tf.linspace', (['(0.0)', '(10.0)', '(11)'], {}), '(0.0, 10.0, 11)\n', (322, 337), True, 'import tensorflow as tf\n'), ((1007, 1024), 'numpy.cos', 'np.cos', (['(np.pi / 3)'], {}), '(np.pi / 3)\n', (1013, 1024), True, 'import numpy as np\n'), ((1027, 1044), 'numpy.sin', 'np.sin', (['(np.pi / 3)'], {}), '(np.pi / 3)\n', (1033, 1044), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import multiprocessing
from multiprocessing import Process, Manager, Queue
import math
from collections import Counter
from PyProM.src.data.importing import Import
import sys
import os
from PyProM.src.utility.util_profile import Util_Profile
from PyProM.src.utility.util_multiprocessing import Util_Multiprocessing
import time
from functools import wraps
def timefn(fn):
@wraps(fn)
def measure_time(*args, **kwargs):
t1 = time.time()
result = fn(*args, **kwargs)
t2 = time.time()
print("@timefn: {} took {} seconds".format(fn.__name__, t2-t1))
return result
return measure_time
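# Note (added for clarity): the locally defined timefn above is immediately shadowed by
# the assignment below, so the Util_Profile.timefn decorator is the one actually applied.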
timefn = Util_Profile.timefn
class Eventlog(pd.DataFrame):
"""docstring for Eventlog"""
def __init__(self, *args, **kwargs):
super(Eventlog, self).__init__(*args, **kwargs)
self._columns = []
@property
def _constructor(self):
return Eventlog
@classmethod
def from_xes(cls, path):
_import = Import(path, format='xes')
dict_eventlog = _import.eventlog
if isinstance(dict_eventlog, dict):
print("import dict and produce eventlog")
df = Eventlog.from_dict(dict_eventlog)
return df
@classmethod
def from_txt(cls, path, sep='\t', encoding=None, **kwargs):
if 'dtype' in kwargs:
dtype = kwargs['dtype']
else:
dtype = None
if 'index_col' in kwargs:
index_col = kwargs['index_col']
else:
index_col=False
df = pd.read_csv(path, sep = sep, index_col = index_col, dtype=dtype, encoding=encoding)
return Eventlog(df)
"""
def __call__(self, path, format='xes'):
if format == 'xes':
_import = Import(path, format='xes')
dict_eventlog = _import.eventlog
return self.dict_to_dataframe(dict_eventlog)
if format == 'txt':
return self.csv_to_dataframe(path)
"""
@timefn
def assign_caseid(self, *args):
count = 0
for arg in args:
if count == 0:
self['CASE_ID'] = self[arg].apply(str)
else:
self['CASE_ID'] += '_' + self[arg].apply(str)
#del self[arg]
count +=1
self._columns.append('CASE_ID')
return self
@timefn
def assign_activity(self, *args):
count = 0
for arg in args:
if count == 0:
self['Activity'] = self[arg].apply(str)
else:
self['Activity'] += '_' + self[arg].apply(str)
#del self[arg]
count +=1
self._columns.append('Activity')
return self
@timefn
def assign_resource(self, *args):
count = 0
for arg in args:
if count == 0:
self['Resource'] = self[arg].astype(str)
else:
self['Resource'] += '_' + self[arg].astype(str)
#del self[arg]
count +=1
self._columns.append('Resource')
return self
@timefn
def assign_timestamp(self, name, new_name = 'TIMESTAMP', _format = '%Y/%m/%d %H:%M:%S', errors='ignore'):
print(_format)
self[name] = pd.to_datetime(self[name], format = _format, errors=errors)
self.rename(columns={name: new_name}, inplace=True)
#self.loc[pd.isna(self[name]),name] = '-'
self._columns.append(new_name)
return self
def assign_attr(self, **kwargs):
"""
Renames an attribute column of the eventlog (copies old_col to new_col and drops old_col).
#TODO: change this to simply rename the column!
:param kwargs: old_col=column name in the source data, new_col=column name to use in the generated eventlog
:return: eventlog
"""
if 'old_col' in kwargs:
old_col = kwargs['old_col']
if 'new_col' in kwargs:
new_col = kwargs['new_col']
else:
new_col = kwargs['old_col']
self[new_col] = self[old_col]
self._columns.append(new_col)
del self[old_col]
return self
def assign_cluster(self, *args):
count = 0
for arg in args:
if count == 0:
self['Cluster'] = self[arg].astype(str)
else:
self['Cluster'] += '_' + self[arg].astype(str)
#del self[arg]
count +=1
self._columns.append('Cluster')
return self
def sort(self, by=['CASE_ID']):
self = self.sort_values(by)
return self
def clear_columns(self, *args, **kwargs):
if 'extra' in kwargs:
extra = kwargs['extra']
else:
extra = []
self = self[self._columns]
return self
def join_columns(self, col_name, *args):
if len(args) < 2:
print("join_columns requires at least 2 columns")
count = 0
tmp = self.copy(deep=True)
for arg in args:
if count == 0:
self[col_name] = tmp[arg].astype(str)
else:
self[col_name] += '/' + tmp[arg].astype(str)
#del self[arg]
count +=1
return self
"""
utility functions
"""
def get_event_trace(self, workers, value = 'Activity'):
output = self.parallelize(self._get_event_trace, workers, value)
event_trace = Util_Multiprocessing.join_dict(output)
return event_trace
def _get_event_trace(self, eventlog, x, value='Activity'):
event_trace = dict()
count = 0
for instance in eventlog.itertuples():
index = instance.Index
if value == 'Activity':
ai = eventlog.get_activity_by_index(index)
elif value == 'Resource':
ai = eventlog.get_resource_by_index(index)
elif value == 'TIMESTAMP':
ai = eventlog.get_timestamp_by_index(index)
else:
ai = eventlog.get_col_value_by_index(value, index)
if index == 0:
event_trace[instance.CASE_ID] = [ai]
continue
caseid = eventlog.get_caseid_by_index(index-1)
if instance.CASE_ID == caseid:
event_trace[instance.CASE_ID].append(ai)
else:
event_trace[instance.CASE_ID] = [ai]
print("Finish")
x.append(event_trace)
def _get_trace_count(self, event_trace):
trace_count = dict()
traces = event_trace.values()
for trace in traces:
trace = tuple(trace)
if trace not in trace_count:
trace_count[trace] = 0
trace_count[trace] += 1
return trace_count
def get_caseids(self):
unique_caseids = self['CASE_ID'].unique()
return unique_caseids
def get_activities(self):
unique_activities = self['Activity'].unique()
return unique_activities
def get_resources(self):
unique_resources = self['Resource'].unique()
return unique_resources
def get_timestamps(self):
unique_timestamps = self['TIMESTAMP'].unique()
return unique_timestamps
#return the unique values of a given column as a list
def get_col_values(self,col):
return list(set(self[col]))
def get_first_caseid(self):
return self['CASE_ID'][0]
def get_caseid_by_index(self,index):
return self['CASE_ID'][index]
def get_resource_by_index(self, index):
return self['Resource'][index]
def get_activity_by_index(self, index):
return self['Activity'][index]
def get_timestamp_by_index(self, index):
return self['TIMESTAMP'][index]
def get_col_value_by_index(self, col, index):
return self[col][index]
#return the rows whose given column contains a given value
def get_col_value(self, col, value):
value_df = self.loc[self[col]==value]
value_df.name = value
return value_df
def change_col_value(self, col, old_val, new_val):
self.loc[self[col]==old_val, col] = new_val
return self
def col_val_to_numeric(self, col):
"""
To make a chart using bokeh, x values and y values must be numeric.
Accordingly, change column values to numeric so that it can be properly drawn by bokeh
Key arguements
col -- column to be converted to numeric
"""
self.sort_values(by=col, inplace=True)
self.reset_index(drop=True, inplace=True)
indexs = []
i=1
for index, instance in self.iterrows():
if index==0:
indexs.append(i)
continue
value = self[col][index-1]
if instance[col] != value:
i+=1
indexs.append(i)
self.loc[:, 'new_col'] = indexs
return self
def filter(self, criterion, value):
return self.loc[self[criterion] == value, :]
# remove the rows whose given column contains a given value
def remove_col_value(self, col, value):
return self.loc[self[col] != value]
#return the total number of events in the eventlog
def count_event(self):
return len(self.index)
#return the number of cases in the eventlog
def count_case(self):
return len(set(self['CASE_ID']))
#return the number of unique values of a given column
def count_col_values(self, col):
return len(set(self[col]))
#print the number of unique values of every column
def show_col_counts(self):
columns = self.columns
for col in columns:
print("unique counts of {}: {}".format(col,len(set(self[col]))))
def count_col_case(self, col):
col_case = self.groupby(col).CASE_ID.apply(list).apply(set)
col_case_count = col_case.apply(len)
col_case_count_mean = np.mean(col_case_count)
col_case_count_std = np.std(col_case_count)
print("CLUSTER count: {}".format(col_case_count))
print("CLUSTER count mean: {}".format(col_case_count_mean))
print("CLUSTER count std: {}".format(col_case_count_std))
return col_case_count
def count_duplicate_values(self, eventlog, **kwargs):
"""특정 값이 중복되는 경우 중복횟수의 빈도를 return함
e.g. 1번 중복: 100, 2번 중복: 300
Keyword arguments:
col -- 특정 col이 중복된 것을 확인하고 싶은 경우 (default: Activity)
"""
if 'col' in kwargs:
col = kwargs['col']
traces = eventlog.get_event_trace(workers=4, value=col)
else:
traces = eventlog.get_event_trace(workers=4, value='Activity')
count=0
inv_act_counts = []
for t in traces:
act_count = dict(Counter(traces[t]))
inv_act_count = dict()
for k,v in act_count.items():
if v < 2:
continue
if v in inv_act_count:
inv_act_count[v].append(k)
else:
inv_act_count[v] = [k]
inv_act_counts.append(inv_act_count)
count_result_step = dict()
for inv_act_count in inv_act_counts:
for k in inv_act_count:
if k not in count_result_step:
count_result_step[k] = 1
else:
count_result_step[k] += 1
result = pd.DataFrame(list(count_result_step.items()), columns=['repetition', 'count'])
return result
def count_loops(self, eventlog, **kwargs):
"""step이 연속된 경우를 count함. Step1-->Step1인 경우 1, Step1-->Step1-->Step1인 경우 2, 동시에 동일 device에서 수행되었는지도 계산함
Keyword arguments:
col -- 특정 col이 중복된 것을 확인하고 싶은 경우 (default: Activity)
value -- 특정 값이 연속된 것을 확인하고 싶은 경우 e.g. 'Null'
"""
if 'col' in kwargs:
col = kwargs['col']
traces = eventlog.get_event_trace(workers=4, value=col)
else:
traces = eventlog.get_event_trace(workers=4, value='Activity')
count=0
if 'value' in kwargs:
value = kwargs['value']
else:
value = 'default'
for t in traces:
for index, act in enumerate(traces[t]):
if index == len(traces[t]) -1:
continue
if value == 'default':
if act == traces[t][index+1]:
count+=1
else:
if act == value and traces[t][index+1] == value:
count+=1
print("count_consecutives: {}".format(count))
return count
def describe(self):
print("# events: {}".format(len(self)))
print("# cases: {}".format(len(set(self['CASE_ID']))))
print("# activities: {}".format(len(set(self['Activity']))))
print("# resources: {}".format(len(set(self['Resource']))))
try:
print("average yield: {}".format(np.mean(self['VALUE'])))
except AttributeError:
print("yield not exists")
def split_on_case(self, split):
caseid = self.get_caseids()
sub_cases = []
for d in np.array_split(caseid, split):
sub_cases.append(d)
sub_logs = []
for i in range(len(sub_cases)):
sub_log = self.loc[self['CASE_ID'].isin(sub_cases[i]), :]
sub_log.reset_index(drop=True, inplace=True)
sub_logs.append(sub_log)
return sub_logs
def parallelize(self, func, workers=multiprocessing.cpu_count(), *args):
sublogs = self.split_on_case(workers)
output = Queue()
manager = Manager()
output = manager.list()
# Setup a list of processes that we want to run
processes = [Process(target=func, args=(sublogs[i], output)+args) for i in range(len(sublogs))]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
return output
#Relation Dictionary (key : AfterActivity, value : PreActivity list)
##You need to specify the objective of this function
##Additionally, please try to make the code below more efficient. (Both in terms of performance and visibility)
def relation_dictionary(self, pre_col, aft_col):
relation_set = {}
aft_activity_list = self.get_col_values(aft_col)
for i in aft_activity_list:
relation_set[i] = []
for i in range(len(self)):
relation_set[self[aft_col][i]].append(self[pre_col][i])
return relation_set
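# Hedged usage sketch (not part of the original source): `log` is a hypothetical Eventlog
# that already has CASE_ID and Activity columns assigned.
def _example_trace_counts(log):
    traces = log.get_event_trace(workers=2, value='Activity')  # dict: CASE_ID -> list of activities
    return log._get_trace_count(traces)                        # dict: trace tuple -> frequency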
if __name__ == '__main__':
"""
eventlog = Eventlog.from_xes('./example/running_example.xes')
print(type(eventlog))
"""
eventlog = Eventlog.from_txt('/Users/GYUNAM/Desktop/LAB/SAMSUNG_PROJECT/IMPLE/input/Sample_data.txt')
eventlog = eventlog.assign_caseid('ROOT_LOT_ID', 'WAFER_ID')
eventlog = eventlog.assign_timestamp('TKIN_TIME', 'TKOUT_TIME')
print(eventlog)
|
[
"pandas.read_csv",
"numpy.std",
"multiprocessing.Manager",
"time.time",
"PyProM.src.utility.util_multiprocessing.Util_Multiprocessing.join_dict",
"numpy.mean",
"pandas.to_datetime",
"PyProM.src.data.importing.Import",
"functools.wraps",
"numpy.array_split",
"multiprocessing.Queue",
"multiprocessing.Process",
"multiprocessing.cpu_count"
] |
[((383, 392), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (388, 392), False, 'from functools import wraps\n'), ((436, 447), 'time.time', 'time.time', ([], {}), '()\n', (445, 447), False, 'import time\n'), ((486, 497), 'time.time', 'time.time', ([], {}), '()\n', (495, 497), False, 'import time\n'), ((910, 936), 'PyProM.src.data.importing.Import', 'Import', (['path'], {'format': '"""xes"""'}), "(path, format='xes')\n", (916, 936), False, 'from PyProM.src.data.importing import Import\n'), ((1359, 1438), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': 'sep', 'index_col': 'index_col', 'dtype': 'dtype', 'encoding': 'encoding'}), '(path, sep=sep, index_col=index_col, dtype=dtype, encoding=encoding)\n', (1370, 1438), True, 'import pandas as pd\n'), ((2703, 2760), 'pandas.to_datetime', 'pd.to_datetime', (['self[name]'], {'format': '_format', 'errors': 'errors'}), '(self[name], format=_format, errors=errors)\n', (2717, 2760), True, 'import pandas as pd\n'), ((4375, 4413), 'PyProM.src.utility.util_multiprocessing.Util_Multiprocessing.join_dict', 'Util_Multiprocessing.join_dict', (['output'], {}), '(output)\n', (4405, 4413), False, 'from PyProM.src.utility.util_multiprocessing import Util_Multiprocessing\n'), ((8022, 8045), 'numpy.mean', 'np.mean', (['col_case_count'], {}), '(col_case_count)\n', (8029, 8045), True, 'import numpy as np\n'), ((8069, 8091), 'numpy.std', 'np.std', (['col_case_count'], {}), '(col_case_count)\n', (8075, 8091), True, 'import numpy as np\n'), ((10629, 10658), 'numpy.array_split', 'np.array_split', (['caseid', 'split'], {}), '(caseid, split)\n', (10643, 10658), True, 'import numpy as np\n'), ((10926, 10953), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (10951, 10953), False, 'import multiprocessing\n'), ((11014, 11021), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (11019, 11021), False, 'from multiprocessing import Process, Manager, Queue\n'), ((11034, 11043), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (11041, 11043), False, 'from multiprocessing import Process, Manager, Queue\n'), ((11135, 11189), 'multiprocessing.Process', 'Process', ([], {'target': 'func', 'args': '((sublogs[i], output) + args)'}), '(target=func, args=(sublogs[i], output) + args)\n', (11142, 11189), False, 'from multiprocessing import Process, Manager, Queue\n'), ((10458, 10480), 'numpy.mean', 'np.mean', (["self['VALUE']"], {}), "(self['VALUE'])\n", (10465, 10480), True, 'import numpy as np\n')]
|
"""
Functions for interacting with the BEAST model
"""
import numpy as np
import h5py
from tqdm import tqdm
__all__ = ["read_lnp_data", "read_noise_data", "read_sed_data", "get_lnp_grid_vals"]
def read_lnp_data(filename, nstars=None, shift_lnp=True):
"""
Read in the sparse lnp for all the stars in the hdf5 file
Parameters
----------
filename : string
name of the file with the sparse lnp values
nstars : int (default=None)
if you want to check that the number of lnp values is correct, set this
to the number of stars expected in the file
shift_lnp : boolean (default=True)
if True, shift lnp values to have a max of 0.0
Returns
-------
lnp_data : dictionary
contains arrays of the lnp values and indices to the BEAST model grid
"""
with h5py.File(filename, "r") as lnp_hdf:
# get keyword names for the stars (as opposed to filter info)
star_key_list = [sname for sname in lnp_hdf.keys() if "star" in sname]
tot_stars = len(star_key_list)
if nstars is not None:
if tot_stars != nstars:
raise ValueError(
"Error: number of stars not equal between nstars image and lnp file"
)
# initialize arrays
# - find the lengths of the sparse likelihoods
lnp_sizes = [lnp_hdf[sname]["lnp"][()].shape[0] for sname in star_key_list]
# - set arrays to the maximum size
lnp_vals = np.full((np.max(lnp_sizes), tot_stars), -np.inf)
lnp_indxs = np.full((np.max(lnp_sizes), tot_stars), np.nan)
# loop over all the stars (groups)
for k, sname in enumerate(star_key_list):
lnp_vals[: lnp_sizes[k], k] = lnp_hdf[sname]["lnp"][()]
lnp_indxs[: lnp_sizes[k], k] = np.array(lnp_hdf[sname]["idx"][()])
if shift_lnp:
# shift the log(likelihood) values to have a max of 0.0
# ok if the same shift is applied to all stars in a pixel
# avoids numerical issues later when we go to integrate probs
lnp_vals -= np.max(lnp_vals)
return {"vals": lnp_vals, "indxs": lnp_indxs}
def read_noise_data(
filename,
param_list=["bias", "completeness", "error"],
filter_col=None,
):
"""
Read some or all of the noise model parameters, for one or all of the filters
Parameters
----------
filename : string
name of the file with the BEAST observationmodel grid
param_list : list of strings
the set of parameters to extract
filter_col : int (default=None)
if set, only return the data for this column number
Returns
-------
noise_data : dictionary
contains arrays of the noise parameters
"""
noise_data = {}
# open files for reading
with h5py.File(filename, "r") as noise_hdf:
# get beast physicsmodel params
for param in tqdm(param_list, desc="reading noise data"):
if filter_col is None:
noise_data[param] = np.array(noise_hdf[param])
else:
noise_data[param] = noise_hdf[param][:, filter_col]
return noise_data
def read_sed_data(
filename,
param_list=["Av", "Rv", "f_A", "M_ini", "logA", "Z", "distance"],
return_params=False,
):
"""
Read in the beast data needed by all the pixels
Parameters
----------
filename : string
name of the file with the BEAST physicsmodel grid
param_list : list of strings
The set of parameters to extract (default: Av, Rv, f_A, M_ini, logA, Z,
distance). If set to 'all', extract all parameters and model fluxes in
the grid.
return_params : boolean (default=False)
If True, return the list of keywords for all parameters and model fluxes
in the grid. Useful for checking what columns are present.
Returns
-------
Two possible returns depending on param_list input
sed_data : dictionary (param_list input as list of strings)
contains arrays of the requested SED grid parameters
if return_params is True, then also provides
grid_param_list : list of strings (return_params is True)
if param_list is None, return the list of parameter options
"""
sed_data = {}
# open files for reading
with h5py.File(filename, "r") as sed_hdf:
# get the possible list of parameters
grid_param_list = list(sed_hdf["grid"][()].dtype.names)
# return that if the user is so inclined
if return_params:
return grid_param_list + ["seds", "lamb", "filters"]
if param_list == "all":
param_list = grid_param_list
# get parameters
for param in tqdm(param_list, desc="reading sed data"):
# grid parameter
if param in grid_param_list:
sed_data[param] = sed_hdf["grid"][param]
# wavelengths of the filters -or- SED photometry values
elif (param == "lamb") or (param == "seds"):
sed_data[param] = sed_hdf[param][()]
elif param == "filters":
filtstr = sed_hdf["grid"].attrs["filters"]
if isinstance(filtstr, bytes):
filtstr = filtstr.decode()
sed_data[param] = filtstr.split(" ")
else:
raise ValueError("parameter {0} not found in SED grid".format(param))
return sed_data
def get_lnp_grid_vals(sed_data, lnp_data, verbose=False):
"""
Acquire the SED parameter values for the locations where the lnp values
were saved
Parameters
----------
sed_data : dictionary or string
if dictionary: contains arrays of the beast parameters (output from
read_sed_data)
if string: name of the file with the BEAST physicsmodel grid, which will
be used in read_sed_data to get default parameters
lnp_data : dictionary or string
if dictionary: contains arrays of the lnp values and indices to the BEAST
model grid (output from read_lnp_data)
if string: name of the file with the sparse lnp values, which will be
used in read_lnp_data with default parameters
Returns
-------
lnp_grid_vals : dictionary
arrays of the SED grid parameters for the points in the lnp lists
"""
if isinstance(sed_data, str):
sed_data = read_sed_data(sed_data)
if isinstance(lnp_data, str):
lnp_data = read_lnp_data(lnp_data)
# get the keys in beast_data
param_list = sed_data.keys()
# setup the output
lnp_grid_vals = {}
n_lnps, n_stars = lnp_data["indxs"].shape
for param in param_list:
lnp_grid_vals[param] = np.zeros((n_lnps, n_stars), dtype=float)
# loop over the stars and extract the requested BEAST data
for k in tqdm(
range(n_stars), desc="extracting params for each lnP", disable=not verbose
):
lnp_inds = lnp_data["indxs"][:, k]
good_inds = np.isfinite(lnp_inds)
for param in param_list:
lnp_grid_vals[param][good_inds, k] = sed_data[param][
lnp_inds[good_inds].astype(int)
]
return lnp_grid_vals
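# Minimal usage sketch of the functions above (file names are placeholders):
# sed_data = read_sed_data("beast_physicsmodel.grid.hd5", param_list=["Av", "Rv"])
# lnp_data = read_lnp_data("beast_lnp.hd5", shift_lnp=True)
# lnp_grid_vals = get_lnp_grid_vals(sed_data, lnp_data)
# each entry of lnp_grid_vals then has shape (n_lnps, n_stars), matching lnp_data["indxs"]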
|
[
"h5py.File",
"tqdm.tqdm",
"numpy.zeros",
"numpy.isfinite",
"numpy.max",
"numpy.array"
] |
[((833, 857), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (842, 857), False, 'import h5py\n'), ((2838, 2862), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2847, 2862), False, 'import h5py\n'), ((2939, 2982), 'tqdm.tqdm', 'tqdm', (['param_list'], {'desc': '"""reading noise data"""'}), "(param_list, desc='reading noise data')\n", (2943, 2982), False, 'from tqdm import tqdm\n'), ((4355, 4379), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (4364, 4379), False, 'import h5py\n'), ((4763, 4804), 'tqdm.tqdm', 'tqdm', (['param_list'], {'desc': '"""reading sed data"""'}), "(param_list, desc='reading sed data')\n", (4767, 4804), False, 'from tqdm import tqdm\n'), ((6744, 6784), 'numpy.zeros', 'np.zeros', (['(n_lnps, n_stars)'], {'dtype': 'float'}), '((n_lnps, n_stars), dtype=float)\n', (6752, 6784), True, 'import numpy as np\n'), ((7021, 7042), 'numpy.isfinite', 'np.isfinite', (['lnp_inds'], {}), '(lnp_inds)\n', (7032, 7042), True, 'import numpy as np\n'), ((1820, 1855), 'numpy.array', 'np.array', (["lnp_hdf[sname]['idx'][()]"], {}), "(lnp_hdf[sname]['idx'][()])\n", (1828, 1855), True, 'import numpy as np\n'), ((2118, 2134), 'numpy.max', 'np.max', (['lnp_vals'], {}), '(lnp_vals)\n', (2124, 2134), True, 'import numpy as np\n'), ((1507, 1524), 'numpy.max', 'np.max', (['lnp_sizes'], {}), '(lnp_sizes)\n', (1513, 1524), True, 'import numpy as np\n'), ((1576, 1593), 'numpy.max', 'np.max', (['lnp_sizes'], {}), '(lnp_sizes)\n', (1582, 1593), True, 'import numpy as np\n'), ((3055, 3081), 'numpy.array', 'np.array', (['noise_hdf[param]'], {}), '(noise_hdf[param])\n', (3063, 3081), True, 'import numpy as np\n')]
|
import math
import numpy as np
from collections import namedtuple
import random
from cyclopts import cyclopts_io as cycio
from cyclopts.structured_species import data
"""default values and np.dtypes for points making up parameter space"""
Param = namedtuple('Param', ['val', 'dtype'])
class Point(object):
"""A container class representing a point in parameter space"""
def __init__(self, d=None):
"""Parameters
----------
d : dict, optional
a dictionary with key value pairs of parameter name, parameter
value
"""
d = d if d is not None else {}
# init with dict-specified value else default
for name, param in self._parameters().items():
val = d[name] if name in d else param.val
setattr(self, name, val)
def _parameters(self):
"""subclasses must implement their parameter mapping"""
return NotImplemented
def __eq__(self, other):
return (isinstance(other, self.__class__) \
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def mean_enr(rxtr, commod):
"""the mean enrichment for a reactor and commodity"""
return np.mean(data.enr_ranges[rxtr][commod])
def conv_ratio(kind):
"""provides the inventory to process conversion ratio for given support"""
commod, rxtr = data.sup_to_commod[kind], data.sup_to_rxtr[kind]
enr = mean_enr(rxtr, commod)
return data.converters[kind]['inv'](1.0, enr, commod) / \
data.converters[kind]['proc'](1.0, enr, commod)
def region(loc, n_reg=1):
"""assumes loc is on [0, 1]"""
return int(math.floor(n_reg * loc))
def loc_pref(r_loc, s_loc, loc_fidelity=0, n_reg=1):
"""returns the location-based preference between a requester and supplier
for a commodity"""
loc_pref = 0
if loc_fidelity > 0: # at least coarse
rreg = region(r_loc, n_reg=n_reg)
sreg = region(s_loc, n_reg=n_reg)
loc_pref = math.exp(-np.abs(rreg - sreg))
if loc_fidelity > 1: # fine
loc_pref = (loc_pref + math.exp(-np.abs(r_loc - s_loc))) / 2
return loc_pref
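# Worked example (values are illustrative only): with n_reg=2, a requester at 0.1 and a
# supplier at 0.4 both fall in region 0, so the coarse preference is exp(-|0 - 0|) = 1.0:
# loc_pref(0.1, 0.4, loc_fidelity=1, n_reg=2) # -> 1.0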
def reactor_breakdown(point):
"""Returns
-------
n_uox, n_mox, n_thox : tuple
the number of each reactor type
"""
n_rxtr = point.n_rxtr
fidelity = point.f_fc
r_t_f = point.r_t_f # thermal to fast
r_th_pu = point.r_th_pu # thox to mox
n_uox, n_mox, n_thox = 0, 0, 0
if fidelity == 0: # once through
n_uox = max(n_rxtr, 1)
elif fidelity == 1: # uox + fast mox
n_uox = max(int(round(r_t_f * n_rxtr)), 1)
n_mox = max(n_rxtr - n_uox, 1)
else: # uox + fast mox + fast thox
n_uox = max(int(round(r_t_f * n_rxtr)), 1)
n_thox = max(int(round(r_th_pu * (n_rxtr - n_uox))), 1)
n_mox = max(n_rxtr - n_uox - n_thox, 1)
return n_uox, n_mox, n_thox
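# Worked example (numbers are illustrative only): for a point with n_rxtr=10, f_fc=1 and
# r_t_f=0.7, the split is n_uox = round(0.7 * 10) = 7, n_mox = 10 - 7 = 3, n_thox = 0.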
def support_breakdown(point):
"""Returns
-------
n_uox, n_mox, n_thox, n_repo : tuple
the number of each support type
"""
n_uox_r, n_mox_r, n_thox_r = reactor_breakdown(point)
n_uox, n_t_mox, n_f_mox, n_f_thox, n_repo = 0, 0, 0, 0, 0
fidelity = point.f_fc
# number thermal supports
if fidelity == 0: # once through - only uox
n_uox = max(int(round(point.r_s_th * n_uox_r)), 1)
else:
n_s_t = max(int(round(point.r_s_th * n_uox_r)), 1)
n_uox = max(int(round(n_s_t / (1.0 + point.r_s_mox_uox))), 1)
n_t_mox = max(n_s_t - n_uox, 1)
# number f_mox supports
if fidelity > 0:
n_f_mox = max(int(round(point.r_s_mox * n_mox_r)), 1)
# number f_thox supports
if fidelity > 1:
n_f_thox = max(int(round(point.r_s_thox * n_thox_r)), 1)
if hasattr(point, 'r_repo'):
n_repo = max(int(round(sum([n_uox, n_t_mox, n_f_mox, n_f_thox]) * \
point.r_repo)), 1)
return n_uox, n_t_mox, n_f_mox, n_f_thox, n_repo
def assembly_roulette(fracs):
"""In the case where this is only one assembly (i.e., low reactor fidelity),
this method chooses the index
Parameters
----------
fracs : list
the assembly distribution, assumed to be normalized
Returns
-------
idx : int
the chosen list index
"""
rnd = random.uniform(0, 1)
cum_sum = 0
for i in range(len(fracs)):
cum_sum += fracs[i]
if rnd <= cum_sum:
return i
def assembly_breakdown(point, kind):
"""Returns
-------
assems : dict
a dictionary from commodity types to the number of assemblies
"""
if kind == data.Reactors.th:
fracs = point.d_th
elif kind == data.Reactors.f_mox:
fracs = point.d_f_mox
elif kind == data.Reactors.f_thox:
fracs = point.d_f_thox
denom = float(sum(fracs))
fracs = [x / denom for x in fracs]
if point.f_rxtr == 0: # one 'assembly', i.e. a batch
ret = [0] * len(fracs)
ret[assembly_roulette(fracs)] = 1
else: # full assemblies
nassems = data.n_assemblies[kind]
ret = [int(round(x * nassems)) for x in fracs]
diff = sum(ret) - nassems
if diff != 0: # adjust largest amount to give exactly nassems
ret[ret.index(max(ret))] -= diff
return {data.Commodities[i]: ret[i] for i in range(len(ret))}
class Reactor(object):
"""A simplified reactor model for Structured Species"""
def __init__(self, kind, point=None, n_assems=None):
self.kind = kind
if point is not None:
self.n_assems = 1 if point.f_rxtr == 0 else data.n_assemblies[kind]
elif n_assems is not None:
self.n_assems = n_assems
self.enr_rnd = random.uniform(0, 1)
self.loc = data.loc()
def enr(self, commod):
# node quantity takes into account relative fissile material
lb, ub = data.enr_ranges[self.kind][commod]
return (ub - lb) * self.enr_rnd + lb
def coeffs(self, commod):
return [1 / data.relative_qtys[self.kind][commod]]
"""Structured Arc Table Members"""
arc_io_name = "Arcs"
arc_tbl_dtype = np.dtype(
[('arcid', np.uint32), ('commod', np.uint32),
('pref_c', np.float32), ('pref_l', np.float32)])
"""Structured Post-Processing Table Members"""
pp_tbl_name = "PostProcess"
pp_tbl_dtype = np.dtype(
[('solnid', ('str', 16)), ('c_pref_flow', np.float64),
('l_pref_flow', np.float64)])
def tbl_descs(io_prefix):
return [
cycio.TblDesc('/'.join([io_prefix, pp_tbl_name]), 'soln', 'solnid'),
]
def _iid_to_prefs(iid, tbl, narcs, strategy='col'):
"""return a numpy array of preferences"""
if strategy == 'grp':
return tbl.read(field='pref_c'), tbl.read(field='pref_l')
# otherwise, do column strat
c_ret = np.zeros(narcs)
l_ret = np.zeros(narcs)
rows = cycio.uuid_rows(tbl, iid)
for x in rows:
aid = x['arcid']
c_ret[aid] = x['pref_c']
l_ret[aid] = x['pref_l']
return c_ret, l_ret
def _pp_work(instid, solnids, narcs, sid_to_flows, arc_tbl, strategy='col'):
c_prefs, l_prefs = _iid_to_prefs(instid, arc_tbl, narcs, strategy=strategy)
data = []
for sid, flows in sid_to_flows.items():
c_pref_flow = np.dot(c_prefs, flows)
l_pref_flow = np.dot(l_prefs, flows)
data.append((sid.bytes, c_pref_flow, l_pref_flow))
return data
def post_process(instid, solnids, props, io_managers, sp_name):
"""Perform any post processing on input and output.
Parameters
----------
instid : UUID
UUID of the instance to post process
solnids : tuple of UUIDs
a collection of solution UUIDs corresponding the instid
props : tuple, other
as defined by cyclopts.exchange_family
io_managers : tuple of cyclopts.cyclopts_io.IOManager
iomanager from an input file, iomanager from an output file,
and iomanager from a post-processed file
sp_name : str
the name of the species being post processed
"""
intbls, outtbls, pptbls = (m.tables for m in io_managers)
ingrps, outgrps, ppgrps = (m.groups for m in io_managers)
narcs, sid_to_flows = props
pp_tbl = pptbls[pp_tbl_name]
if arc_io_name in intbls.keys():
arc_tbl = intbls[arc_io_name]
strategy = 'col'
else:
arc_tbl = ingrps[arc_io_name].group()._f_get_child('id_' +
instid.hex)
strategy = 'grp'
data = _pp_work(instid, solnids, narcs, sid_to_flows, arc_tbl,
strategy=strategy)
pp_tbl.append_data(data)
|
[
"numpy.abs",
"random.uniform",
"numpy.dtype",
"numpy.zeros",
"cyclopts.cyclopts_io.uuid_rows",
"math.floor",
"cyclopts.structured_species.data.append",
"numpy.mean",
"collections.namedtuple",
"numpy.dot",
"cyclopts.structured_species.data.loc"
] |
[((249, 286), 'collections.namedtuple', 'namedtuple', (['"""Param"""', "['val', 'dtype']"], {}), "('Param', ['val', 'dtype'])\n", (259, 286), False, 'from collections import namedtuple\n'), ((6197, 6305), 'numpy.dtype', 'np.dtype', (["[('arcid', np.uint32), ('commod', np.uint32), ('pref_c', np.float32), (\n 'pref_l', np.float32)]"], {}), "([('arcid', np.uint32), ('commod', np.uint32), ('pref_c', np.\n float32), ('pref_l', np.float32)])\n", (6205, 6305), True, 'import numpy as np\n'), ((6402, 6500), 'numpy.dtype', 'np.dtype', (["[('solnid', ('str', 16)), ('c_pref_flow', np.float64), ('l_pref_flow', np.\n float64)]"], {}), "([('solnid', ('str', 16)), ('c_pref_flow', np.float64), (\n 'l_pref_flow', np.float64)])\n", (6410, 6500), True, 'import numpy as np\n'), ((1262, 1300), 'numpy.mean', 'np.mean', (['data.enr_ranges[rxtr][commod]'], {}), '(data.enr_ranges[rxtr][commod])\n', (1269, 1300), True, 'import numpy as np\n'), ((4358, 4378), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4372, 4378), False, 'import random\n'), ((6871, 6886), 'numpy.zeros', 'np.zeros', (['narcs'], {}), '(narcs)\n', (6879, 6886), True, 'import numpy as np\n'), ((6899, 6914), 'numpy.zeros', 'np.zeros', (['narcs'], {}), '(narcs)\n', (6907, 6914), True, 'import numpy as np\n'), ((6926, 6951), 'cyclopts.cyclopts_io.uuid_rows', 'cycio.uuid_rows', (['tbl', 'iid'], {}), '(tbl, iid)\n', (6941, 6951), True, 'from cyclopts import cyclopts_io as cycio\n'), ((1699, 1722), 'math.floor', 'math.floor', (['(n_reg * loc)'], {}), '(n_reg * loc)\n', (1709, 1722), False, 'import math\n'), ((5788, 5808), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (5802, 5808), False, 'import random\n'), ((5829, 5839), 'cyclopts.structured_species.data.loc', 'data.loc', ([], {}), '()\n', (5837, 5839), False, 'from cyclopts.structured_species import data\n'), ((7324, 7346), 'numpy.dot', 'np.dot', (['c_prefs', 'flows'], {}), '(c_prefs, flows)\n', (7330, 7346), True, 'import numpy as np\n'), ((7369, 7391), 'numpy.dot', 'np.dot', (['l_prefs', 'flows'], {}), '(l_prefs, flows)\n', (7375, 7391), True, 'import numpy as np\n'), ((7400, 7450), 'cyclopts.structured_species.data.append', 'data.append', (['(sid.bytes, c_pref_flow, l_pref_flow)'], {}), '((sid.bytes, c_pref_flow, l_pref_flow))\n', (7411, 7450), False, 'from cyclopts.structured_species import data\n'), ((2053, 2072), 'numpy.abs', 'np.abs', (['(rreg - sreg)'], {}), '(rreg - sreg)\n', (2059, 2072), True, 'import numpy as np\n'), ((2152, 2173), 'numpy.abs', 'np.abs', (['(r_loc - s_loc)'], {}), '(r_loc - s_loc)\n', (2158, 2173), True, 'import numpy as np\n')]
|
import pygame
import random
import numpy as np
from collections import deque
import tensorflow as tf # http://blog.topspeedsnail.com/archives/10116
import cv2 # http://blog.topspeedsnail.com/archives/4755
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
SCREEN_SIZE = [320, 400]
BAR_SIZE = [50, 5]
BALL_SIZE = [15, 15]
# Outputs of the neural network
MOVE_STAY = [1, 0, 0]
MOVE_LEFT = [0, 1, 0]
MOVE_RIGHT = [0, 0, 1]
class Game(object):
def __init__(self):
pygame.init()
self.clock = pygame.time.Clock()
self.screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption('Simple Game')
self.ball_pos_x = SCREEN_SIZE[0] // 2 - BALL_SIZE[0] / 2
self.ball_pos_y = SCREEN_SIZE[1] // 2 - BALL_SIZE[1] / 2
self.ball_dir_x = -1 # -1 = left 1 = right
self.ball_dir_y = -1 # -1 = up 1 = down
self.ball_pos = pygame.Rect(self.ball_pos_x, self.ball_pos_y, BALL_SIZE[0], BALL_SIZE[1])
self.bar_pos_x = SCREEN_SIZE[0] // 2 - BAR_SIZE[0] // 2
self.bar_pos = pygame.Rect(self.bar_pos_x, SCREEN_SIZE[1] - BAR_SIZE[1], BAR_SIZE[0], BAR_SIZE[1])
# action is one of MOVE_STAY, MOVE_LEFT, MOVE_RIGHT
# The AI moves the bar left or right and returns the game screen pixels plus the
# corresponding reward (pixels -> reward -> reinforce moving the bar toward higher reward).
def step(self, action):
if action == MOVE_LEFT:
self.bar_pos_x = self.bar_pos_x - 2
elif action == MOVE_RIGHT:
self.bar_pos_x = self.bar_pos_x + 2
else:
pass
if self.bar_pos_x < 0:
self.bar_pos_x = 0
if self.bar_pos_x > SCREEN_SIZE[0] - BAR_SIZE[0]:
self.bar_pos_x = SCREEN_SIZE[0] - BAR_SIZE[0]
self.screen.fill(BLACK)
self.bar_pos.left = self.bar_pos_x
pygame.draw.rect(self.screen, WHITE, self.bar_pos)
self.ball_pos.left += self.ball_dir_x * 2
self.ball_pos.bottom += self.ball_dir_y * 3
pygame.draw.rect(self.screen, WHITE, self.ball_pos)
if self.ball_pos.top <= 0 or self.ball_pos.bottom >= (SCREEN_SIZE[1] - BAR_SIZE[1] + 1):
self.ball_dir_y = self.ball_dir_y * -1
if self.ball_pos.left <= 0 or self.ball_pos.right >= (SCREEN_SIZE[0]):
self.ball_dir_x = self.ball_dir_x * -1
reward = 0
if self.bar_pos.top <= self.ball_pos.bottom and (self.bar_pos.left < self.ball_pos.right and self.bar_pos.right > self.ball_pos.left):
reward = 1 # reward for hitting the ball
elif self.bar_pos.top <= self.ball_pos.bottom and (self.bar_pos.left > self.ball_pos.right or self.bar_pos.right < self.ball_pos.left):
reward = -1 # penalty for missing the ball
# grab the game screen pixels
screen_image = pygame.surfarray.array3d(pygame.display.get_surface())
pygame.display.update()
# return the game screen pixels and the corresponding reward
return reward, screen_image
# discount factor for future rewards
LEARNING_RATE = 0.99
# epsilon-greedy exploration bounds
INITIAL_EPSILON = 1.0
FINAL_EPSILON = 0.05
# exploration and observation step counts
EXPLORE = 500000
OBSERVE = 50000
# size of the replay memory storing past experience
REPLAY_MEMORY = 500000
BATCH = 100
output = 3 # number of output-layer neurons, one per action: MOVE_STAY:[1, 0, 0] MOVE_LEFT:[0, 1, 0] MOVE_RIGHT:[0, 0, 1]
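# Sketch of the Q-learning target used in the training loop below (see gt_batch): for a
# sampled transition (s, a, r, s') the regression target is
# y = r + LEARNING_RATE * max_a' Q(s', a')
# i.e. LEARNING_RATE plays the role of the discount factor gamma, not an optimizer step size.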
input_image = tf.placeholder("float", [None, 80, 100, 4]) # game screen pixels
action = tf.placeholder("float", [None, output]) # actions
CHECKPOINT_PATH = r'E:/workspace/ai/gomoku/playground/dqn_example/checkpoints/ckpt'
# Define the CNN (convolutional neural network); reference: http://blog.topspeedsnail.com/archives/10451
def convolutional_neural_network(input_image):
weights = {'w_conv1': tf.Variable(tf.zeros([8, 8, 4, 32])),
'w_conv2': tf.Variable(tf.zeros([4, 4, 32, 64])),
'w_conv3': tf.Variable(tf.zeros([3, 3, 64, 64])),
'w_fc4': tf.Variable(tf.zeros([3456, 784])),
'w_out': tf.Variable(tf.zeros([784, output]))}
biases = {'b_conv1': tf.Variable(tf.zeros([32])),
'b_conv2': tf.Variable(tf.zeros([64])),
'b_conv3': tf.Variable(tf.zeros([64])),
'b_fc4': tf.Variable(tf.zeros([784])),
'b_out': tf.Variable(tf.zeros([output]))}
conv1 = tf.nn.relu(tf.nn.conv2d(input_image, weights['w_conv1'], strides=[1, 4, 4, 1], padding="VALID") + biases['b_conv1'])
conv2 = tf.nn.relu(tf.nn.conv2d(conv1, weights['w_conv2'], strides=[1, 2, 2, 1], padding="VALID") + biases['b_conv2'])
conv3 = tf.nn.relu(tf.nn.conv2d(conv2, weights['w_conv3'], strides=[1, 1, 1, 1], padding="VALID") + biases['b_conv3'])
conv3_flat = tf.reshape(conv3, [-1, 3456])
fc4 = tf.nn.relu(tf.matmul(conv3_flat, weights['w_fc4']) + biases['b_fc4'])
output_layer = tf.matmul(fc4, weights['w_out']) + biases['b_out']
return output_layer
# Introduction to deep reinforcement learning: https://www.nervanasys.com/demystifying-deep-reinforcement-learning/
# Train the neural network
def train_neural_network(input_image):
predict_action = convolutional_neural_network(input_image)
argmax = tf.placeholder("float", [None, output])
gt = tf.placeholder("float", [None])
action = tf.reduce_sum(tf.multiply(predict_action, argmax), reduction_indices=1)
cost = tf.reduce_mean(tf.square(action - gt))
optimizer = tf.train.AdamOptimizer(1e-6).minimize(cost)
game = Game()
D = deque()
_, image = game.step(MOVE_STAY)
# convert to grayscale
image = cv2.cvtColor(cv2.resize(image, (100, 80)), cv2.COLOR_BGR2GRAY)
# convert to a binary image
ret, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)
input_image_data = np.stack((image, image, image, image), axis=2)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver(max_to_keep=3)
try:
saver.restore(sess, CHECKPOINT_PATH)
except tf.errors.NotFoundError:
print('-1-')
n = 0
epsilon = INITIAL_EPSILON
while True:
action_t = predict_action.eval(feed_dict={input_image: [input_image_data]})[0]
argmax_t = np.zeros([output], dtype=np.int)
if random.random() <= epsilon: # epsilon-greedy exploration with the annealed epsilon
maxIndex = random.randrange(output)
else:
maxIndex = np.argmax(action_t)
argmax_t[maxIndex] = 1
if epsilon > FINAL_EPSILON:
epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
reward, image = game.step(list(argmax_t))
image = cv2.cvtColor(cv2.resize(image, (100, 80)), cv2.COLOR_BGR2GRAY)
ret, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)
image = np.reshape(image, (80, 100, 1))
input_image_data1 = np.append(image, input_image_data[:, :, 0:3], axis=2)
D.append((input_image_data, argmax_t, reward, input_image_data1))
if len(D) > REPLAY_MEMORY:
D.popleft()
if n > OBSERVE:
minibatch = random.sample(D, BATCH)
input_image_data_batch = [d[0] for d in minibatch]
argmax_batch = [d[1] for d in minibatch]
reward_batch = [d[2] for d in minibatch]
input_image_data1_batch = [d[3] for d in minibatch]
gt_batch = []
out_batch = predict_action.eval(feed_dict={input_image: input_image_data1_batch})
for i in range(0, len(minibatch)):
gt_batch.append(reward_batch[i] + LEARNING_RATE * np.max(out_batch[i]))
optimizer.run(feed_dict={gt: gt_batch, argmax: argmax_batch, input_image: input_image_data_batch})
input_image_data = input_image_data1
n = n + 1
if n % 2000 == 0:
saver.save(sess, CHECKPOINT_PATH) # save the model
print(n, "epsilon:", epsilon, " ", "action:", maxIndex, " ", "reward:", reward)
train_neural_network(input_image)
|
[
"numpy.argmax",
"random.sample",
"tensorflow.reshape",
"pygame.Rect",
"tensorflow.matmul",
"pygame.display.update",
"tensorflow.multiply",
"tensorflow.nn.conv2d",
"collections.deque",
"pygame.display.set_mode",
"tensorflow.placeholder",
"numpy.append",
"numpy.max",
"numpy.reshape",
"tensorflow.initialize_all_variables",
"pygame.display.set_caption",
"cv2.resize",
"numpy.stack",
"tensorflow.train.Saver",
"pygame.draw.rect",
"tensorflow.Session",
"pygame.init",
"random.random",
"pygame.time.Clock",
"cv2.threshold",
"numpy.zeros",
"pygame.display.get_surface",
"tensorflow.zeros",
"random.randrange",
"tensorflow.square",
"tensorflow.train.AdamOptimizer"
] |
[((3136, 3179), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 80, 100, 4]'], {}), "('float', [None, 80, 100, 4])\n", (3150, 3179), True, 'import tensorflow as tf\n'), ((3198, 3237), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, output]'], {}), "('float', [None, output])\n", (3212, 3237), True, 'import tensorflow as tf\n'), ((4446, 4475), 'tensorflow.reshape', 'tf.reshape', (['conv3', '[-1, 3456]'], {}), '(conv3, [-1, 3456])\n', (4456, 4475), True, 'import tensorflow as tf\n'), ((4871, 4910), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, output]'], {}), "('float', [None, output])\n", (4885, 4910), True, 'import tensorflow as tf\n'), ((4921, 4952), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None]'], {}), "('float', [None])\n", (4935, 4952), True, 'import tensorflow as tf\n'), ((5183, 5190), 'collections.deque', 'deque', ([], {}), '()\n', (5188, 5190), False, 'from collections import deque\n'), ((5351, 5398), 'cv2.threshold', 'cv2.threshold', (['image', '(1)', '(255)', 'cv2.THRESH_BINARY'], {}), '(image, 1, 255, cv2.THRESH_BINARY)\n', (5364, 5398), False, 'import cv2\n'), ((5423, 5469), 'numpy.stack', 'np.stack', (['(image, image, image, image)'], {'axis': '(2)'}), '((image, image, image, image), axis=2)\n', (5431, 5469), True, 'import numpy as np\n'), ((470, 483), 'pygame.init', 'pygame.init', ([], {}), '()\n', (481, 483), False, 'import pygame\n'), ((506, 525), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (523, 525), False, 'import pygame\n'), ((549, 585), 'pygame.display.set_mode', 'pygame.display.set_mode', (['SCREEN_SIZE'], {}), '(SCREEN_SIZE)\n', (572, 585), False, 'import pygame\n'), ((595, 636), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Simple Game"""'], {}), "('Simple Game')\n", (621, 636), False, 'import pygame\n'), ((903, 976), 'pygame.Rect', 'pygame.Rect', (['self.ball_pos_x', 'self.ball_pos_y', 'BALL_SIZE[0]', 'BALL_SIZE[1]'], {}), '(self.ball_pos_x, self.ball_pos_y, BALL_SIZE[0], BALL_SIZE[1])\n', (914, 976), False, 'import pygame\n'), ((1068, 1155), 'pygame.Rect', 'pygame.Rect', (['self.bar_pos_x', '(SCREEN_SIZE[1] - BAR_SIZE[1])', 'BAR_SIZE[0]', 'BAR_SIZE[1]'], {}), '(self.bar_pos_x, SCREEN_SIZE[1] - BAR_SIZE[1], BAR_SIZE[0],\n BAR_SIZE[1])\n', (1079, 1155), False, 'import pygame\n'), ((1758, 1808), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', 'WHITE', 'self.bar_pos'], {}), '(self.screen, WHITE, self.bar_pos)\n', (1774, 1808), False, 'import pygame\n'), ((1924, 1975), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', 'WHITE', 'self.ball_pos'], {}), '(self.screen, WHITE, self.ball_pos)\n', (1940, 1975), False, 'import pygame\n'), ((2747, 2770), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2768, 2770), False, 'import pygame\n'), ((4579, 4611), 'tensorflow.matmul', 'tf.matmul', (['fc4', "weights['w_out']"], {}), "(fc4, weights['w_out'])\n", (4588, 4611), True, 'import tensorflow as tf\n'), ((4983, 5018), 'tensorflow.multiply', 'tf.multiply', (['predict_action', 'argmax'], {}), '(predict_action, argmax)\n', (4994, 5018), True, 'import tensorflow as tf\n'), ((5068, 5090), 'tensorflow.square', 'tf.square', (['(action - gt)'], {}), '(action - gt)\n', (5077, 5090), True, 'import tensorflow as tf\n'), ((5270, 5298), 'cv2.resize', 'cv2.resize', (['image', '(100, 80)'], {}), '(image, (100, 80))\n', (5280, 5298), False, 'import cv2\n'), ((5482, 5494), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5492, 5494), 
True, 'import tensorflow as tf\n'), ((5572, 5601), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(3)'}), '(max_to_keep=3)\n', (5586, 5601), True, 'import tensorflow as tf\n'), ((2708, 2736), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (2734, 2736), False, 'import pygame\n'), ((3488, 3511), 'tensorflow.zeros', 'tf.zeros', (['[8, 8, 4, 32]'], {}), '([8, 8, 4, 32])\n', (3496, 3511), True, 'import tensorflow as tf\n'), ((3553, 3577), 'tensorflow.zeros', 'tf.zeros', (['[4, 4, 32, 64]'], {}), '([4, 4, 32, 64])\n', (3561, 3577), True, 'import tensorflow as tf\n'), ((3619, 3643), 'tensorflow.zeros', 'tf.zeros', (['[3, 3, 64, 64]'], {}), '([3, 3, 64, 64])\n', (3627, 3643), True, 'import tensorflow as tf\n'), ((3683, 3704), 'tensorflow.zeros', 'tf.zeros', (['[3456, 784]'], {}), '([3456, 784])\n', (3691, 3704), True, 'import tensorflow as tf\n'), ((3744, 3767), 'tensorflow.zeros', 'tf.zeros', (['[784, output]'], {}), '([784, output])\n', (3752, 3767), True, 'import tensorflow as tf\n'), ((3810, 3824), 'tensorflow.zeros', 'tf.zeros', (['[32]'], {}), '([32])\n', (3818, 3824), True, 'import tensorflow as tf\n'), ((3865, 3879), 'tensorflow.zeros', 'tf.zeros', (['[64]'], {}), '([64])\n', (3873, 3879), True, 'import tensorflow as tf\n'), ((3920, 3934), 'tensorflow.zeros', 'tf.zeros', (['[64]'], {}), '([64])\n', (3928, 3934), True, 'import tensorflow as tf\n'), ((3973, 3988), 'tensorflow.zeros', 'tf.zeros', (['[784]'], {}), '([784])\n', (3981, 3988), True, 'import tensorflow as tf\n'), ((4027, 4045), 'tensorflow.zeros', 'tf.zeros', (['[output]'], {}), '([output])\n', (4035, 4045), True, 'import tensorflow as tf\n'), ((4074, 4163), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_image', "weights['w_conv1']"], {'strides': '[1, 4, 4, 1]', 'padding': '"""VALID"""'}), "(input_image, weights['w_conv1'], strides=[1, 4, 4, 1], padding\n ='VALID')\n", (4086, 4163), True, 'import tensorflow as tf\n'), ((4204, 4282), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['conv1', "weights['w_conv2']"], {'strides': '[1, 2, 2, 1]', 'padding': '"""VALID"""'}), "(conv1, weights['w_conv2'], strides=[1, 2, 2, 1], padding='VALID')\n", (4216, 4282), True, 'import tensorflow as tf\n'), ((4328, 4406), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['conv2', "weights['w_conv3']"], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(conv2, weights['w_conv3'], strides=[1, 1, 1, 1], padding='VALID')\n", (4340, 4406), True, 'import tensorflow as tf\n'), ((4498, 4537), 'tensorflow.matmul', 'tf.matmul', (['conv3_flat', "weights['w_fc4']"], {}), "(conv3_flat, weights['w_fc4'])\n", (4507, 4537), True, 'import tensorflow as tf\n'), ((5109, 5138), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(1e-06)'], {}), '(1e-06)\n', (5131, 5138), True, 'import tensorflow as tf\n'), ((5522, 5551), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (5549, 5551), True, 'import tensorflow as tf\n'), ((5926, 5958), 'numpy.zeros', 'np.zeros', (['[output]'], {'dtype': 'np.int'}), '([output], dtype=np.int)\n', (5934, 5958), True, 'import numpy as np\n'), ((6451, 6498), 'cv2.threshold', 'cv2.threshold', (['image', '(1)', '(255)', 'cv2.THRESH_BINARY'], {}), '(image, 1, 255, cv2.THRESH_BINARY)\n', (6464, 6498), False, 'import cv2\n'), ((6520, 6551), 'numpy.reshape', 'np.reshape', (['image', '(80, 100, 1)'], {}), '(image, (80, 100, 1))\n', (6530, 6551), True, 'import numpy as np\n'), ((6585, 6638), 'numpy.append', 'np.append', (['image', 'input_image_data[:, :, 
0:3]'], {'axis': '(2)'}), '(image, input_image_data[:, :, 0:3], axis=2)\n', (6594, 6638), True, 'import numpy as np\n'), ((5976, 5991), 'random.random', 'random.random', ([], {}), '()\n', (5989, 5991), False, 'import random\n'), ((6041, 6065), 'random.randrange', 'random.randrange', (['output'], {}), '(output)\n', (6057, 6065), False, 'import random\n'), ((6113, 6132), 'numpy.argmax', 'np.argmax', (['action_t'], {}), '(action_t)\n', (6122, 6132), True, 'import numpy as np\n'), ((6375, 6403), 'cv2.resize', 'cv2.resize', (['image', '(100, 80)'], {}), '(image, (100, 80))\n', (6385, 6403), False, 'import cv2\n'), ((6851, 6874), 'random.sample', 'random.sample', (['D', 'BATCH'], {}), '(D, BATCH)\n', (6864, 6874), False, 'import random\n'), ((7387, 7407), 'numpy.max', 'np.max', (['out_batch[i]'], {}), '(out_batch[i])\n', (7393, 7407), True, 'import numpy as np\n')]
|
import resources as res
import numpy as np
import nltk
class Feature(object):
dataset = None
def __init__(self, dataset):
self.dataset = dataset
def run(self):
array = []
for text in self.dataset:
bigrams = 0
counter = 0
words = nltk.word_tokenize(text)
if len(words) < 3:
array.append(0.0)
continue
for i in range(0, len(words) - 1):
counter+=1
if words[i].lower() in res.__bigrams__ and words[i + 1].lower() in res.__bigrams__[words[i].lower()]:
bigrams += 1
if counter == 0:
array.append(0.0)
else:
array.append(float(bigrams) / counter)
return np.matrix(array)
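# Minimal usage sketch (assumes res.__bigrams__ maps a word to the set of words that may
# follow it; the texts below are made-up examples):
# feature = Feature(["the quick brown fox", "hi"])
# print(feature.run()) # 1 x 2 matrix of known-bigram ratios, 0.0 for texts under 3 tokens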
|
[
"numpy.matrix",
"nltk.word_tokenize"
] |
[((800, 816), 'numpy.matrix', 'np.matrix', (['array'], {}), '(array)\n', (809, 816), True, 'import numpy as np\n'), ((307, 331), 'nltk.word_tokenize', 'nltk.word_tokenize', (['text'], {}), '(text)\n', (325, 331), False, 'import nltk\n')]
|
from __future__ import absolute_import
import numpy
import orange, statc
from . import stats
def mean(l):
return float(sum(l))/len(l)
class MA_pearsonCorrelation:
"""
Calling an object of this class computes the Pearson correlation of an
attribute against the class.
"""
def __call__(self, i, data):
dom2 = orange.Domain([data.domain.attributes[i]], data.domain.classVar)
data2 = orange.ExampleTable(dom2, data)
a,c = data2.toNumpy("A/C")
return numpy.corrcoef(c,a[:,0])[0,1]
class MA_signalToNoise:
"""
Returns signal to noise measurement: difference of means of two classes
divided by the sum of standard deviations for both classes.
Usage similar to MeasureAttribute*.
Standard deviation used for now returns minimally 0.2*|mi|, where mi=0 is adjusted to mi=1
(as in gsea implementation).
Can work only on data with two classes. If there are multiple classes, then
relevant class values can be specified on object initialization.
By default the relevant classes are first and second class value
from the domain.
"""
def __init__(self, a=None, b=None):
"""
a and b are chosen class values.
"""
self.a = a
self.b = b
def __call__(self, i, data):
cv = data.domain.classVar
#print data.domain
if self.a == None: self.a = cv.values[0]
if self.b == None: self.b = cv.values[1]
def stdev(l):
return statc.std(l)
def mean(l):
return statc.mean(l)
def stdevm(l):
m = mean(l)
std = stdev(l)
#return minimally 0.2*|mi|, where mi=0 is adjusted to mi=1
return max(std, 0.2*abs(1.0 if m == 0 else m))
def avWCVal(value):
return [ex[i].value for ex in data if ex[-1].value == value and not ex[i].isSpecial() ]
exa = avWCVal(self.a)
exb = avWCVal(self.b)
try:
rval = (mean(exa)-mean(exb))/(stdevm(exa)+stdevm(exb))
return rval
except:
#return some "middle" value -
#TODO rather throw exception?
return 0
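# In formula form the measure above is s2n = (mean_a - mean_b) / (sd_a + sd_b),
# where each sd is floored at 0.2*|mean| (and a zero mean is treated as 1), as in gsea.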
class MA_t_test(object):
def __init__(self, a=None, b=None, prob=False):
self.a = a
self.b = b
self.prob = prob
def __call__(self, i, data):
cv = data.domain.classVar
#print data.domain
#for faster computation. to save dragging many attributes along
dom2 = orange.Domain([data.domain[i]], data.domain.classVar)
data = orange.ExampleTable(dom2, data)
i = 0
if self.a == None: self.a = cv.values[0]
if self.b == None: self.b = cv.values[1]
def avWCVal(value):
return [ex[i].value for ex in data if ex[cv] == value and not ex[i].isSpecial() ]
exa = avWCVal(self.a)
exb = avWCVal(self.b)
try:
t, prob = stats.lttest_ind(exa, exb)
return prob if self.prob else t
except:
return 1.0 if self.prob else 0.0
class MA_fold_change(object):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
def __call__(self, i, data):
cv = data.domain.classVar
#print data.domain
#for faster computation. to save dragging many attributes along
dom2 = orange.Domain([data.domain[i]], data.domain.classVar)
data = orange.ExampleTable(dom2, data)
i = 0
if self.a == None: self.a = cv.values[0]
if self.b == None: self.b = cv.values[1]
def avWCVal(value):
return [ex[i].value for ex in data if ex[cv] == value and not ex[i].isSpecial() ]
exa = avWCVal(self.a)
exb = avWCVal(self.b)
try:
return mean(exa)/mean(exb)
except:
return 1
class MA_anova(object):
def __init__(self, prob=False):
self.prob = prob
def __call__(self, i, data):
cv = data.domain.classVar
#print data.domain
#for faster computation. to save dragging many attributes along
dom2 = orange.Domain([data.domain[i]], data.domain.classVar)
data = orange.ExampleTable(dom2, data)
i = 0
def avWCVal(value):
return [ex[i].value for ex in data if ex[cv] == value and not ex[i].isSpecial() ]
data = [avWCVal(val) for val in cv.values]
try:
f, prob = stats.lF_oneway(*tuple(data))
return prob if self.prob else f
except:
return 1.0 if self.prob else 0.0
import numpy as np
import numpy.ma as ma
class ExpressionSignificance_Test(object):
def __new__(cls, data, useAttributeLabels, **kwargs):
self = object.__new__(cls)
if kwargs:
self.__init__(data, useAttributeLabels)
return self.__call__(**kwargs)
else:
return self
def __init__(self, data, useAttributeLabels=False):
self.data = data
self.useAttributeLabels = useAttributeLabels
self.attr_labels, self.data_classes = self._data_info(data)
self.attributes = [attr for attr in self.data.domain.attributes if attr.varType in [orange.VarTypes.Continuous, orange.VarTypes.Discrete]]
self.classes = np.array(self.attr_labels if useAttributeLabels else self.data_classes)
self.keys = range(len(data)) if useAttributeLabels else self.attributes
self.array, _, _ = data.toNumpyMA()
if self.useAttributeLabels:
self.array = ma.transpose(self.array)
# self.dim = 1 if useAttributeLabels else 0
self.dim = 0
def _data_info(self, data):
return [set(attr.attributes.items()) for attr in data.domain.attributes], [ex.getclass() for ex in data] if data.domain.classVar else [None]*len(data)
def test_indices(self, target, classes=None):
classes = self.classes if classes is None else classes
def target_set(target):
if isinstance(target, tuple):
return set([target])
else:
assert(isinstance(target, set))
return target
if self.useAttributeLabels:
if isinstance(target, list):
ind = [[i for i, cl in enumerate(self.classes) if target_set(t).intersection(cl)] for t in target]
else:
target = target_set(target)
ind1 = [i for i, cl in enumerate(self.classes) if target.intersection(cl)]
ind2 = [i for i, cl in enumerate(self.classes) if not target.intersection(cl)]
ind = [ind1, ind2]
else:
if isinstance(target, list):
ind = [ma.nonzero(self.classes == t)[0] for t in target]
else:
if isinstance(target, (basestring, orange.Variable)):
target = set([target])
else:
assert(isinstance(target, set))
target = list(target)
ind1 = [i for i, cl in enumerate(self.classes) if cl in target]
ind2 = [i for i, cl in enumerate(self.classes) if cl not in target]
ind = [ind1, ind2]
return ind
def __call__(self, target):
raise NotImplementedError()
def null_distribution(self, num, *args, **kwargs):
kwargs = dict(kwargs)
advance = lambda: None
if "advance" in kwargs:
advance = kwargs["advance"]
del kwargs["advance"]
results = []
originalClasses = self.classes.copy()
for i in range(num):
np.random.shuffle(self.classes)
results.append(self.__call__(*args, **kwargs))
advance()
self.classes = originalClasses
return results
class ExpressionSignificance_TTest(ExpressionSignificance_Test):
def __call__(self, target):
ind1, ind2 = self.test_indices(target)
t, pval = attest_ind(self.array[ind1, :], self.array[ind2, :], dim=self.dim)
return zip(self.keys, zip(t, pval))
class ExpressionSignificance_FoldChange(ExpressionSignificance_Test):
def __call__(self, target):
ind1, ind2 = self.test_indices(target)
a1, a2 = self.array[ind1, :], self.array[ind2, :]
fold = ma.mean(a1, self.dim)/ma.mean(a2, self.dim)
return zip(self.keys, fold)
class ExpressionSignificance_SignalToNoise(ExpressionSignificance_Test):
def __call__(self, target):
ind1, ind2 = self.test_indices(target)
a1, a2 = self.array[ind1, :], self.array[ind2, :]
stn = (ma.mean(a1, self.dim) - ma.mean(a2, self.dim)) / (ma.sqrt(ma.var(a1, self.dim)) + ma.sqrt(ma.var(a2, self.dim)))
return zip(self.keys, stn)
class ExpressionSignificance_ANOVA(ExpressionSignificance_Test):
def __call__(self, target=None):
if target is not None:
indices = self.test_indices(target)
else:
indices = []
f, prob = aF_oneway(*[self.array[ind, :] for ind in indices], **dict(dim=0))
return zip(self.keys, zip(f, prob))
class ExpressionSignificance_ChiSquare(ExpressionSignificance_Test):
def __call__(self, target):
array = equi_n_discretization(self.array.copy(), intervals=5, dim=0)
ind1, ind2 = self.test_indices(target)
a1, a2 = array[ind1, :], array[ind2, :]
dist1, dist2 = [], []
dist = ma.zeros((array.shape[1], 2, 5))
for i in range(5):
dist1.append(ma.sum(ma.ones(a1.shape) * (a1 == i), 0))
dist2.append(ma.sum(ma.ones(a2.shape) * (a2 == i), 0))
dist[:, 0, i] = dist1[-1]
dist[:, 1, i] = dist2[-1]
return zip(self.keys, achisquare_indtest(np.array(dist), dim=1))
class ExpressionSignificance_Info(ExpressionSignificance_Test):
def __call__(self, target):
array = equi_n_discretization(self.array.copy(), intervals=5, dim=1)
ind1, ind2 = self.test_indices(target)
a1, a2 = array[ind1, :], array[ind2, :]
dist1, dist2 = [], []
dist = ma.zeros((array.shape[1], 2, 5))
for i in range(5):
dist1.append(ma.sum(ma.ones(a1.shape) * (a1 == i), 0))
dist2.append(ma.sum(ma.ones(a2.shape) * (a2 == i), 0))
dist[:, 0, i] = dist1[-1]
dist[:, 1, i] = dist2[-1]
classinfo = entropy(np.array([len(ind1), len(ind2)]))
E = ma.sum(entropy(dist, dim=1) * ma.sum(dist, 1), 1) / ma.sum(ma.sum(dist, 1), 1)
return zip(self.keys, classinfo - E)
class ExpressionSignificance_MannWhitneyu(ExpressionSignificance_Test):
def __call__(self, target):
ind1, ind2 = self.test_indices(target)
a, b = self.array[ind1, :], self.array[ind2, :]
# results = [amannwhitneyu(a[:, i],b[:, i]) for i in range(a.shape[1])]
results = [statc.mannwhitneyu(list(a[:, i]),list(b[:, i])) for i in range(a.shape[1])]
return zip(self.keys, results)
def attest_ind(a, b, dim=None):
""" Return the t-test statistics on arrays a and b over the dim axis.
Returns both the t statistic as well as the p-value
"""
# dim = a.ndim - 1 if dim is None else dim
x1, x2 = ma.mean(a, dim), ma.mean(b, dim)
v1, v2 = ma.var(a, dim), ma.var(b, dim)
n1, n2 = (a.shape[dim], b.shape[dim]) if dim is not None else (a.size, b.size)
df = float(n1+n2-2)
svar = ((n1-1)*v1+(n2-1)*v2) / df
t = (x1-x2)/ma.sqrt(svar*(1.0/n1 + 1.0/n2))
if t.ndim == 0:
return (t, statc.betai(0.5*df,0.5,df/(df+t**2)) if t is not ma.masked and df/(df+t**2) <= 1.0 else ma.masked)
else:
prob = [statc.betai(0.5*df,0.5,df/(df+tsq)) if tsq is not ma.masked and df/(df+tsq) <= 1.0 else ma.masked for tsq in t*t]
return t, prob
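# For reference, the statistic above is the pooled-variance two-sample t-test:
# t = (x1 - x2) / sqrt(svar * (1/n1 + 1/n2)), svar = ((n1-1)*v1 + (n2-1)*v2) / (n1+n2-2)
# and the p-value comes from the incomplete beta function betai(df/2, 0.5, df/(df+t**2)).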
def aF_oneway(*args, **kwargs):
dim = kwargs.get("dim", None)
arrays = args
means = [ma.mean(a, dim) for a in arrays]
vars = [ma.var(a, dim) for a in arrays]
lens = [ma.sum(ma.array(ma.ones(a.shape), mask=ma.asarray(a).mask), dim) for a in arrays]
alldata = ma.concatenate(arrays, dim if dim is not None else 0)
bign = ma.sum(ma.array(ma.ones(alldata.shape), mask=alldata.mask), dim)
sstot = ma.sum(alldata ** 2, dim) - (ma.sum(alldata, dim) ** 2) / bign
ssbn = ma.sum([(ma.sum(a, dim) ** 2) / L for a, L in zip(arrays, lens)], dim)
# print ma.sum(alldata, dim) ** 2 / bign, ssbn
ssbn -= ma.sum(alldata, dim) ** 2 / bign
sswn = sstot - ssbn
dfbn = dfnum = float(len(args) - 1.0)
dfwn = bign - len(args) # + 1.0
F = (ssbn / dfbn) / (sswn / dfwn)
if F.ndim == 0 and dfwn.ndim == 0:
return (F,statc.betai(0.5 * dfwn, 0.5 * dfnum, dfwn/float(dfwn+dfnum*F)) if F is not ma.masked and dfwn/float(dfwn+dfnum*F) <= 1.0 \
and dfwn/float(dfwn+dfnum*F) >= 0.0 else ma.masked)
else:
prob = [statc.betai(0.5 * dfden, 0.5 * dfnum, dfden/float(dfden+dfnum*f)) if f is not ma.masked and dfden/float(dfden+dfnum*f) <= 1.0 \
and dfden/float(dfden+dfnum*f) >= 0.0 else ma.masked for dfden, f in zip (dfwn, F)]
return F, prob
def achisquare_indtest(observed, dim=None):
if observed.ndim == 2:
observed = ma.array([observed])
if dim is not None:
dim += 1
if dim is None:
dim = observed.ndim - 2
rowtotal = ma.sum(observed, dim + 1)
coltotal = ma.sum(observed, dim)
total = ma.sum(rowtotal, dim)
ones = ma.array(ma.ones(observed.shape))
expected = ones * rowtotal.reshape(rowtotal.shape[:dim] + (-1, 1))
a = ones * coltotal[..., np.zeros(observed.shape[dim], dtype=int),:]
expected = expected * (a) / total.reshape((-1, 1, 1))
chisq = ma.sum(ma.sum((observed - expected) ** 2 / expected, dim + 1), dim)
return chisq
def equi_n_discretization(array, intervals=5, dim=1):
count = ma.sum(ma.array(ma.ones(array.shape, dtype=int), mask=array.mask), dim)
cut = ma.zeros(len(count), dtype=int)
sarray = ma.sort(array, dim)
r = count % intervals
pointsshape = list(array.shape)
pointsshape[dim] = 1
points = []
for i in range(intervals):
cutend = cut + count / intervals + numpy.ones(len(r)) * (r > i)
if dim == 1:
p = sarray[range(len(cutend)), numpy.array(cutend, dtype=int) -1]
else:
p = sarray[numpy.array(cutend, dtype=int) -1, range(len(cutend))]
points.append(p.reshape(pointsshape))
cut = cutend
darray = ma.array(ma.zeros(array.shape) - 1, mask=array.mask)
darray[ma.nonzero(array <= points[0])] = 0
for i in range(0, intervals):
darray[ma.nonzero((array > points[i]))] = i + 1
return darray
def entropy(array, dim=None):
if dim is None:
array = array.ravel()
dim = 0
n = ma.sum(array, dim)
array = ma.log(array) * array
sum = ma.sum(array, dim)
return (ma.log(n) - sum / n) / ma.log(2.0)
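# For a vector of counts c with n = sum(c), the expression above equals the Shannon
# entropy in bits: H = -sum_i (c_i/n)*log2(c_i/n) = (log(n) - sum_i c_i*log(c_i)/n) / log(2).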
"""\
MA - Plot
=========
Functions for normalization of expression arrays and ploting
MA - Plots
Example::
## Load data from GEO
>>> data = orange.ExampleTable("GDS1210.tab")
## Split data by columns into normal and cancer subsets
>>> cancer, normal = data_split(data, [("disease state", "cancer"), ("disease state", "normal")])
## Convert to numpy MaskedArrays
>>> cancer, normal = cancer.toNumpyMA("A")[0], normal.toNumpyMA("A")[0]
## Merge by averaging
>>> cancer = merge_replicates(cancer)
>>> normal = merge_replicates(normal)
## Plot MA-plot
>>> MA_plot(cancer, normal)
"""
from Orange.orng import orngMisc
from numpy import median
def lowess(x, y, f=2./3., iter=3, progressCallback=None):
""" Lowess taken from Bio.Statistics.lowess, modified to compute pairwise
distances inplace.
lowess(x, y, f=2./3., iter=3) -> yest
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations.
x and y should be numpy float arrays of equal length. The return value is
also a numpy float array of that length.
e.g.
>>> import numpy
>>> x = numpy.array([4, 4, 7, 7, 8, 9, 10, 10, 10, 11, 11, 12, 12, 12,
... 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 16, 16,
... 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 20, 20, 20, 20,
... 20, 22, 23, 24, 24, 24, 24, 25], numpy.float)
>>> y = numpy.array([2, 10, 4, 22, 16, 10, 18, 26, 34, 17, 28, 14, 20, 24,
... 28, 26, 34, 34, 46, 26, 36, 60, 80, 20, 26, 54, 32, 40,
... 28, 26, 34, 34, 46, 26, 36, 60, 80, 20, 26, 54, 32, 40,
... 32, 40, 50, 42, 56, 76, 84, 36, 46, 68, 32, 48, 52, 56,
... 64, 66, 54, 70, 92, 93, 120, 85], numpy.float)
>>> result = lowess(x, y)
>>> len(result)
50
>>> print "[%0.2f, ..., %0.2f]" % (result[0], result[-1])
[4.85, ..., 84.98]
"""
n = len(x)
r = min(int(numpy.ceil(f*n)), n - 1)
# h = [numpy.sort(numpy.abs(x-x[i]))[r] for i in range(n)]
# h, xtmp = numpy.zeros_like(x), numpy.zeros_like(x)
# for i in range(n):
# xtmp = numpy.abs(x - x[i], xtmp)
# h[i] = numpy.sort(xtmp)[r]
# w = numpy.clip(numpy.abs(([x]-numpy.transpose([x]))/h),0.0,1.0)
dist = [x] - numpy.transpose([x])
dist = numpy.abs(dist, dist)
dist.sort(axis=1)
h = dist[:, r]
del dist
w = [x]-numpy.transpose([x])
w /= h
w = numpy.abs(w, w)
w = numpy.clip(w, 0.0, 1.0, w)
# w = 1-w*w*w
w **= 3
w *= -1
w += 1
# w = w*w*w
w **= 3
yest = numpy.zeros(n)
delta = numpy.ones(n)
milestones = orngMisc.progressBarMilestones(iter*n)
for iteration in range(iter):
for i in xrange(n):
weights = delta * w[:,i]
weights_mul_x = weights * x
b1 = numpy.ma.dot(weights,y)
b2 = numpy.ma.dot(weights_mul_x,y)
A11 = sum(weights)
A12 = sum(weights_mul_x)
A21 = A12
A22 = numpy.ma.dot(weights_mul_x,x)
determinant = A11*A22 - A12*A21
beta1 = (A22*b1-A12*b2) / determinant
beta2 = (A11*b2-A21*b1) / determinant
yest[i] = beta1 + beta2*x[i]
if progressCallback and (iteration*n + i) in milestones:
progressCallback(100. * (iteration * n + i) / (iter * n))
residuals = y-yest
s = median(abs(residuals))
delta[:] = numpy.clip(residuals/(6*s),-1,1)
delta[:] = 1-delta*delta
delta[:] = delta*delta
return yest
def lowess2(x, y, xest, f=2./3., iter=3, progressCallback=None):
"""Returns estimated values of y in data points xest (or None if estimation fails).
Lowess smoother: Robust locally weighted regression.
The lowess function fits a nonparametric regression curve to a scatterplot.
The arrays x and y contain an equal number of elements; each pair
(x[i], y[i]) defines a data point in the scatterplot. The function returns
the estimated (smooth) values of y.
The smoothing span is given by f. A larger value for f will result in a
smoother curve. The number of robustifying iterations is given by iter. The
function will run faster with a smaller number of iterations.
Taken from <NAME>'s numpyExtn.py, modified for numpy, computes pairwise
distances inplace
"""
x = numpy.asarray(x, 'f')
y = numpy.asarray(y, 'f')
xest = numpy.asarray(xest, 'f')
n = len(x)
nest = len(xest)
r = min(int(numpy.ceil(f*n)),n-1) # radius: num. of points to take into LR
# h = [numpy.sort(numpy.abs(x-x[i]))[r] for i in range(n)] # distance of the r-th point from x[i]
dist = [x] - numpy.transpose([x])
dist = numpy.abs(dist, dist)
dist.sort(axis=1)
h = dist[:, r]
del dist # to free memory
w = [x] - numpy.transpose([x])
w /= h
w = numpy.abs(w, w)
w = numpy.clip(w, 0.0, 1.0, w)
# w = numpy.clip(numpy.abs(([x]-numpy.transpose([x]))/h),0.0,1.0)
w **= 3
w *= -1
w += 1
# w = 1 - w**3 #1-w*w*w
w **= 3
# w = w**3 #w*w*w
# hest = [numpy.sort(numpy.abs(x-xest[i]))[r] for i in range(nest)] # r-th min. distance from xest[i] to x
dist = [x] - numpy.transpose([xest])
dist = numpy.abs(dist, dist)
dist.sort(axis=1)
hest = dist[:, r]
del dist # to free memory
# west = numpy.clip(numpy.abs(([xest]-numpy.transpose([x]))/hest),0.0,1.0) # shape: (len(x), len(xest)
west = [xest]-numpy.transpose([x])
west /= hest
west = numpy.abs(west, west)
west = numpy.clip(west, 0.0, 1.0, west)
# west = 1 - west**3 #1-west*west*west
west **= 3
west *= -1
west += 1
# west = west**3 #west*west*west
west **= 3
yest = numpy.zeros(n,'f')
yest2 = numpy.zeros(nest,'f')
delta = numpy.ones(n,'f')
iter_count = iter*(nest + n) if iter > 1 else nest
milestones = orngMisc.progressBarMilestones(iter_count)
curr_iter = 0
for iteration in range(iter):
# fit xest
for i in range(nest):
weights = delta * west[:,i]
b = numpy.array([numpy.sum(weights*y), numpy.sum(weights*y*x)])
A = numpy.array([[numpy.sum(weights), numpy.sum(weights*x)], [numpy.sum(weights*x), numpy.sum(weights*x*x)]])
beta = numpy.linalg.solve(A, b)
yest2[i] = beta[0] + beta[1]*xest[i]
if progressCallback and curr_iter in milestones:
progressCallback(100. * curr_iter / iter_count)
curr_iter += 1
# fit x (to calculate residuals and delta)
if iter > 1:
for i in range(n):
weights = delta * w[:,i]
b = numpy.array([numpy.sum(weights*y), numpy.sum(weights*y*x)])
A = numpy.array([[numpy.sum(weights), numpy.sum(weights*x)], [numpy.sum(weights*x), numpy.sum(weights*x*x)]])
beta = numpy.linalg.solve(A,b)
yest[i] = beta[0] + beta[1]*x[i]
if progressCallback and curr_iter in milestones:
progressCallback(100. * curr_iter / iter_count)
curr_iter += 1
residuals = y-yest
s = numpy.median(numpy.abs(residuals))
delta = numpy.clip(residuals/(6*s), -1, 1)
delta = 1-delta*delta
delta = delta*delta
return yest2
def attr_group_indices(data, label_groups):
""" Return a two or more lists of indices into `data.domain` based on `label_groups`
Example::
cancer_indices, no_cancer_indices = attr_group_indices(data, [("disease state", "cancer"), ("disease state", "normal")])
"""
ret = []
for key, val in label_groups:
ind = [i for i, attr in enumerate(data.domain.attributes) if attr.attributes.get(key, None) == val]
ret.append(ind)
return ret
def example_group_indices(data, attr, values):
""" Return lists of indices into `data` for each `values` item that matches
the example value at `attr` attribute
Example::
cls_ind1, cls_ind2 = example_group_indices(data, data.domain.classVar, ["Class 1", "Class 2"])
"""
ret = [[] for _ in values]
values_id = dict([(str(value), i) for i, value in enumerate(values)])
for i, ex in enumerate(data):
id = values_id.get(str(ex[attr]), None)
if id is not None:
ret[id].append(i)
return ret
def data_group_split(data, label_groups):
""" Split an `data` example table into two or more based on
contents of iterable `label_groups` containing (key, value)
pairs matching the labels of data attributes.
Example::
cancer, no_cancer = data_group_split(data, [("disease state", "cancer"), ("disease state", "normal")])
"""
ret = []
group_indices = attr_group_indices(data, label_groups)
for indices in group_indices:
attrs = [data.domain[i] for i in indices]
domain = orange.Domain(attrs, data.domain.classVar)
domain.addmetas(data.domain.getmetas())
ret.append(orange.ExampleTable(domain, data))
return ret
def select_indices(data, key, value, axis=1):
""" Return indices into `data` (ExampleTable) along specified `axis`
where:
- if axis == 0 match data[i][key] == value
- if axis == 1 match data.domain[i].attributes[key] == value
Example::
cancer_ind = select_indices(data, key="disease state", value="cancer", axis=1)
normal_ind = select_indices(data, key="disease state", value=["normal"], axis=1) # value can be a list to specify more than one value
"""
values = value if isinstance(value, list) else [value]
if axis == 0:
groups = example_group_indices(data, key, values)
else:
groups = attr_group_indices(data, [(key, val) for val in values])
return sorted(reduce(set.union, groups, set()))
def select_data(data, key, value, axis=1):
""" Return `data` (ExampleTable) subset along specified `axis` where
where:
- if axis == 0 match data[i][key] == value
- if axis == 1 match data.domain[i].attributes[key] == value
.. note:: This preserves all meta attributes of the domain
Example::
cancer = select_data(data, "disease state", "cancer", axis=1)
normal = select_data(data, "disease state", ["normal"], axis=1) # value can be a list to specify more than one value
"""
indices = select_indices(data, key, value, axis)
if axis == 0:
examples = [data[i] for i in indices]
return orange.ExampleTable(data.domain, examples)
else:
attrs = [data.domain[i] for i in indices]
domain = orange.Domain(attrs, False)
domain.addmetas(data.domain.getmetas())
return orange.ExampleTable(domain, data)
def split_data(data, groups, axis=1):
""" Split data (ExampleTable) along specified axis, where elements of
`groups` match `key` and `value` arguments of the `select_data`
function
Example::
cancer, normal = split_data(data, [("disease state", "cancer"), ("disease state", ["normal"])], axis=1)
"""
res = []
for key, value in groups:
res.append(select_data(data, key, value, axis))
return res
def geometric_mean(array):
""" Return a geometric mean computed on a 1d masked array
"""
array = numpy.ma.asanyarray(array)
return numpy.power(reduce(lambda a,b: a*b, array.filled(1.), 1.0), 1./len(array))
def harmonic_mean(array):
""" Return a harmonic mean computed ona a 1d masked array
"""
array = numpy.ma.asanyarray(array)
return len(array) / numpy.ma.sum(1. / array)
def merge_replicates(replicates, axis=0, merge_function=numpy.ma.average):
""" Merge `replicates` (numpy.array) along `axis` using `merge_function`
"""
return numpy.ma.apply_along_axis(merge_function, axis, replicates)
def ratio_intensity(G, R):
""" return the log2(R/G), log10(R*G) as a tuple
"""
log2Ratio = numpy.ma.log(R/G) / numpy.log(2)
log10Intensity = numpy.ma.log10(R*G)
return log2Ratio, log10Intensity
def MA_center_average(G, R):
""" return the G, R by centering the average log2 ratio
"""
center_est = numpy.ma.average(numpy.ma.log(R/G) / numpy.log(2))
G = G * numpy.exp2(center_est)
return G, R.copy()
def MA_center_lowess(G, R, f=2./3., iter=1, progressCallback=None):
""" return the G, R by centering the average log2 ratio locally
depending on the intensity using lowess (locally weighted linear regression)
"""
# from Bio.Statistics.lowess import lowess
ratio, intensity = ratio_intensity(G, R)
valid = - (ratio.mask & intensity.mask)
valid_ind = numpy.ma.where(valid)
center_est = lowess(intensity[valid], ratio[valid], f=f, iter=iter, progressCallback=progressCallback)
Gc, R = G.copy(), R.copy()
Gc[valid] *= numpy.exp2(center_est)
Gc.mask, R.mask = -valid, -valid
return Gc, R
def MA_center_lowess_fast(G, R, f=2./3., iter=1, resolution=100, progressCallback=None):
"""return the G, R by centering the average log2 ratio locally
depending on the intensity using lowess (locally weighted linear regression),
    approximated only at a limited resolution.
"""
ratio, intensity = ratio_intensity(G, R)
valid = - (ratio.mask & intensity.mask)
    resolution = min(resolution, len(intensity[valid]))
hist, edges = numpy.histogram(intensity[valid], len(intensity[valid])/resolution)
progressCallback2 = (lambda val: progressCallback(val/2)) if progressCallback else None
centered = lowess2(intensity[valid], ratio[valid], edges, f, iter, progressCallback=progressCallback2)
progressCallback2 = (lambda val: progressCallback(50 + val/2)) if progressCallback else None
centered = lowess2(edges, centered, intensity[valid], f, iter, progressCallback=progressCallback2)
Gc, R = G.copy(), R.copy()
Gc[valid] *= numpy.exp2(centered)
Gc.mask, R.mask = -valid, -valid
return Gc, R
def MA_plot(G, R, format="b."):
""" Plot G, R on a MA-plot using matplotlib
"""
import matplotlib.pyplot as plt
ratio, intensity = ratio_intensity(G, R)
plt.plot(intensity, ratio, format)
    plt.ylabel('M = log2(R/G)')
plt.xlabel('A = log10(R*G)')
def normalize_expression_data(data, groups, axis=1, merge_function=numpy.ma.average, center_function=MA_center_lowess_fast):
""" A helper function that normalizes expression array example table, by centering the MA plot.
"""
if isinstance(data, orange.ExampleTable):
label_groups = [select_indices(data, key, value, axis) for key, value in groups]
array, _, _ = data.toNumpyMA()
merged = []
for indices in label_groups:
replicates = numpy.take(array, indices, axis=1)
merged.append(merge_replicates(replicates, axis=1, merge_function=merge_function))
ind1, ind2 = label_groups
G, R = merged
Gc, Rc = center_function(G, R)
domain = orange.Domain(data.domain.attributes, data.domain.classVar)
domain.addmetas(data.domain.getmetas())
data = orange.ExampleTable(domain, data)
GFactors = Gc/G
if axis == 0:
for i, gf in zip(ind1, GFactors):
for attr in range(len(data[i])):
if not data[i][attr].isSpecial():
data[i][attr] = float(data[i][attr]) * gf
else:
for ex, gf in zip(data, GFactors):
for i in ind1:
if not ex[i].isSpecial():
ex[i] = float(ex[i]) * gf
return data
def MA_zscore(G, R, window=1./5., padded=False, progressCallback=None):
""" Return the Z-score of log2 fold ratio estimated from local
distribution of log2 fold ratio values on the MA-plot
"""
ratio, intensity = ratio_intensity(G, R)
z_scores = numpy.ma.zeros(G.shape)
sorted = list(numpy.ma.argsort(intensity))
import math, random
r = int(math.ceil(len(sorted)*window)) # number of window elements
def local_indices(i, sorted):
""" local indices in sorted (mirror padded if out of bounds)
"""
start, end = i - r/2, i + r/2 + r%2
pad_start , pad_end = [], []
if start < 0:
pad_start = sorted[:abs(start)]
random.shuffle(pad_start)
start = 0
if end > len(sorted):
pad_end = sorted[end - len(sorted):]
random.shuffle(pad_end)
end = len(sorted)
if padded:
return pad_start + sorted[start: end] + pad_end
else:
return sorted[start:end]
milestones = orngMisc.progressBarMilestones(len(sorted))
for i in range(len(sorted)):
indices = local_indices(i, sorted)
localRatio = numpy.take(ratio, indices)
local_std = numpy.ma.std(localRatio)
ind = sorted[i]
z_scores[ind] = ratio[ind] / local_std
if progressCallback and i in milestones:
progressCallback(100. * i / len(sorted))
z_scores._mask = - numpy.isfinite(z_scores)
return z_scores
|
[
"numpy.ma.sum",
"numpy.abs",
"numpy.sum",
"orange.ExampleTable",
"statc.mean",
"numpy.ma.where",
"random.shuffle",
"numpy.ones",
"numpy.clip",
"numpy.ma.log",
"numpy.ma.mean",
"numpy.ma.transpose",
"numpy.linalg.solve",
"numpy.ma.asarray",
"Orange.orng.orngMisc.progressBarMilestones",
"numpy.transpose",
"numpy.isfinite",
"numpy.ma.asanyarray",
"statc.betai",
"numpy.random.shuffle",
"numpy.ceil",
"numpy.corrcoef",
"numpy.asarray",
"numpy.ma.var",
"numpy.ma.argsort",
"numpy.ma.concatenate",
"matplotlib.pyplot.ylabel",
"numpy.ma.zeros",
"numpy.ma.sqrt",
"statc.std",
"numpy.ma.dot",
"orange.Domain",
"numpy.ma.apply_along_axis",
"numpy.exp2",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.ma.log10",
"numpy.zeros",
"numpy.ma.nonzero",
"numpy.ma.array",
"numpy.ma.std",
"numpy.array",
"numpy.ma.ones",
"numpy.ma.sort",
"numpy.take",
"matplotlib.pyplot.xlabel"
] |
[((12178, 12231), 'numpy.ma.concatenate', 'ma.concatenate', (['arrays', '(dim if dim is not None else 0)'], {}), '(arrays, dim if dim is not None else 0)\n', (12192, 12231), True, 'import numpy.ma as ma\n'), ((13455, 13480), 'numpy.ma.sum', 'ma.sum', (['observed', '(dim + 1)'], {}), '(observed, dim + 1)\n', (13461, 13480), True, 'import numpy.ma as ma\n'), ((13496, 13517), 'numpy.ma.sum', 'ma.sum', (['observed', 'dim'], {}), '(observed, dim)\n', (13502, 13517), True, 'import numpy.ma as ma\n'), ((13530, 13551), 'numpy.ma.sum', 'ma.sum', (['rowtotal', 'dim'], {}), '(rowtotal, dim)\n', (13536, 13551), True, 'import numpy.ma as ma\n'), ((14094, 14113), 'numpy.ma.sort', 'ma.sort', (['array', 'dim'], {}), '(array, dim)\n', (14101, 14113), True, 'import numpy.ma as ma\n'), ((14905, 14923), 'numpy.ma.sum', 'ma.sum', (['array', 'dim'], {}), '(array, dim)\n', (14911, 14923), True, 'import numpy.ma as ma\n'), ((14968, 14986), 'numpy.ma.sum', 'ma.sum', (['array', 'dim'], {}), '(array, dim)\n', (14974, 14986), True, 'import numpy.ma as ma\n'), ((17889, 17910), 'numpy.abs', 'numpy.abs', (['dist', 'dist'], {}), '(dist, dist)\n', (17898, 17910), False, 'import numpy\n'), ((18018, 18033), 'numpy.abs', 'numpy.abs', (['w', 'w'], {}), '(w, w)\n', (18027, 18033), False, 'import numpy\n'), ((18042, 18068), 'numpy.clip', 'numpy.clip', (['w', '(0.0)', '(1.0)', 'w'], {}), '(w, 0.0, 1.0, w)\n', (18052, 18068), False, 'import numpy\n'), ((18159, 18173), 'numpy.zeros', 'numpy.zeros', (['n'], {}), '(n)\n', (18170, 18173), False, 'import numpy\n'), ((18186, 18199), 'numpy.ones', 'numpy.ones', (['n'], {}), '(n)\n', (18196, 18199), False, 'import numpy\n'), ((18217, 18257), 'Orange.orng.orngMisc.progressBarMilestones', 'orngMisc.progressBarMilestones', (['(iter * n)'], {}), '(iter * n)\n', (18247, 18257), False, 'from Orange.orng import orngMisc\n'), ((19966, 19987), 'numpy.asarray', 'numpy.asarray', (['x', '"""f"""'], {}), "(x, 'f')\n", (19979, 19987), False, 'import numpy\n'), ((19996, 20017), 'numpy.asarray', 'numpy.asarray', (['y', '"""f"""'], {}), "(y, 'f')\n", (20009, 20017), False, 'import numpy\n'), ((20029, 20053), 'numpy.asarray', 'numpy.asarray', (['xest', '"""f"""'], {}), "(xest, 'f')\n", (20042, 20053), False, 'import numpy\n'), ((20322, 20343), 'numpy.abs', 'numpy.abs', (['dist', 'dist'], {}), '(dist, dist)\n', (20331, 20343), False, 'import numpy\n'), ((20469, 20484), 'numpy.abs', 'numpy.abs', (['w', 'w'], {}), '(w, w)\n', (20478, 20484), False, 'import numpy\n'), ((20493, 20519), 'numpy.clip', 'numpy.clip', (['w', '(0.0)', '(1.0)', 'w'], {}), '(w, 0.0, 1.0, w)\n', (20503, 20519), False, 'import numpy\n'), ((20849, 20870), 'numpy.abs', 'numpy.abs', (['dist', 'dist'], {}), '(dist, dist)\n', (20858, 20870), False, 'import numpy\n'), ((21119, 21140), 'numpy.abs', 'numpy.abs', (['west', 'west'], {}), '(west, west)\n', (21128, 21140), False, 'import numpy\n'), ((21152, 21184), 'numpy.clip', 'numpy.clip', (['west', '(0.0)', '(1.0)', 'west'], {}), '(west, 0.0, 1.0, west)\n', (21162, 21184), False, 'import numpy\n'), ((21333, 21352), 'numpy.zeros', 'numpy.zeros', (['n', '"""f"""'], {}), "(n, 'f')\n", (21344, 21352), False, 'import numpy\n'), ((21364, 21386), 'numpy.zeros', 'numpy.zeros', (['nest', '"""f"""'], {}), "(nest, 'f')\n", (21375, 21386), False, 'import numpy\n'), ((21398, 21416), 'numpy.ones', 'numpy.ones', (['n', '"""f"""'], {}), "(n, 'f')\n", (21408, 21416), False, 'import numpy\n'), ((21488, 21530), 'Orange.orng.orngMisc.progressBarMilestones', 'orngMisc.progressBarMilestones', (['iter_count'], {}), 
'(iter_count)\n', (21518, 21530), False, 'from Orange.orng import orngMisc\n'), ((27008, 27034), 'numpy.ma.asanyarray', 'numpy.ma.asanyarray', (['array'], {}), '(array)\n', (27027, 27034), False, 'import numpy\n'), ((27231, 27257), 'numpy.ma.asanyarray', 'numpy.ma.asanyarray', (['array'], {}), '(array)\n', (27250, 27257), False, 'import numpy\n'), ((27480, 27539), 'numpy.ma.apply_along_axis', 'numpy.ma.apply_along_axis', (['merge_function', 'axis', 'replicates'], {}), '(merge_function, axis, replicates)\n', (27505, 27539), False, 'import numpy\n'), ((27699, 27720), 'numpy.ma.log10', 'numpy.ma.log10', (['(R * G)'], {}), '(R * G)\n', (27713, 27720), False, 'import numpy\n'), ((28359, 28380), 'numpy.ma.where', 'numpy.ma.where', (['valid'], {}), '(valid)\n', (28373, 28380), False, 'import numpy\n'), ((28536, 28558), 'numpy.exp2', 'numpy.exp2', (['center_est'], {}), '(center_est)\n', (28546, 28558), False, 'import numpy\n'), ((29603, 29623), 'numpy.exp2', 'numpy.exp2', (['centered'], {}), '(centered)\n', (29613, 29623), False, 'import numpy\n'), ((29853, 29887), 'matplotlib.pyplot.plot', 'plt.plot', (['intensity', 'ratio', 'format'], {}), '(intensity, ratio, format)\n', (29861, 29887), True, 'import matplotlib.pyplot as plt\n'), ((29892, 29918), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""M = log2(R/G"""'], {}), "('M = log2(R/G')\n", (29902, 29918), True, 'import matplotlib.pyplot as plt\n'), ((29923, 29951), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""A = log10(R*G)"""'], {}), "('A = log10(R*G)')\n", (29933, 29951), True, 'import matplotlib.pyplot as plt\n'), ((30677, 30736), 'orange.Domain', 'orange.Domain', (['data.domain.attributes', 'data.domain.classVar'], {}), '(data.domain.attributes, data.domain.classVar)\n', (30690, 30736), False, 'import orange, statc\n'), ((30792, 30825), 'orange.ExampleTable', 'orange.ExampleTable', (['domain', 'data'], {}), '(domain, data)\n', (30811, 30825), False, 'import orange, statc\n'), ((31540, 31563), 'numpy.ma.zeros', 'numpy.ma.zeros', (['G.shape'], {}), '(G.shape)\n', (31554, 31563), False, 'import numpy\n'), ((338, 402), 'orange.Domain', 'orange.Domain', (['[data.domain.attributes[i]]', 'data.domain.classVar'], {}), '([data.domain.attributes[i]], data.domain.classVar)\n', (351, 402), False, 'import orange, statc\n'), ((419, 450), 'orange.ExampleTable', 'orange.ExampleTable', (['dom2', 'data'], {}), '(dom2, data)\n', (438, 450), False, 'import orange, statc\n'), ((2513, 2566), 'orange.Domain', 'orange.Domain', (['[data.domain[i]]', 'data.domain.classVar'], {}), '([data.domain[i]], data.domain.classVar)\n', (2526, 2566), False, 'import orange, statc\n'), ((2582, 2613), 'orange.ExampleTable', 'orange.ExampleTable', (['dom2', 'data'], {}), '(dom2, data)\n', (2601, 2613), False, 'import orange, statc\n'), ((3370, 3423), 'orange.Domain', 'orange.Domain', (['[data.domain[i]]', 'data.domain.classVar'], {}), '([data.domain[i]], data.domain.classVar)\n', (3383, 3423), False, 'import orange, statc\n'), ((3439, 3470), 'orange.ExampleTable', 'orange.ExampleTable', (['dom2', 'data'], {}), '(dom2, data)\n', (3458, 3470), False, 'import orange, statc\n'), ((4126, 4179), 'orange.Domain', 'orange.Domain', (['[data.domain[i]]', 'data.domain.classVar'], {}), '([data.domain[i]], data.domain.classVar)\n', (4139, 4179), False, 'import orange, statc\n'), ((4195, 4226), 'orange.ExampleTable', 'orange.ExampleTable', (['dom2', 'data'], {}), '(dom2, data)\n', (4214, 4226), False, 'import orange, statc\n'), ((5295, 5366), 'numpy.array', 'np.array', (['(self.attr_labels if 
useAttributeLabels else self.data_classes)'], {}), '(self.attr_labels if useAttributeLabels else self.data_classes)\n', (5303, 5366), True, 'import numpy as np\n'), ((9516, 9548), 'numpy.ma.zeros', 'ma.zeros', (['(array.shape[1], 2, 5)'], {}), '((array.shape[1], 2, 5))\n', (9524, 9548), True, 'import numpy.ma as ma\n'), ((10191, 10223), 'numpy.ma.zeros', 'ma.zeros', (['(array.shape[1], 2, 5)'], {}), '((array.shape[1], 2, 5))\n', (10199, 10223), True, 'import numpy.ma as ma\n'), ((11323, 11338), 'numpy.ma.mean', 'ma.mean', (['a', 'dim'], {}), '(a, dim)\n', (11330, 11338), True, 'import numpy.ma as ma\n'), ((11340, 11355), 'numpy.ma.mean', 'ma.mean', (['b', 'dim'], {}), '(b, dim)\n', (11347, 11355), True, 'import numpy.ma as ma\n'), ((11369, 11383), 'numpy.ma.var', 'ma.var', (['a', 'dim'], {}), '(a, dim)\n', (11375, 11383), True, 'import numpy.ma as ma\n'), ((11385, 11399), 'numpy.ma.var', 'ma.var', (['b', 'dim'], {}), '(b, dim)\n', (11391, 11399), True, 'import numpy.ma as ma\n'), ((11561, 11598), 'numpy.ma.sqrt', 'ma.sqrt', (['(svar * (1.0 / n1 + 1.0 / n2))'], {}), '(svar * (1.0 / n1 + 1.0 / n2))\n', (11568, 11598), True, 'import numpy.ma as ma\n'), ((11993, 12008), 'numpy.ma.mean', 'ma.mean', (['a', 'dim'], {}), '(a, dim)\n', (12000, 12008), True, 'import numpy.ma as ma\n'), ((12038, 12052), 'numpy.ma.var', 'ma.var', (['a', 'dim'], {}), '(a, dim)\n', (12044, 12052), True, 'import numpy.ma as ma\n'), ((12321, 12346), 'numpy.ma.sum', 'ma.sum', (['(alldata ** 2)', 'dim'], {}), '(alldata ** 2, dim)\n', (12327, 12346), True, 'import numpy.ma as ma\n'), ((13317, 13337), 'numpy.ma.array', 'ma.array', (['[observed]'], {}), '([observed])\n', (13325, 13337), True, 'import numpy.ma as ma\n'), ((13572, 13595), 'numpy.ma.ones', 'ma.ones', (['observed.shape'], {}), '(observed.shape)\n', (13579, 13595), True, 'import numpy.ma as ma\n'), ((13818, 13872), 'numpy.ma.sum', 'ma.sum', (['((observed - expected) ** 2 / expected)', '(dim + 1)'], {}), '((observed - expected) ** 2 / expected, dim + 1)\n', (13824, 13872), True, 'import numpy.ma as ma\n'), ((14655, 14685), 'numpy.ma.nonzero', 'ma.nonzero', (['(array <= points[0])'], {}), '(array <= points[0])\n', (14665, 14685), True, 'import numpy.ma as ma\n'), ((14936, 14949), 'numpy.ma.log', 'ma.log', (['array'], {}), '(array)\n', (14942, 14949), True, 'import numpy.ma as ma\n'), ((15022, 15033), 'numpy.ma.log', 'ma.log', (['(2.0)'], {}), '(2.0)\n', (15028, 15033), True, 'import numpy.ma as ma\n'), ((17857, 17877), 'numpy.transpose', 'numpy.transpose', (['[x]'], {}), '([x])\n', (17872, 17877), False, 'import numpy\n'), ((17978, 17998), 'numpy.transpose', 'numpy.transpose', (['[x]'], {}), '([x])\n', (17993, 17998), False, 'import numpy\n'), ((19029, 19067), 'numpy.clip', 'numpy.clip', (['(residuals / (6 * s))', '(-1)', '(1)'], {}), '(residuals / (6 * s), -1, 1)\n', (19039, 19067), False, 'import numpy\n'), ((20290, 20310), 'numpy.transpose', 'numpy.transpose', (['[x]'], {}), '([x])\n', (20305, 20310), False, 'import numpy\n'), ((20429, 20449), 'numpy.transpose', 'numpy.transpose', (['[x]'], {}), '([x])\n', (20444, 20449), False, 'import numpy\n'), ((20814, 20837), 'numpy.transpose', 'numpy.transpose', (['[xest]'], {}), '([xest])\n', (20829, 20837), False, 'import numpy\n'), ((21070, 21090), 'numpy.transpose', 'numpy.transpose', (['[x]'], {}), '([x])\n', (21085, 21090), False, 'import numpy\n'), ((24548, 24590), 'orange.Domain', 'orange.Domain', (['attrs', 'data.domain.classVar'], {}), '(attrs, data.domain.classVar)\n', (24561, 24590), False, 'import orange, 
statc\n'), ((26190, 26232), 'orange.ExampleTable', 'orange.ExampleTable', (['data.domain', 'examples'], {}), '(data.domain, examples)\n', (26209, 26232), False, 'import orange, statc\n'), ((26310, 26337), 'orange.Domain', 'orange.Domain', (['attrs', '(False)'], {}), '(attrs, False)\n', (26323, 26337), False, 'import orange, statc\n'), ((26401, 26434), 'orange.ExampleTable', 'orange.ExampleTable', (['domain', 'data'], {}), '(domain, data)\n', (26420, 26434), False, 'import orange, statc\n'), ((27282, 27307), 'numpy.ma.sum', 'numpy.ma.sum', (['(1.0 / array)'], {}), '(1.0 / array)\n', (27294, 27307), False, 'import numpy\n'), ((27645, 27664), 'numpy.ma.log', 'numpy.ma.log', (['(R / G)'], {}), '(R / G)\n', (27657, 27664), False, 'import numpy\n'), ((27665, 27677), 'numpy.log', 'numpy.log', (['(2)'], {}), '(2)\n', (27674, 27677), False, 'import numpy\n'), ((27935, 27957), 'numpy.exp2', 'numpy.exp2', (['center_est'], {}), '(center_est)\n', (27945, 27957), False, 'import numpy\n'), ((30441, 30475), 'numpy.take', 'numpy.take', (['array', 'indices'], {'axis': '(1)'}), '(array, indices, axis=1)\n', (30451, 30475), False, 'import numpy\n'), ((31582, 31609), 'numpy.ma.argsort', 'numpy.ma.argsort', (['intensity'], {}), '(intensity)\n', (31598, 31609), False, 'import numpy\n'), ((32475, 32501), 'numpy.take', 'numpy.take', (['ratio', 'indices'], {}), '(ratio, indices)\n', (32485, 32501), False, 'import numpy\n'), ((32522, 32546), 'numpy.ma.std', 'numpy.ma.std', (['localRatio'], {}), '(localRatio)\n', (32534, 32546), False, 'import numpy\n'), ((32752, 32776), 'numpy.isfinite', 'numpy.isfinite', (['z_scores'], {}), '(z_scores)\n', (32766, 32776), False, 'import numpy\n'), ((501, 527), 'numpy.corrcoef', 'numpy.corrcoef', (['c', 'a[:, 0]'], {}), '(c, a[:, 0])\n', (515, 527), False, 'import numpy\n'), ((1501, 1513), 'statc.std', 'statc.std', (['l'], {}), '(l)\n', (1510, 1513), False, 'import orange, statc\n'), ((1555, 1568), 'statc.mean', 'statc.mean', (['l'], {}), '(l)\n', (1565, 1568), False, 'import orange, statc\n'), ((5552, 5576), 'numpy.ma.transpose', 'ma.transpose', (['self.array'], {}), '(self.array)\n', (5564, 5576), True, 'import numpy.ma as ma\n'), ((7691, 7722), 'numpy.random.shuffle', 'np.random.shuffle', (['self.classes'], {}), '(self.classes)\n', (7708, 7722), True, 'import numpy as np\n'), ((8376, 8397), 'numpy.ma.mean', 'ma.mean', (['a1', 'self.dim'], {}), '(a1, self.dim)\n', (8383, 8397), True, 'import numpy.ma as ma\n'), ((8398, 8419), 'numpy.ma.mean', 'ma.mean', (['a2', 'self.dim'], {}), '(a2, self.dim)\n', (8405, 8419), True, 'import numpy.ma as ma\n'), ((12260, 12282), 'numpy.ma.ones', 'ma.ones', (['alldata.shape'], {}), '(alldata.shape)\n', (12267, 12282), True, 'import numpy.ma as ma\n'), ((12528, 12548), 'numpy.ma.sum', 'ma.sum', (['alldata', 'dim'], {}), '(alldata, dim)\n', (12534, 12548), True, 'import numpy.ma as ma\n'), ((13983, 14014), 'numpy.ma.ones', 'ma.ones', (['array.shape'], {'dtype': 'int'}), '(array.shape, dtype=int)\n', (13990, 14014), True, 'import numpy.ma as ma\n'), ((14600, 14621), 'numpy.ma.zeros', 'ma.zeros', (['array.shape'], {}), '(array.shape)\n', (14608, 14621), True, 'import numpy.ma as ma\n'), ((14740, 14769), 'numpy.ma.nonzero', 'ma.nonzero', (['(array > points[i])'], {}), '(array > points[i])\n', (14750, 14769), True, 'import numpy.ma as ma\n'), ((14999, 15008), 'numpy.ma.log', 'ma.log', (['n'], {}), '(n)\n', (15005, 15008), True, 'import numpy.ma as ma\n'), ((17521, 17538), 'numpy.ceil', 'numpy.ceil', (['(f * n)'], {}), '(f * n)\n', (17531, 17538), False, 
'import numpy\n'), ((18412, 18436), 'numpy.ma.dot', 'numpy.ma.dot', (['weights', 'y'], {}), '(weights, y)\n', (18424, 18436), False, 'import numpy\n'), ((18453, 18483), 'numpy.ma.dot', 'numpy.ma.dot', (['weights_mul_x', 'y'], {}), '(weights_mul_x, y)\n', (18465, 18483), False, 'import numpy\n'), ((18591, 18621), 'numpy.ma.dot', 'numpy.ma.dot', (['weights_mul_x', 'x'], {}), '(weights_mul_x, x)\n', (18603, 18621), False, 'import numpy\n'), ((20106, 20123), 'numpy.ceil', 'numpy.ceil', (['(f * n)'], {}), '(f * n)\n', (20116, 20123), False, 'import numpy\n'), ((21889, 21913), 'numpy.linalg.solve', 'numpy.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (21907, 21913), False, 'import numpy\n'), ((22844, 22882), 'numpy.clip', 'numpy.clip', (['(residuals / (6 * s))', '(-1)', '(1)'], {}), '(residuals / (6 * s), -1, 1)\n', (22854, 22882), False, 'import numpy\n'), ((24658, 24691), 'orange.ExampleTable', 'orange.ExampleTable', (['domain', 'data'], {}), '(domain, data)\n', (24677, 24691), False, 'import orange, statc\n'), ((27889, 27908), 'numpy.ma.log', 'numpy.ma.log', (['(R / G)'], {}), '(R / G)\n', (27901, 27908), False, 'import numpy\n'), ((27909, 27921), 'numpy.log', 'numpy.log', (['(2)'], {}), '(2)\n', (27918, 27921), False, 'import numpy\n'), ((31980, 32005), 'random.shuffle', 'random.shuffle', (['pad_start'], {}), '(pad_start)\n', (31994, 32005), False, 'import math, random\n'), ((32119, 32142), 'random.shuffle', 'random.shuffle', (['pad_end'], {}), '(pad_end)\n', (32133, 32142), False, 'import math, random\n'), ((8686, 8707), 'numpy.ma.mean', 'ma.mean', (['a1', 'self.dim'], {}), '(a1, self.dim)\n', (8693, 8707), True, 'import numpy.ma as ma\n'), ((8710, 8731), 'numpy.ma.mean', 'ma.mean', (['a2', 'self.dim'], {}), '(a2, self.dim)\n', (8717, 8731), True, 'import numpy.ma as ma\n'), ((9836, 9850), 'numpy.array', 'np.array', (['dist'], {}), '(dist)\n', (9844, 9850), True, 'import numpy as np\n'), ((10594, 10609), 'numpy.ma.sum', 'ma.sum', (['dist', '(1)'], {}), '(dist, 1)\n', (10600, 10609), True, 'import numpy.ma as ma\n'), ((11632, 11678), 'statc.betai', 'statc.betai', (['(0.5 * df)', '(0.5)', '(df / (df + t ** 2))'], {}), '(0.5 * df, 0.5, df / (df + t ** 2))\n', (11643, 11678), False, 'import orange, statc\n'), ((11757, 11800), 'statc.betai', 'statc.betai', (['(0.5 * df)', '(0.5)', '(df / (df + tsq))'], {}), '(0.5 * df, 0.5, df / (df + tsq))\n', (11768, 11800), False, 'import orange, statc\n'), ((12098, 12114), 'numpy.ma.ones', 'ma.ones', (['a.shape'], {}), '(a.shape)\n', (12105, 12114), True, 'import numpy.ma as ma\n'), ((12350, 12370), 'numpy.ma.sum', 'ma.sum', (['alldata', 'dim'], {}), '(alldata, dim)\n', (12356, 12370), True, 'import numpy.ma as ma\n'), ((13697, 13737), 'numpy.zeros', 'np.zeros', (['observed.shape[dim]'], {'dtype': 'int'}), '(observed.shape[dim], dtype=int)\n', (13705, 13737), True, 'import numpy as np\n'), ((22505, 22529), 'numpy.linalg.solve', 'numpy.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (22523, 22529), False, 'import numpy\n'), ((22802, 22822), 'numpy.abs', 'numpy.abs', (['residuals'], {}), '(residuals)\n', (22811, 22822), False, 'import numpy\n'), ((8744, 8764), 'numpy.ma.var', 'ma.var', (['a1', 'self.dim'], {}), '(a1, self.dim)\n', (8750, 8764), True, 'import numpy.ma as ma\n'), ((8776, 8796), 'numpy.ma.var', 'ma.var', (['a2', 'self.dim'], {}), '(a2, self.dim)\n', (8782, 8796), True, 'import numpy.ma as ma\n'), ((10565, 10580), 'numpy.ma.sum', 'ma.sum', (['dist', '(1)'], {}), '(dist, 1)\n', (10571, 10580), True, 'import numpy.ma as ma\n'), ((12404, 12418), 
'numpy.ma.sum', 'ma.sum', (['a', 'dim'], {}), '(a, dim)\n', (12410, 12418), True, 'import numpy.ma as ma\n'), ((21701, 21723), 'numpy.sum', 'numpy.sum', (['(weights * y)'], {}), '(weights * y)\n', (21710, 21723), False, 'import numpy\n'), ((21723, 21749), 'numpy.sum', 'numpy.sum', (['(weights * y * x)'], {}), '(weights * y * x)\n', (21732, 21749), False, 'import numpy\n'), ((6772, 6801), 'numpy.ma.nonzero', 'ma.nonzero', (['(self.classes == t)'], {}), '(self.classes == t)\n', (6782, 6801), True, 'import numpy.ma as ma\n'), ((9608, 9625), 'numpy.ma.ones', 'ma.ones', (['a1.shape'], {}), '(a1.shape)\n', (9615, 9625), True, 'import numpy.ma as ma\n'), ((9675, 9692), 'numpy.ma.ones', 'ma.ones', (['a2.shape'], {}), '(a2.shape)\n', (9682, 9692), True, 'import numpy.ma as ma\n'), ((10283, 10300), 'numpy.ma.ones', 'ma.ones', (['a1.shape'], {}), '(a1.shape)\n', (10290, 10300), True, 'import numpy.ma as ma\n'), ((10350, 10367), 'numpy.ma.ones', 'ma.ones', (['a2.shape'], {}), '(a2.shape)\n', (10357, 10367), True, 'import numpy.ma as ma\n'), ((12121, 12134), 'numpy.ma.asarray', 'ma.asarray', (['a'], {}), '(a)\n', (12131, 12134), True, 'import numpy.ma as ma\n'), ((14384, 14414), 'numpy.array', 'numpy.array', (['cutend'], {'dtype': 'int'}), '(cutend, dtype=int)\n', (14395, 14414), False, 'import numpy\n'), ((14456, 14486), 'numpy.array', 'numpy.array', (['cutend'], {'dtype': 'int'}), '(cutend, dtype=int)\n', (14467, 14486), False, 'import numpy\n'), ((21778, 21796), 'numpy.sum', 'numpy.sum', (['weights'], {}), '(weights)\n', (21787, 21796), False, 'import numpy\n'), ((21798, 21820), 'numpy.sum', 'numpy.sum', (['(weights * x)'], {}), '(weights * x)\n', (21807, 21820), False, 'import numpy\n'), ((21822, 21844), 'numpy.sum', 'numpy.sum', (['(weights * x)'], {}), '(weights * x)\n', (21831, 21844), False, 'import numpy\n'), ((21844, 21870), 'numpy.sum', 'numpy.sum', (['(weights * x * x)'], {}), '(weights * x * x)\n', (21853, 21870), False, 'import numpy\n'), ((22309, 22331), 'numpy.sum', 'numpy.sum', (['(weights * y)'], {}), '(weights * y)\n', (22318, 22331), False, 'import numpy\n'), ((22331, 22357), 'numpy.sum', 'numpy.sum', (['(weights * y * x)'], {}), '(weights * y * x)\n', (22340, 22357), False, 'import numpy\n'), ((22390, 22408), 'numpy.sum', 'numpy.sum', (['weights'], {}), '(weights)\n', (22399, 22408), False, 'import numpy\n'), ((22410, 22432), 'numpy.sum', 'numpy.sum', (['(weights * x)'], {}), '(weights * x)\n', (22419, 22432), False, 'import numpy\n'), ((22434, 22456), 'numpy.sum', 'numpy.sum', (['(weights * x)'], {}), '(weights * x)\n', (22443, 22456), False, 'import numpy\n'), ((22456, 22482), 'numpy.sum', 'numpy.sum', (['(weights * x * x)'], {}), '(weights * x * x)\n', (22465, 22482), False, 'import numpy\n')]
|
from __future__ import print_function
from PIL import Image
import numpy as np
import os
import cv2
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms.functional as TF
import math
import pickle
class ImageTransformer(object):
"""
Rescale the image in a sample to a given size.
"""
def __init__(self, output_size):
"""
Args:
output_size (tuple or int): Desired output size. If tuple, output is matched to output_size.
If int, smaller of image edges is matched to output_size keeping aspect ratio the same.
"""
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
images = sample['images']
resized_images = []
for image in images:
image = cv2.resize(image, (self.output_size, self.output_size))
image = image.astype(np.float32)
image /= 255.0
image = image * 2 - 1
image = np.transpose(image, (2, 0, 1))
resized_images.append(image)
resized_images = np.stack(resized_images, axis=0)
sample['images'] = resized_images
return sample
class ImageNormalizeToTensor(object):
"""
    Convert a PIL image to a tensor and scale its values to the range [-1, 1].
"""
def __call__(self, image):
# image = F.to_tensor(image)
image = TF.to_tensor(image)
image.mul_(2.0)
image.sub_(1.0)
return image
class ToTensor(object):
"""
Convert ndarrays in sample to Tensors.
"""
def __call__(self, sample):
sample['images'] = torch.Tensor(sample['images']).float()
sample['smpls'] = torch.Tensor(sample['smpls']).float()
return sample
class ToTensorDensePose(object):
"""
Convert ndarrays in sample to Tensors.
"""
def __call__(self, sample):
sample['images'] = torch.Tensor(sample['images']).float()
sample['smpl'] = torch.Tensor(sample['smpl']).float()
sample['uvs'] = torch.Tensor(sample['uvs']).float()
sample['mask'] = torch.Tensor(sample['mask']).int()
return sample
def morph(src_bg_mask, ks, mode='erode', kernel=None):
n_ks = ks ** 2
pad_s = ks // 2
if kernel is None:
kernel = torch.ones(1, 1, ks, ks, dtype=torch.float32, device=src_bg_mask.device)
if mode == 'erode':
src_bg_mask_pad = F.pad(src_bg_mask, [pad_s, pad_s, pad_s, pad_s], value=1.0)
out = F.conv2d(src_bg_mask_pad, kernel)
out = (out == n_ks).float()
else:
src_bg_mask_pad = F.pad(src_bg_mask, [pad_s, pad_s, pad_s, pad_s], value=0.0)
out = F.conv2d(src_bg_mask_pad, kernel)
out = (out >= 1).float()
return out
def cal_mask_bbox(head_mask, factor=1.3):
"""
Args:
head_mask (np.ndarray): (N, 1, 256, 256).
factor (float): the factor to enlarge the bbox of head.
Returns:
        bbox (np.ndarray.int32): (N, 4); here, 4 = (left_top_x, right_top_x, left_top_y, right_top_y)
"""
bs, _, height, width = head_mask.shape
bbox = np.zeros((bs, 4), dtype=np.int32)
valid = np.ones((bs,), dtype=np.float32)
for i in range(bs):
mask = head_mask[i, 0]
ys, xs = np.where(mask == 1)
if len(ys) == 0:
valid[i] = 0.0
bbox[i, 0] = 0
bbox[i, 1] = width
bbox[i, 2] = 0
bbox[i, 3] = height
continue
lt_y = np.min(ys) # left top of Y
lt_x = np.min(xs) # left top of X
rt_y = np.max(ys) # right top of Y
rt_x = np.max(xs) # right top of X
h = rt_y - lt_y # height of head
w = rt_x - lt_x # width of head
cy = (lt_y + rt_y) // 2 # (center of y)
cx = (lt_x + rt_x) // 2 # (center of x)
_h = h * factor
_w = w * factor
_lt_y = max(0, int(cy - _h / 2))
_lt_x = max(0, int(cx - _w / 2))
_rt_y = min(height, int(cy + _h / 2))
_rt_x = min(width, int(cx + _w / 2))
if (_lt_x == _rt_x) or (_lt_y == _rt_y):
valid[i] = 0.0
bbox[i, 0] = 0
bbox[i, 1] = width
bbox[i, 2] = 0
bbox[i, 3] = height
else:
bbox[i, 0] = _lt_x
bbox[i, 1] = _rt_x
bbox[i, 2] = _lt_y
bbox[i, 3] = _rt_y
return bbox, valid
def to_tensor(tensor):
if isinstance(tensor, np.ndarray):
tensor = torch.FloatTensor(tensor)
return tensor
def plot_fim_enc(fim_enc, map_name):
# import matplotlib.pyplot as plt
import utils.mesh as mesh
if not isinstance(fim_enc, np.ndarray):
fim_enc = fim_enc.cpu().numpy()
if fim_enc.ndim != 4:
fim_enc = fim_enc[np.newaxis, ...]
fim_enc = np.transpose(fim_enc, axes=(0, 2, 3, 1))
imgs = []
for fim_i in fim_enc:
img = mesh.cvt_fim_enc(fim_i, map_name)
imgs.append(img)
return np.stack(imgs, axis=0)
def tensor2im(img, imtype=np.uint8, unnormalize=True, idx=0, nrows=None):
# select a sample or create grid if img is a batch
if len(img.shape) == 4:
nrows = nrows if nrows is not None else int(math.sqrt(img.size(0)))
img = img[idx] if idx >= 0 else torchvision.utils.make_grid(img, nrows)
img = img.cpu().float()
if unnormalize:
img += 1.0
img /= 2.0
image_numpy = img.numpy()
# image_numpy = np.transpose(image_numpy, (1, 2, 0))
image_numpy *= 255.0
return image_numpy.astype(imtype)
def tensor2maskim(mask, imtype=np.uint8, idx=0, nrows=1):
im = tensor2im(mask, imtype=imtype, idx=idx, unnormalize=False, nrows=nrows)
if im.shape[2] == 1:
im = np.repeat(im, 3, axis=-1)
return im
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
return paths
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
return path
def clear_dir(path):
import shutil
if os.path.exists(path):
shutil.rmtree(path)
return mkdir(path)
def save_image(image_numpy, image_path):
mkdir(os.path.dirname(image_path))
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def load_pickle_file(pkl_path):
with open(pkl_path, 'rb') as f:
data = pickle.load(f, encoding='latin1')
return data
def write_pickle_file(pkl_path, data_dict):
with open(pkl_path, 'wb') as fp:
pickle.dump(data_dict, fp, protocol=2)
|
[
"pickle.dump",
"torchvision.transforms.functional.to_tensor",
"numpy.ones",
"pickle.load",
"shutil.rmtree",
"torch.nn.functional.pad",
"torch.ones",
"os.path.dirname",
"numpy.transpose",
"os.path.exists",
"torch.FloatTensor",
"numpy.max",
"torch.Tensor",
"cv2.resize",
"numpy.repeat",
"numpy.stack",
"torch.nn.functional.conv2d",
"utils.mesh.cvt_fim_enc",
"numpy.min",
"os.makedirs",
"numpy.zeros",
"torchvision.utils.make_grid",
"numpy.where",
"PIL.Image.fromarray"
] |
[((3155, 3188), 'numpy.zeros', 'np.zeros', (['(bs, 4)'], {'dtype': 'np.int32'}), '((bs, 4), dtype=np.int32)\n', (3163, 3188), True, 'import numpy as np\n'), ((3201, 3233), 'numpy.ones', 'np.ones', (['(bs,)'], {'dtype': 'np.float32'}), '((bs,), dtype=np.float32)\n', (3208, 3233), True, 'import numpy as np\n'), ((4872, 4912), 'numpy.transpose', 'np.transpose', (['fim_enc'], {'axes': '(0, 2, 3, 1)'}), '(fim_enc, axes=(0, 2, 3, 1))\n', (4884, 4912), True, 'import numpy as np\n'), ((5039, 5061), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (5047, 5061), True, 'import numpy as np\n'), ((6161, 6181), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6175, 6181), False, 'import os\n'), ((6333, 6361), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (6348, 6361), False, 'from PIL import Image\n'), ((1154, 1186), 'numpy.stack', 'np.stack', (['resized_images'], {'axis': '(0)'}), '(resized_images, axis=0)\n', (1162, 1186), True, 'import numpy as np\n'), ((1444, 1463), 'torchvision.transforms.functional.to_tensor', 'TF.to_tensor', (['image'], {}), '(image)\n', (1456, 1463), True, 'import torchvision.transforms.functional as TF\n'), ((2338, 2410), 'torch.ones', 'torch.ones', (['(1)', '(1)', 'ks', 'ks'], {'dtype': 'torch.float32', 'device': 'src_bg_mask.device'}), '(1, 1, ks, ks, dtype=torch.float32, device=src_bg_mask.device)\n', (2348, 2410), False, 'import torch\n'), ((2462, 2521), 'torch.nn.functional.pad', 'F.pad', (['src_bg_mask', '[pad_s, pad_s, pad_s, pad_s]'], {'value': '(1.0)'}), '(src_bg_mask, [pad_s, pad_s, pad_s, pad_s], value=1.0)\n', (2467, 2521), True, 'import torch.nn.functional as F\n'), ((2536, 2569), 'torch.nn.functional.conv2d', 'F.conv2d', (['src_bg_mask_pad', 'kernel'], {}), '(src_bg_mask_pad, kernel)\n', (2544, 2569), True, 'import torch.nn.functional as F\n'), ((2642, 2701), 'torch.nn.functional.pad', 'F.pad', (['src_bg_mask', '[pad_s, pad_s, pad_s, pad_s]'], {'value': '(0.0)'}), '(src_bg_mask, [pad_s, pad_s, pad_s, pad_s], value=0.0)\n', (2647, 2701), True, 'import torch.nn.functional as F\n'), ((2716, 2749), 'torch.nn.functional.conv2d', 'F.conv2d', (['src_bg_mask_pad', 'kernel'], {}), '(src_bg_mask_pad, kernel)\n', (2724, 2749), True, 'import torch.nn.functional as F\n'), ((3307, 3326), 'numpy.where', 'np.where', (['(mask == 1)'], {}), '(mask == 1)\n', (3315, 3326), True, 'import numpy as np\n'), ((3534, 3544), 'numpy.min', 'np.min', (['ys'], {}), '(ys)\n', (3540, 3544), True, 'import numpy as np\n'), ((3578, 3588), 'numpy.min', 'np.min', (['xs'], {}), '(xs)\n', (3584, 3588), True, 'import numpy as np\n'), ((3623, 3633), 'numpy.max', 'np.max', (['ys'], {}), '(ys)\n', (3629, 3633), True, 'import numpy as np\n'), ((3668, 3678), 'numpy.max', 'np.max', (['xs'], {}), '(xs)\n', (3674, 3678), True, 'import numpy as np\n'), ((4552, 4577), 'torch.FloatTensor', 'torch.FloatTensor', (['tensor'], {}), '(tensor)\n', (4569, 4577), False, 'import torch\n'), ((4968, 5001), 'utils.mesh.cvt_fim_enc', 'mesh.cvt_fim_enc', (['fim_i', 'map_name'], {}), '(fim_i, map_name)\n', (4984, 5001), True, 'import utils.mesh as mesh\n'), ((5795, 5820), 'numpy.repeat', 'np.repeat', (['im', '(3)'], {'axis': '(-1)'}), '(im, 3, axis=-1)\n', (5804, 5820), True, 'import numpy as np\n'), ((6049, 6069), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6063, 6069), False, 'import os\n'), ((6079, 6096), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6090, 6096), False, 'import os\n'), ((6191, 6210), 
'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (6204, 6210), False, 'import shutil\n'), ((6288, 6315), 'os.path.dirname', 'os.path.dirname', (['image_path'], {}), '(image_path)\n', (6303, 6315), False, 'import os\n'), ((6478, 6511), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (6489, 6511), False, 'import pickle\n'), ((6620, 6658), 'pickle.dump', 'pickle.dump', (['data_dict', 'fp'], {'protocol': '(2)'}), '(data_dict, fp, protocol=2)\n', (6631, 6658), False, 'import pickle\n'), ((872, 927), 'cv2.resize', 'cv2.resize', (['image', '(self.output_size, self.output_size)'], {}), '(image, (self.output_size, self.output_size))\n', (882, 927), False, 'import cv2\n'), ((1055, 1085), 'numpy.transpose', 'np.transpose', (['image', '(2, 0, 1)'], {}), '(image, (2, 0, 1))\n', (1067, 1085), True, 'import numpy as np\n'), ((5337, 5376), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['img', 'nrows'], {}), '(img, nrows)\n', (5364, 5376), False, 'import torchvision\n'), ((1678, 1708), 'torch.Tensor', 'torch.Tensor', (["sample['images']"], {}), "(sample['images'])\n", (1690, 1708), False, 'import torch\n'), ((1743, 1772), 'torch.Tensor', 'torch.Tensor', (["sample['smpls']"], {}), "(sample['smpls'])\n", (1755, 1772), False, 'import torch\n'), ((1957, 1987), 'torch.Tensor', 'torch.Tensor', (["sample['images']"], {}), "(sample['images'])\n", (1969, 1987), False, 'import torch\n'), ((2021, 2049), 'torch.Tensor', 'torch.Tensor', (["sample['smpl']"], {}), "(sample['smpl'])\n", (2033, 2049), False, 'import torch\n'), ((2082, 2109), 'torch.Tensor', 'torch.Tensor', (["sample['uvs']"], {}), "(sample['uvs'])\n", (2094, 2109), False, 'import torch\n'), ((2143, 2171), 'torch.Tensor', 'torch.Tensor', (["sample['mask']"], {}), "(sample['mask'])\n", (2155, 2171), False, 'import torch\n')]
|
import numpy as np
import pyastar
# The start and goal coordinates are in matrix coordinates (i, j).
start = (0, 0)
goal = (4, 4)
# The minimum cost must be 1 for the heuristic to be valid.
weights = np.array([[1, 3, 3, 3, 3],
[2, 1, 3, 3, 3],
[2, 2, 1, 3, 3],
[2, 2, 2, 1, 3],
[2, 2, 2, 2, 1]], dtype=np.float32)
print("Cost matrix:")
print(weights)
path = pyastar.astar_path(weights, start, goal, allow_diagonal=True)
# The path is returned as a numpy array of (i, j) coordinates.
print(f"Shortest path from {start} to {goal} found:")
print(path)
|
[
"pyastar.astar_path",
"numpy.array"
] |
[((203, 321), 'numpy.array', 'np.array', (['[[1, 3, 3, 3, 3], [2, 1, 3, 3, 3], [2, 2, 1, 3, 3], [2, 2, 2, 1, 3], [2, 2,\n 2, 2, 1]]'], {'dtype': 'np.float32'}), '([[1, 3, 3, 3, 3], [2, 1, 3, 3, 3], [2, 2, 1, 3, 3], [2, 2, 2, 1, 3\n ], [2, 2, 2, 2, 1]], dtype=np.float32)\n', (211, 321), True, 'import numpy as np\n'), ((441, 502), 'pyastar.astar_path', 'pyastar.astar_path', (['weights', 'start', 'goal'], {'allow_diagonal': '(True)'}), '(weights, start, goal, allow_diagonal=True)\n', (459, 502), False, 'import pyastar\n')]
|
#simulate the movement of the rogue AP and received RSSI values at the stationary
#APs based on the lognormal shadowing model
#Results will be written in a file to be read by the server to calculate the distance to the rogue AP
#Prx(d) = Prx(d0)-10*n*log(d/d0) + x(0, σ)
#rogue AP moves at a constant speed = 1m/sec
from time import sleep
from Crypto.Random import random
import math
from numpy import random as ff
AP1 = (16.6, 16.6)
AP2 = (16.6, 33.3)
AP3 = (33.3, 16.6)
AP4 = (33.3, 33.3)
#Rogue_loc = (random.randrange(1, 49), random.randrange(1, 49)) #initial location of the rogue AP
def distance(a, b):
return round(math.sqrt((b[0] - a[0])**2 +(b[1] - a[1])**2), 2)
def calcRSSI(AP, sigma, Rogue_loc):
if(AP == 1):
d = distance(AP1, Rogue_loc)
elif(AP == 2):
d = distance(AP2, Rogue_loc)
elif(AP == 3):
d = distance(AP3, Rogue_loc)
elif(AP == 4):
d = distance(AP4, Rogue_loc)
else:
print('Hmmm, did someone edit my code?')
return 0
if sigma == 0:
return(round(-40 -10*3*math.log10(d/1),2 ))
else:
return(round(-40 -10*3*math.log10(d/1)+ff.normal(0,sigma,1)[0],2 ))
def calcDistance(RSSI):
return(round(10**(-(RSSI+40)/30),2))
def exec(Rogue_loc, sigma):
f = open('simulation.txt', 'w')
direction = random.choice([0, 1, 2, 3]) #movement direction, 0=up, 1=right, 2=down, 3=left
step = 20 #change direction every x seconds
speed = 1 #m/s
'''stdev = input('please choose environment:\n 1:static\n2:semistatic\n3:somewhat dynamic\n4:highly dynamic\n')
if stdev == '1':
sigma = 0
elif stdev == '2':
sigma = 2
elif stdev =='3':
sigma = 4
elif stdev == '4':
sigma = 6'''
for i in range(0,300): #each second
for x in range(0, 10): #10 beacons/sec
if direction == 0 and Rogue_loc[0] != 0:
Rogue_loc = (round(Rogue_loc[0],2), round(Rogue_loc[1]+speed/10,2)) #move up
elif direction == 1:
Rogue_loc = (round(Rogue_loc[0]+speed/10, 2), round(Rogue_loc[1], 2)) #move right
elif direction == 2:
Rogue_loc = (round(Rogue_loc[0], 2), round(Rogue_loc[1]-speed/10,2)) #move down
elif direction == 3:
Rogue_loc = (round(Rogue_loc[0]-speed/10, 2), round(Rogue_loc[1], 2)) #move left
if Rogue_loc[0] == 0 or Rogue_loc[0] == 50 or Rogue_loc[1] == 0 or Rogue_loc[1] == 50: #correct movement direction in case it goes out of 50*50 range
direction = (direction + 2) % 4
f.write(str(Rogue_loc[0])+' '+str(Rogue_loc[1])+' '+str(calcRSSI(1, sigma,Rogue_loc))+' '+str(calcRSSI(2, sigma,Rogue_loc))+' '+str(calcRSSI(3, sigma,Rogue_loc))+' '+ str(calcRSSI(4, sigma,Rogue_loc))+'\n')
'''
print('Distance from AP1 ', calcDistance(calcRSSI(distance(Rogue_loc, AP1))), 'real distance = ', distance(Rogue_loc, AP1))
print('Distance from AP2 ', calcDistance(calcRSSI(distance(Rogue_loc, AP2))), 'real distance = ', distance(Rogue_loc, AP2))
print('Distance from AP3 ', calcDistance(calcRSSI(distance(Rogue_loc, AP3))), 'real distance = ', distance(Rogue_loc, AP3))
print('Distance from AP4 ', calcDistance(calcRSSI(distance(Rogue_loc, AP4))), 'real distance = ', distance(Rogue_loc, AP4))
print(Rogue_loc)
print(direction)'''
if i % random.randrange(10, 25) == 0: #change direction at random intervals
direction = random.choice([0, 1, 2, 3])
f.close()
|
[
"math.sqrt",
"Crypto.Random.random.randrange",
"Crypto.Random.random.choice",
"math.log10",
"numpy.random.normal"
] |
[((1323, 1350), 'Crypto.Random.random.choice', 'random.choice', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (1336, 1350), False, 'from Crypto.Random import random\n'), ((628, 678), 'math.sqrt', 'math.sqrt', (['((b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2)'], {}), '((b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2)\n', (637, 678), False, 'import math\n'), ((3564, 3591), 'Crypto.Random.random.choice', 'random.choice', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (3577, 3591), False, 'from Crypto.Random import random\n'), ((3471, 3495), 'Crypto.Random.random.randrange', 'random.randrange', (['(10)', '(25)'], {}), '(10, 25)\n', (3487, 3495), False, 'from Crypto.Random import random\n'), ((1067, 1084), 'math.log10', 'math.log10', (['(d / 1)'], {}), '(d / 1)\n', (1077, 1084), False, 'import math\n'), ((1145, 1167), 'numpy.random.normal', 'ff.normal', (['(0)', 'sigma', '(1)'], {}), '(0, sigma, 1)\n', (1154, 1167), True, 'from numpy import random as ff\n'), ((1129, 1146), 'math.log10', 'math.log10', (['(d / 1)'], {}), '(d / 1)\n', (1139, 1146), False, 'import math\n')]
|
#!/usr/bin/python
# encoding: utf-8
import random
import os
import torch
from PIL import Image
import numpy as np
from utils import *
import cv2
def scale_image_channel(im, c, v):
cs = list(im.split())
cs[c] = cs[c].point(lambda i: i * v)
out = Image.merge(im.mode, tuple(cs))
return out
def distort_image(im, hue, sat, val):
im = im.convert('HSV')
cs = list(im.split())
cs[1] = cs[1].point(lambda i: i * sat)
cs[2] = cs[2].point(lambda i: i * val)
def change_hue(x):
x += hue*255
if x > 255:
x -= 255
if x < 0:
x += 255
return x
cs[0] = cs[0].point(change_hue)
im = Image.merge(im.mode, tuple(cs))
im = im.convert('RGB')
#constrain_image(im)
return im
def rand_scale(s):
scale = random.uniform(1, s)
if(random.randint(1,10000)%2):
return scale
return 1./scale
def random_distort_image(im, dhue, dsat, dexp):
res = distort_image(im, dhue, dsat, dexp)
return res
def data_augmentation(clip, shape, jitter, hue, saturation, exposure):
# Initialize Random Variables
oh = clip[0].height
ow = clip[0].width
dw =int(ow*jitter)
dh =int(oh*jitter)
pleft = random.randint(-dw, dw)
pright = random.randint(-dw, dw)
ptop = random.randint(-dh, dh)
pbot = random.randint(-dh, dh)
swidth = ow - pleft - pright
sheight = oh - ptop - pbot
sx = float(swidth) / ow
sy = float(sheight) / oh
dx = (float(pleft)/ow)/sx
dy = (float(ptop) /oh)/sy
flip = random.randint(1,10000)%2
dhue = random.uniform(-hue, hue)
dsat = rand_scale(saturation)
dexp = rand_scale(exposure)
# Augment
cropped = [img.crop((pleft, ptop, pleft + swidth - 1, ptop + sheight - 1)) for img in clip]
sized = [img.resize(shape) for img in cropped]
if flip:
sized = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in sized]
clip = [random_distort_image(img, dhue, dsat, dexp) for img in sized]
return clip, flip, dx, dy, sx, sy
# this function obtains the new box labels after data augmentation
def fill_truth_detection(labpath, w, h, flip, dx, dy, sx, sy):
max_boxes = 50
label = np.zeros((max_boxes,5))
if os.path.getsize(labpath):
bs = np.loadtxt(labpath)
if bs is None:
return label
bs = np.reshape(bs, (-1, 5))
for i in range(bs.shape[0]):
cx = (bs[i][1] + bs[i][3]) / (2 * 320)
cy = (bs[i][2] + bs[i][4]) / (2 * 240)
imgw = (bs[i][3] - bs[i][1]) / 320
imgh = (bs[i][4] - bs[i][2]) / 240
bs[i][0] = bs[i][0] - 1
bs[i][1] = cx
bs[i][2] = cy
bs[i][3] = imgw
bs[i][4] = imgh
cc = 0
for i in range(bs.shape[0]):
x1 = bs[i][1] - bs[i][3]/2
y1 = bs[i][2] - bs[i][4]/2
x2 = bs[i][1] + bs[i][3]/2
y2 = bs[i][2] + bs[i][4]/2
x1 = min(0.999, max(0, x1 * sx - dx))
y1 = min(0.999, max(0, y1 * sy - dy))
x2 = min(0.999, max(0, x2 * sx - dx))
y2 = min(0.999, max(0, y2 * sy - dy))
bs[i][1] = (x1 + x2)/2
bs[i][2] = (y1 + y2)/2
bs[i][3] = (x2 - x1)
bs[i][4] = (y2 - y1)
if flip:
bs[i][1] = 0.999 - bs[i][1]
if bs[i][3] < 0.001 or bs[i][4] < 0.001:
continue
label[cc] = bs[i]
cc += 1
if cc >= 50:
break
label = np.reshape(label, (-1))
return label
def load_data_detection(base_path, imgpath, train, train_dur, shape, dataset_use='ucf101-24', jitter=0.2, hue=0.1, saturation=1.5, exposure=1.5):
# clip loading and data augmentation
# if dataset_use == 'ucf101-24':
# base_path = "/usr/home/sut/datasets/ucf24"
# else:
# base_path = "/usr/home/sut/Tim-Documents/jhmdb/data/jhmdb"
im_split = imgpath.split('/')
num_parts = len(im_split)
im_ind = int(im_split[num_parts-1][0:5])
labpath = os.path.join(base_path, 'labels', im_split[0], im_split[1] ,'{:05d}.txt'.format(im_ind))
img_folder = os.path.join(base_path, 'rgb-images', im_split[0], im_split[1])
if dataset_use == 'ucf101-24':
max_num = len(os.listdir(img_folder))
else:
max_num = len(os.listdir(img_folder)) - 1
clip = []
### We change downsampling rate throughout training as a ###
### temporal augmentation, which brings around 1-2 frame ###
### mAP. During test time it is set to 1. ###
d = 1
if train:
d = random.randint(1, 2)
for i in reversed(range(train_dur)):
# make it as a loop
i_temp = im_ind - i * d
while i_temp < 1:
i_temp = max_num + i_temp
while i_temp > max_num:
i_temp = i_temp - max_num
if dataset_use == 'ucf101-24':
path_tmp = os.path.join(base_path, 'rgb-images', im_split[0], im_split[1] ,'{:05d}.jpg'.format(i_temp))
else:
path_tmp = os.path.join(base_path, 'rgb-images', im_split[0], im_split[1] ,'{:05d}.png'.format(i_temp))
clip.append(Image.open(path_tmp).convert('RGB'))
if train: # Apply augmentation
clip,flip,dx,dy,sx,sy = data_augmentation(clip, shape, jitter, hue, saturation, exposure)
label = fill_truth_detection(labpath, clip[0].width, clip[0].height, flip, dx, dy, 1./sx, 1./sy)
label = torch.from_numpy(label)
else: # No augmentation
label = torch.zeros(50*5)
try:
tmp = torch.from_numpy(read_truths_args(labpath, 8.0/clip[0].width).astype('float32'))
except Exception:
tmp = torch.zeros(1,5)
tmp = tmp.view(-1)
tsz = tmp.numel()
if tsz > 50*5:
label = tmp[0:50*5]
elif tsz > 0:
label[0:tsz] = tmp
if train:
return clip, label
else:
return im_split[0] + '_' +im_split[1] + '_' + im_split[2], clip, label
def load_data_detection_test(root, imgpath, train_dur, num_samples):
clip,label = get_clip(root, imgpath, train_dur, num_samples)
return clip, label
def get_clip(root, imgpath, train_dur, num_samples):
im_split = imgpath.split('/')
num_parts = len(im_split)
im_ind = int(im_split[num_parts - 1][0:5])
# for UCF101 dataset
base_path = "/usr/home/sut/datasets/ucf24"
labpath = os.path.join(base_path, 'labels', im_split[6], im_split[7], '{:05d}.txt'.format(im_ind))
img_folder = os.path.join(base_path, 'rgb-images', im_split[6], im_split[7])
# for arbitrary videos
max_num = len(os.listdir(img_folder))
clip = []
for i in reversed(range(train_dur)):
        # the clip places the sampled training image last, preceded by the train_dur - 1 adjacent frames
i_temp = im_ind - i
if i_temp < 1:
i_temp = 1
if i_temp > max_num:
i_temp = max_num
path_tmp = os.path.join(base_path, 'rgb-images', im_split[6], im_split[7] ,'{:05d}.jpg'.format(i_temp))
clip.append(Image.open(path_tmp).convert('RGB'))
label = torch.zeros(50 * 5)
tmp = torch.zeros(1, 5)
tmp = tmp.view(-1)
tsz = tmp.numel()
if tsz > 50 * 5:
label = tmp[0:50 * 5]
elif tsz > 0:
label[0:tsz] = tmp
return clip, label
|
[
"random.randint",
"random.uniform",
"os.path.getsize",
"numpy.zeros",
"PIL.Image.open",
"numpy.reshape",
"numpy.loadtxt",
"torch.zeros",
"os.path.join",
"os.listdir",
"torch.from_numpy"
] |
[((807, 827), 'random.uniform', 'random.uniform', (['(1)', 's'], {}), '(1, s)\n', (821, 827), False, 'import random\n'), ((1240, 1263), 'random.randint', 'random.randint', (['(-dw)', 'dw'], {}), '(-dw, dw)\n', (1254, 1263), False, 'import random\n'), ((1277, 1300), 'random.randint', 'random.randint', (['(-dw)', 'dw'], {}), '(-dw, dw)\n', (1291, 1300), False, 'import random\n'), ((1314, 1337), 'random.randint', 'random.randint', (['(-dh)', 'dh'], {}), '(-dh, dh)\n', (1328, 1337), False, 'import random\n'), ((1351, 1374), 'random.randint', 'random.randint', (['(-dh)', 'dh'], {}), '(-dh, dh)\n', (1365, 1374), False, 'import random\n'), ((1615, 1640), 'random.uniform', 'random.uniform', (['(-hue)', 'hue'], {}), '(-hue, hue)\n', (1629, 1640), False, 'import random\n'), ((2243, 2267), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (2251, 2267), True, 'import numpy as np\n'), ((2274, 2298), 'os.path.getsize', 'os.path.getsize', (['labpath'], {}), '(labpath)\n', (2289, 2298), False, 'import os\n'), ((3638, 3659), 'numpy.reshape', 'np.reshape', (['label', '(-1)'], {}), '(label, -1)\n', (3648, 3659), True, 'import numpy as np\n'), ((4269, 4332), 'os.path.join', 'os.path.join', (['base_path', '"""rgb-images"""', 'im_split[0]', 'im_split[1]'], {}), "(base_path, 'rgb-images', im_split[0], im_split[1])\n", (4281, 4332), False, 'import os\n'), ((6651, 6714), 'os.path.join', 'os.path.join', (['base_path', '"""rgb-images"""', 'im_split[6]', 'im_split[7]'], {}), "(base_path, 'rgb-images', im_split[6], im_split[7])\n", (6663, 6714), False, 'import os\n'), ((7289, 7308), 'torch.zeros', 'torch.zeros', (['(50 * 5)'], {}), '(50 * 5)\n', (7300, 7308), False, 'import torch\n'), ((7319, 7336), 'torch.zeros', 'torch.zeros', (['(1)', '(5)'], {}), '(1, 5)\n', (7330, 7336), False, 'import torch\n'), ((835, 859), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (849, 859), False, 'import random\n'), ((1577, 1601), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (1591, 1601), False, 'import random\n'), ((2313, 2332), 'numpy.loadtxt', 'np.loadtxt', (['labpath'], {}), '(labpath)\n', (2323, 2332), True, 'import numpy as np\n'), ((2394, 2417), 'numpy.reshape', 'np.reshape', (['bs', '(-1, 5)'], {}), '(bs, (-1, 5))\n', (2404, 2417), True, 'import numpy as np\n'), ((4722, 4742), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (4736, 4742), False, 'import random\n'), ((5578, 5601), 'torch.from_numpy', 'torch.from_numpy', (['label'], {}), '(label)\n', (5594, 5601), False, 'import torch\n'), ((5647, 5666), 'torch.zeros', 'torch.zeros', (['(50 * 5)'], {}), '(50 * 5)\n', (5658, 5666), False, 'import torch\n'), ((6761, 6783), 'os.listdir', 'os.listdir', (['img_folder'], {}), '(img_folder)\n', (6771, 6783), False, 'import os\n'), ((4390, 4412), 'os.listdir', 'os.listdir', (['img_folder'], {}), '(img_folder)\n', (4400, 4412), False, 'import os\n'), ((4446, 4468), 'os.listdir', 'os.listdir', (['img_folder'], {}), '(img_folder)\n', (4456, 4468), False, 'import os\n'), ((5821, 5838), 'torch.zeros', 'torch.zeros', (['(1)', '(5)'], {}), '(1, 5)\n', (5832, 5838), False, 'import torch\n'), ((5286, 5306), 'PIL.Image.open', 'Image.open', (['path_tmp'], {}), '(path_tmp)\n', (5296, 5306), False, 'from PIL import Image\n'), ((7239, 7259), 'PIL.Image.open', 'Image.open', (['path_tmp'], {}), '(path_tmp)\n', (7249, 7259), False, 'from PIL import Image\n')]
|
import logging
import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
from torch.optim.lr_scheduler import ReduceLROnPlateau
from . import utils
from tqdm import tqdm
from unet3d.utils import unpad_eval
class UNet3DTrainer:
"""3D UNet trainer.
Args:
model (Unet3D): UNet 3D model to be trained
optimizer (nn.optim.Optimizer): optimizer used for training
lr_scheduler (torch.optim.lr_scheduler._LRScheduler): learning rate scheduler
WARN: bear in mind that lr_scheduler.step() is invoked after every validation step
(i.e. validate_after_iters) not after every epoch. So e.g. if one uses StepLR with step_size=30
the learning rate will be adjusted after every 30 * validate_after_iters iterations.
loss_criterion (callable): loss function
eval_criterion (callable): used to compute training/validation metric (such as Dice, IoU, AP or Rand score)
saving the best checkpoint is based on the result of this function on the validation set
device (torch.device): device to train on
loaders (dict): 'train' and 'val' loaders
checkpoint_dir (string): dir for saving checkpoints and tensorboard logs
max_num_epochs (int): maximum number of epochs
max_num_iterations (int): maximum number of iterations
validate_after_iters (int): validate after that many iterations
log_after_iters (int): number of iterations before logging to tensorboard
validate_iters (int): number of validation iterations, if None validate
on the whole validation set
eval_score_higher_is_better (bool): if True higher eval scores are considered better
best_eval_score (float): best validation score so far (higher better)
num_iterations (int): useful when loading the model from the checkpoint
num_epoch (int): useful when loading the model from the checkpoint
"""
def __init__(self, model, optimizer, lr_scheduler, loss_criterion,
eval_criterion, device, loaders, checkpoint_dir,
max_num_epochs=100, max_num_iterations=1e5,
validate_after_iters=100, log_after_iters=100,
validate_iters=None, num_iterations=1, num_epoch=0,
eval_score_higher_is_better=True, best_eval_score=None,
logger=None):
if logger is None:
self.logger = utils.get_logger('UNet3DTrainer', level=logging.DEBUG)
else:
self.logger = logger
self.logger.info(model)
self.model = model
self.optimizer = optimizer
self.scheduler = lr_scheduler
self.loss_criterion = loss_criterion
self.eval_criterion = eval_criterion
self.device = device
self.loaders = loaders
self.checkpoint_dir = checkpoint_dir
self.max_num_epochs = max_num_epochs
self.max_num_iterations = max_num_iterations
self.validate_after_iters = validate_after_iters
self.log_after_iters = log_after_iters
self.validate_iters = validate_iters
self.eval_score_higher_is_better = eval_score_higher_is_better
logger.info(f'eval_score_higher_is_better: {eval_score_higher_is_better}')
if best_eval_score is not None:
self.best_eval_score = best_eval_score
else:
# initialize the best_eval_score
if eval_score_higher_is_better:
self.best_eval_score = float('-inf')
else:
self.best_eval_score = float('+inf')
self.writer = SummaryWriter(logdir=os.path.join(checkpoint_dir, 'logs'))
self.num_iterations = num_iterations
self.num_epoch = num_epoch
@classmethod
def from_checkpoint(cls, checkpoint_path, model, optimizer, lr_scheduler, loss_criterion, eval_criterion, loaders,
logger=None):
logger.info(f"Loading checkpoint '{checkpoint_path}'...")
state = utils.load_checkpoint(checkpoint_path, model, optimizer)
logger.info(
f"Checkpoint loaded. Epoch: {state['epoch']}. Best val score: {state['best_eval_score']}. Num_iterations: {state['num_iterations']}")
checkpoint_dir = os.path.split(checkpoint_path)[0]
return cls(model, optimizer, lr_scheduler,
loss_criterion, eval_criterion,
torch.device(state['device']),
loaders, checkpoint_dir,
eval_score_higher_is_better=state['eval_score_higher_is_better'],
best_eval_score=state['best_eval_score'],
num_iterations=state['num_iterations'],
num_epoch=state['epoch'],
max_num_epochs=state['max_num_epochs'],
max_num_iterations=state['max_num_iterations'],
validate_after_iters=state['validate_after_iters'],
log_after_iters=state['log_after_iters'],
validate_iters=state['validate_iters'],
logger=logger)
@classmethod
def from_pretrained(cls, pre_trained, model, optimizer, lr_scheduler, loss_criterion, eval_criterion,
device, loaders,
max_num_epochs=100, max_num_iterations=1e5,
validate_after_iters=100, log_after_iters=100,
validate_iters=None, num_iterations=1, num_epoch=0,
eval_score_higher_is_better=True, best_eval_score=None,
logger=None):
        logger.info(f"Loading pre-trained model from '{pre_trained}'...")
utils.load_checkpoint(pre_trained, model, None)
checkpoint_dir = os.path.split(pre_trained)[0]
return cls(model, optimizer, lr_scheduler,
loss_criterion, eval_criterion,
device, loaders, checkpoint_dir,
eval_score_higher_is_better=eval_score_higher_is_better,
best_eval_score=best_eval_score,
num_iterations=num_iterations,
num_epoch=num_epoch,
max_num_epochs=max_num_epochs,
max_num_iterations=max_num_iterations,
validate_after_iters=validate_after_iters,
log_after_iters=log_after_iters,
validate_iters=validate_iters,
logger=logger)
def fit(self):
for epoch in range(self.num_epoch, self.max_num_epochs):
# train for one epoch
self.logger.info('Start Epoch: {}, lr = {}'.format(epoch, self.optimizer.param_groups[0]['lr']))
should_terminate = self.train(self.loaders['train'])
if epoch % 1 == 0:
# evaluate on validation set
eval_score = self.validate(self.loaders['val'])
# adjust learning rate if necessary
if isinstance(self.scheduler, ReduceLROnPlateau):
self.scheduler.step(eval_score)
elif not isinstance(self.scheduler,torch.optim.lr_scheduler.CyclicLR):
self.scheduler.step()
# log current learning rate in tensorboard
self._log_lr()
# remember best validation metric
is_best = self._is_best_eval_score(eval_score)
# save checkpoint
self._save_checkpoint(is_best)
if should_terminate:
break
self.num_epoch += 1
def train(self, train_loader):
"""Trains the model for 1 epoch.
Args:
train_loader (torch.utils.data.DataLoader): training data loader
Returns:
True if the training should be terminated immediately, False otherwise
"""
train_losses = utils.RunningAverage()
# train_eval_scores = utils.RunningAverage()
# sets the model in training mode
self.model.train()
for i, t in enumerate(tqdm(train_loader)):
# self.logger.info(
# f'Training iteration {self.num_iterations}. Batch {i}. Epoch [{self.num_epoch}/{self.max_num_epochs - 1}]')
#input, target, weight, GP = self._split_training_batch(t)
input, target, weight, slices, zyx, GP = self._split_training_batch(t)
#output, loss = self._forward_pass(input, target, weight, GP)
            output, loss = self._forward_pass(input, target, zyx, weight=weight, slices=slices, GP=GP)
train_losses.update(loss.item(), self._batch_size(input))
# compute gradients and update parameters
self.optimizer.zero_grad()
loss.backward()
if isinstance(self.scheduler,torch.optim.lr_scheduler.CyclicLR):
self.scheduler.step()
self.optimizer.step()
#print(self.optimizer.param_groups[0]['lr'])
# if self.num_iterations % self.log_after_iters == 0:
# # if model contains final_activation layer for normalizing logits apply it, otherwise both
# # the evaluation metric as well as images in tensorboard will be incorrectly computed
# if hasattr(self.model, 'final_activation'):
# output = self.model.final_activation(output)
#
# # compute eval criterion
# eval_score = self.eval_criterion(output, target)
# train_eval_scores.update(eval_score.item(), self._batch_size(input))
#
# # log stats, params and images
# self.logger.info(
# f'Training stats. Loss: {train_losses.avg}. Evaluation score: {train_eval_scores.avg}')
# self._log_stats('train', train_losses.avg, train_eval_scores.avg)
# self._log_params()
# self._log_images(input, target, output)
if self.max_num_iterations < self.num_iterations:
self.logger.info(
f'Maximum number of iterations {self.max_num_iterations} exceeded. Finishing training...')
return True
self.num_iterations += 1
self.logger.info(f'Train Loss: {train_losses.avg}')
return False
def validate(self, val_loaders):
#self.logger.info('Validating...')
val_losses = utils.RunningAverage()
val_scores = utils.RunningAverage()
try:
            # set the model in evaluation mode; final_activation is applied explicitly below when merging patch predictions
self.model.eval()
with torch.no_grad():
for val_loader in tqdm(val_loaders):
ds = val_loader.dataset
                    nD, nH, nW = ds.zz, ds.yy, ds.xx
                    nC = self.model.out_channels
                    # initialize the output prediction arrays
                    prediction_map = torch.zeros(size=(nC, nD, nH, nW), dtype=torch.float32).to(self.device)
                    # initialize normalization mask in order to average out probabilities of overlapping patches
                    normalization_mask = torch.zeros(size=(nC, nD, nH, nW), dtype=torch.float32).to(self.device)
gt_label = torch.from_numpy(ds.labels).long().to(self.device)
for i, t in enumerate(val_loader):
#self.logger.info(f'Validation iteration {i}')
#input, target, weight, GP = self._split_training_batch(t)
input, target, weight, slices, zyx, GP = self._split_training_batch(t)
# output, loss = self._forward_pass(input, target, weight, GP)
                        output, loss = self._forward_pass(input, target, zyx, weight=weight, slices=slices, GP=GP)
val_losses.update(loss.item(), self._batch_size(input))
output = self.model.final_activation(output)
for nB in range(output.size(0)):
slice_pred = (slice(0, nC,),) + slices[nB][1:] # remove channel slice and add class slice at beginning
prob = output[nB]
if self.eval_criterion.pad_width is not None:
prob, slice_pred = unpad_eval(prob, slice_pred, shape = (nD,nH,nW),pad_width= self.eval_criterion.pad_width)
prediction_map[slice_pred] += prob
normalization_mask[slice_pred] += 1.0
if self.validate_iters is not None and self.validate_iters <= i:
# stop validation
break
# one case merge predict
prediction_map = prediction_map / normalization_mask
eval_score = self.eval_criterion(prediction_map[None,:], gt_label[None,:])
val_scores.update(eval_score.item())
self._log_stats('val', val_losses.avg, val_scores.avg)
self.logger.info(f'Validation Loss: {val_losses.avg}. Evaluation score: {val_scores.avg}')
return val_scores.avg
finally:
# set back in training mode
self.model.train()
def _split_training_batch(self, t):
# def _move_to_device(input):
# if isinstance(input, tuple) or isinstance(input, list):
# return tuple([_move_to_device(x) for x in input])
# elif input is None:
# return None
# else:
# return input.to(self.device)
#
# t = _move_to_device(t)
# weight, GP = None,None
#
#
# if len(t) == 2:
# input, target = t
# return input, target, weight, None
# elif len(t)==3:
# input, target, weight = t
# return input, target, weight, None
# else:
# input, target, weight, GP = t
#
# return input, target, weight,GP
        if len(t) == 5:
            input, target, weight, slices, zyx = t
            input = input.to(self.device)
            target = target.to(self.device)
            if weight is not None:
                weight = weight.to(self.device)
            return input, target, weight, slices, zyx, None
        elif len(t) == 6:
            input, target, weight, slices, zyx, GP = t
            input = input.to(self.device)
            target = target.to(self.device)
            if weight is not None:
                weight = weight.to(self.device)
            if GP is not None:
                GP = GP.to(self.device)
            return input, target, weight, slices, zyx, GP
        else:
            raise ValueError(f'Expected a batch of 5 or 6 elements, got {len(t)}')
# def _forward_pass(self, input, target, weight=None, GP = None):
# # forward pass
# if GP is None:
# output = self.model(input)
# else:
# output = self.model(input, GP = GP)
## target = target.float()
#
# if input.dim() == target.dim()+1:
# # expand-dims =false, set uint8 ->long
# target = target.long()
#
#
# if isinstance(self.loss_criterion,list):
# loss = 0.0
# for crit in self.loss_criterion:
# if weight is None:
# loss += crit(output, target)
# else:
# weight = weight.float()
# loss += crit(output, target, weight)
#
#
# else: # compute the loss
# if weight is None:
# loss = self.loss_criterion(output, target)
# else:
# weight = weight.float()
# loss = self.loss_criterion(output, target, weight)
#
# return output, loss
def _forward_pass(self, input, target, zyx, weight=None,slices=None,GP = None):
# forward pass
if GP is None:
output = self.model(input)
else:
output = self.model(input, GP = GP)
# target = target.float()
if input.dim() == target.dim()+1:
# expand-dims =false, set uint8 ->long
target = target.long()
if weight is not None:
weight = weight.float()
if isinstance(self.loss_criterion,list):
loss = 0.0
for crit in self.loss_criterion:
                loss += crit(output, target, shape=zyx, weight=weight, slices=slices)
# if weight is None:
# loss += crit(output, target)
# else:
# weight = weight.float()
# loss += crit(output, target, weight)
else: # compute the loss
            loss = self.loss_criterion(output, target, shape=zyx, weight=weight, slices=slices)
# if weight is None:
# loss = self.loss_criterion(output, target)
# else:
# weight = weight.float()
# loss = self.loss_criterion(output, target, weight)
return output, loss
def _is_best_eval_score(self, eval_score):
if self.eval_score_higher_is_better:
is_best = eval_score > self.best_eval_score
else:
is_best = eval_score < self.best_eval_score
if is_best:
self.logger.info(f'Saving new best evaluation metric: {eval_score}')
self.best_eval_score = eval_score
return is_best
def _save_checkpoint(self, is_best):
utils.save_checkpoint({
'epoch': self.num_epoch + 1,
'num_iterations': self.num_iterations,
'model_state_dict': self.model.state_dict(),
'best_eval_score': self.best_eval_score,
'eval_score_higher_is_better': self.eval_score_higher_is_better,
'optimizer_state_dict': self.optimizer.state_dict(),
'device': str(self.device),
'max_num_epochs': self.max_num_epochs,
'max_num_iterations': self.max_num_iterations,
'validate_after_iters': self.validate_after_iters,
'log_after_iters': self.log_after_iters,
'validate_iters': self.validate_iters
}, is_best, checkpoint_dir=self.checkpoint_dir,
logger=self.logger)
def _log_lr(self):
lr = self.optimizer.param_groups[0]['lr']
self.writer.add_scalar('learning_rate', lr, self.num_iterations)
def _log_stats(self, phase, loss_avg, eval_score_avg):
tag_value = {
f'{phase}_loss_avg': loss_avg,
f'{phase}_eval_score_avg': eval_score_avg
}
for tag, value in tag_value.items():
self.writer.add_scalar(tag, value, self.num_iterations)
def _log_params(self):
self.logger.info('Logging model parameters and gradients')
for name, value in self.model.named_parameters():
self.writer.add_histogram(name, value.data.cpu().numpy(), self.num_iterations)
self.writer.add_histogram(name + '/grad', value.grad.data.cpu().numpy(), self.num_iterations)
def _log_images(self, input, target, prediction):
inputs_map = {
'inputs': input,
'targets': target,
'predictions': prediction
}
img_sources = {}
for name, batch in inputs_map.items():
if isinstance(batch, list) or isinstance(batch, tuple):
for i, b in enumerate(batch):
img_sources[f'{name}{i}'] = b.data.cpu().numpy()
else:
img_sources[name] = batch.data.cpu().numpy()
for name, batch in img_sources.items():
for tag, image in self._images_from_batch(name, batch):
self.writer.add_image(tag, image, self.num_iterations, dataformats='HW')
def _images_from_batch(self, name, batch):
tag_template = '{}/batch_{}/channel_{}/slice_{}'
tagged_images = []
if batch.ndim == 5:
# NCDHW
slice_idx = batch.shape[2] // 2 # get the middle slice
for batch_idx in range(batch.shape[0]):
for channel_idx in range(batch.shape[1]):
tag = tag_template.format(name, batch_idx, channel_idx, slice_idx)
img = batch[batch_idx, channel_idx, slice_idx, ...]
tagged_images.append((tag, self._normalize_img(img)))
else:
# batch has no channel dim: NDHW
slice_idx = batch.shape[1] // 2 # get the middle slice
for batch_idx in range(batch.shape[0]):
tag = tag_template.format(name, batch_idx, 0, slice_idx)
img = batch[batch_idx, slice_idx, ...]
tagged_images.append((tag, self._normalize_img(img)))
return tagged_images
@staticmethod
def _normalize_img(img):
return (img - np.min(img)) / np.ptp(img)
@staticmethod
def _batch_size(input):
if isinstance(input, list) or isinstance(input, tuple):
return input[0].size(0)
else:
return input.size(0)
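# --- Hedged usage sketch (illustration only; not part of the original trainer file). ---
# Assuming the class above is named UNet3DTrainer (as its logger name suggests),
# this is roughly how it gets wired up. `UNet3D`, `DiceCoefficient`, and the two
# loaders are placeholders for whatever model/metric/data pipeline the project
# provides; only the constructor arguments themselves come from the code above.
#
#   model = UNet3D(in_channels=1, out_channels=2).to('cuda')
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
#   scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max')
#   trainer = UNet3DTrainer(model, optimizer, scheduler,
#                           loss_criterion=torch.nn.CrossEntropyLoss(),
#                           eval_criterion=DiceCoefficient(),
#                           device=torch.device('cuda'),
#                           loaders={'train': train_loader, 'val': [val_loader]},
#                           checkpoint_dir='./checkpoints')
#   trainer.fit()   # trains until max_num_epochs or max_num_iterations is reached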
|
[
"tqdm.tqdm",
"torch.no_grad",
"unet3d.utils.unpad_eval",
"numpy.ptp",
"numpy.min",
"torch.device",
"torch.zeros",
"os.path.split",
"os.path.join",
"torch.from_numpy"
] |
[((4275, 4305), 'os.path.split', 'os.path.split', (['checkpoint_path'], {}), '(checkpoint_path)\n', (4288, 4305), False, 'import os\n'), ((4430, 4459), 'torch.device', 'torch.device', (["state['device']"], {}), "(state['device'])\n", (4442, 4459), False, 'import torch\n'), ((5759, 5785), 'os.path.split', 'os.path.split', (['pre_trained'], {}), '(pre_trained)\n', (5772, 5785), False, 'import os\n'), ((8143, 8161), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (8147, 8161), False, 'from tqdm import tqdm\n'), ((21475, 21486), 'numpy.ptp', 'np.ptp', (['img'], {}), '(img)\n', (21481, 21486), True, 'import numpy as np\n'), ((3650, 3686), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""logs"""'], {}), "(checkpoint_dir, 'logs')\n", (3662, 3686), False, 'import os\n'), ((10778, 10793), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10791, 10793), False, 'import torch\n'), ((10829, 10846), 'tqdm.tqdm', 'tqdm', (['val_loaders'], {}), '(val_loaders)\n', (10833, 10846), False, 'from tqdm import tqdm\n'), ((21460, 21471), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (21466, 21471), True, 'import numpy as np\n'), ((11111, 11166), 'torch.zeros', 'torch.zeros', ([], {'size': '(nC, nD, nH, nW)', 'dtype': 'torch.float32'}), '(size=(nC, nD, nH, nW), dtype=torch.float32)\n', (11122, 11166), False, 'import torch\n'), ((11337, 11392), 'torch.zeros', 'torch.zeros', ([], {'size': '(nC, nD, nH, nW)', 'dtype': 'torch.float32'}), '(size=(nC, nD, nH, nW), dtype=torch.float32)\n', (11348, 11392), False, 'import torch\n'), ((12643, 12737), 'unet3d.utils.unpad_eval', 'unpad_eval', (['prob', 'slice_pred'], {'shape': '(nD, nH, nW)', 'pad_width': 'self.eval_criterion.pad_width'}), '(prob, slice_pred, shape=(nD, nH, nW), pad_width=self.\n eval_criterion.pad_width)\n', (12653, 12737), False, 'from unet3d.utils import unpad_eval\n'), ((11461, 11488), 'torch.from_numpy', 'torch.from_numpy', (['ds.labels'], {}), '(ds.labels)\n', (11477, 11488), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
import h5py
import yaml
from collections import UserDict
from datetime import datetime
from numpy import string_
from contextlib import contextmanager
TYPEID = '_type_'
@contextmanager
def hdf_file(hdf, lazy=True, *args, **kwargs):
"""Context manager yields h5 file if hdf is str,
otherwise just yield hdf as is."""
if isinstance(hdf, str):
if not lazy:
with h5py.File(hdf, *args, **kwargs) as hdf:
yield hdf
else:
yield h5py.File(hdf, *args, **kwargs)
else:
yield hdf
def unpack_dataset(item):
"""Reconstruct a hdfdict dataset.
Only some special unpacking for yaml and datetime types.
Parameters
----------
item : h5py.Dataset
Returns
-------
value : Unpacked Data
"""
value = item[()]
if TYPEID in item.attrs:
if item.attrs[TYPEID].astype(str) == 'datetime':
if hasattr(value, '__iter__'):
                value = [datetime.fromtimestamp(ts) for ts in value]
else:
value = datetime.fromtimestamp(value)
if item.attrs[TYPEID].astype(str) == 'yaml':
value = yaml.safe_load(value.decode())
return value
class LazyHdfDict(UserDict):
"""Helps loading data only if values from the dict are requested.
This is done by reimplementing the __getitem__ method.
"""
def __init__(self, _h5file=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._h5file = _h5file # used to close the file on deletion.
def __getitem__(self, key):
"""Returns item and loads dataset if needed."""
item = super().__getitem__(key)
if isinstance(item, h5py.Dataset):
item = unpack_dataset(item)
self.__setitem__(key, item)
return item
def unlazy(self):
"""Unpacks all datasets.
You can call dict(this_instance) then to get a real dict.
"""
load(self, lazy=False)
def close(self):
"""Closes the h5file if provided at initialization."""
if self._h5file and hasattr(self._h5file, 'close'):
self._h5file.close()
def __del__(self):
self.close()
def _ipython_key_completions_(self):
"""Returns a tuple of keys.
Special Method for ipython to get key completion
"""
return tuple(self.keys())
def load(hdf, lazy=True, unpacker=unpack_dataset, *args, **kwargs):
"""Returns a dictionary containing the
groups as keys and the datasets as values
from given hdf file.
Parameters
----------
hdf : string (path to file) or `h5py.File()` or `h5py.Group()`
lazy : bool
If True, the datasets are lazy loaded at the moment an item is requested.
    unpacker : callable
Unpack function gets `value` of type h5py.Dataset.
Must return the data you would like to have it in the returned dict.
Returns
-------
d : dict
The dictionary containing all groupnames as keys and
datasets as values.
"""
def _recurse(hdfobject, datadict):
for key, value in hdfobject.items():
if type(value) == h5py.Group or isinstance(value, LazyHdfDict):
if lazy:
datadict[key] = LazyHdfDict()
else:
datadict[key] = {}
datadict[key] = _recurse(value, datadict[key])
elif isinstance(value, h5py.Dataset):
if not lazy:
value = unpacker(value)
datadict[key] = value
return datadict
with hdf_file(hdf, lazy=lazy, *args, **kwargs) as hdf:
if lazy:
data = LazyHdfDict(_h5file=hdf)
else:
data = {}
return _recurse(hdf, data)
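# --- Hedged usage note (illustration only; 'example.h5' is a made-up file name). ---
# With lazy=True (the default) load() returns a LazyHdfDict that keeps the file
# open and unpacks each dataset only on first access; close it when done:
#   d = load('example.h5', mode='r')
#   trace = d['experiment']['trace']   # h5py.Dataset unpacked on this access
#   d.close()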
def pack_dataset(hdfobject, key, value):
"""Packs a given key value pair into a dataset in the given hdfobject."""
isdt = None
if isinstance(value, datetime):
value = value.timestamp()
isdt = True
if hasattr(value, '__iter__'):
if all(isinstance(i, datetime) for i in value):
value = [item.timestamp() for item in value]
isdt = True
try:
ds = hdfobject.create_dataset(name=key, data=value)
if isdt:
ds.attrs.create(
name=TYPEID,
data=string_("datetime"))
except TypeError:
# Obviously the data was not serializable. To give it
# a last try; serialize it to yaml
# and save it to the hdf file:
ds = hdfobject.create_dataset(
name=key,
data=string_(yaml.safe_dump(value))
)
ds.attrs.create(
name=TYPEID,
data=string_("yaml"))
# if this fails again, restructure your data!
def dump(data, hdf, packer=pack_dataset, *args, **kwargs):
"""Adds keys of given dict as groups and values as datasets
to the given hdf-file (by string or object) or group object.
Parameters
----------
data : dict
The dictionary containing only string keys and
data values or dicts again.
hdf : string (path to file) or `h5py.File()` or `h5py.Group()`
packer : callable
Callable gets `hdfobject, key, value` as input.
`hdfobject` is considered to be either a h5py.File or a h5py.Group.
`key` is the name of the dataset.
`value` is the dataset to be packed and accepted by h5py.
Returns
-------
hdf : obj
`h5py.Group()` or `h5py.File()` instance
"""
def _recurse(datadict, hdfobject):
for key, value in datadict.items():
if isinstance(key, tuple):
key = '_'.join((str(i) for i in key))
if isinstance(value, (dict, LazyHdfDict)):
hdfgroup = hdfobject.create_group(key)
_recurse(value, hdfgroup)
else:
packer(hdfobject, key, value)
with hdf_file(hdf, *args, **kwargs) as hdf:
_recurse(data, hdf)
return hdf
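# --- Hedged usage sketch (illustration only; not part of the original module). ---
# Round-trips a nested dict through HDF5 with dump()/load(); 'example.h5' is a
# made-up name and extra kwargs (e.g. mode) are forwarded to h5py.File. dump()
# leaves the file open and returns the handle, so it is closed before re-reading.
if __name__ == '__main__':
    import numpy as np
    record = {'experiment': {'trace': np.arange(5), 'when': datetime.now()}}
    written = dump(record, 'example.h5', mode='w')
    written.close()
    restored = load('example.h5', lazy=False, mode='r')
    print(restored['experiment']['trace'], restored['experiment']['when'])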
|
[
"yaml.safe_dump",
"h5py.File",
"numpy.string_",
"datetime.datetime.fromtimestamp"
] |
[((419, 450), 'h5py.File', 'h5py.File', (['hdf', '*args'], {}), '(hdf, *args, **kwargs)\n', (428, 450), False, 'import h5py\n'), ((517, 548), 'h5py.File', 'h5py.File', (['hdf', '*args'], {}), '(hdf, *args, **kwargs)\n', (526, 548), False, 'import h5py\n'), ((1105, 1134), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['value'], {}), '(value)\n', (1127, 1134), False, 'from datetime import datetime\n'), ((998, 1024), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (1020, 1024), False, 'from datetime import datetime\n'), ((4432, 4451), 'numpy.string_', 'string_', (['"""datetime"""'], {}), "('datetime')\n", (4439, 4451), False, 'from numpy import string_\n'), ((4805, 4820), 'numpy.string_', 'string_', (['"""yaml"""'], {}), "('yaml')\n", (4812, 4820), False, 'from numpy import string_\n'), ((4705, 4726), 'yaml.safe_dump', 'yaml.safe_dump', (['value'], {}), '(value)\n', (4719, 4726), False, 'import yaml\n')]
|
def pooled_cohen_kappa(samples_a, samples_b, weight_type=None, questions=None):
"""
Compute the pooled Cohen's Kappa for the given samples.
From:
    De Vries, H., Elliott, M. N., Kanouse, D. E., & Teleki, S. S. (2008).
Using pooled kappa to summarize interrater agreement across many items.
Field methods, 20(3), 272-282.
With pooled kappa:
k_p = (average_accuracy - average_expected_random_agreement) / (1 - average_expected_random_agreement)
Where:
        average_accuracy = np.mean(column_wise_agreements)
        average_expected_random_agreement = np.mean(expected_random_agreements)
        column_wise_agreements = [agreement(samples_a[:,col], samples_b[:,col]) for col in range(n_cols)]
        expected_random_agreements = [expected_random_agreement(samples_a[:,col], samples_b[:,col]) for col in range(n_cols)]
A weighted version of the pooled Cohen's Kappa is also available in which the contingency table is weighted using
either quadratic or linear weights. If weight_type is None, then the weight matrix is the identity matrix.
To compute the weighted Cohen's Kappa, the questions parameter must be provided.
:param samples_a: list of samples from the first rater
:param samples_b: list of samples from the second rater
:param weight_type: Union[None, "linear", "quadratic"] weights type to use for the agreement calculation
:param questions: List[Question] if weights is not None, this is the list of questions and their values
:return: pooled Cohen's Kappa
"""
    n = len(samples_a)
    if n == 0:
        return 0
    ncols = len(samples_a[0])
    if ncols == 0:
        return 0
if n != len(samples_b) or ncols != len(samples_b[0]):
        raise Exception("samples_a and samples_b must have the same shape")
if weight_type is not None and (weight_type not in ["linear", "quadratic"] or questions is None):
raise Exception("weights must be None, 'linear' or 'quadratic'")
import numpy as np
# Convert to numpy arrays
samples_a = np.array(samples_a)
samples_b = np.array(samples_b)
def weight(i, j, c):
"""
Compute the weight for a pair of values.
"""
if weight_type == "linear":
return 1 - (abs(i - j) / (c - 1))
elif weight_type == "quadratic":
return 1 - (abs(i - j) / (c - 1)) ** 2
else:
return 1 if i == j else 0
def agreement(colum_a, colum_b, values=None):
"""
Compute the agreement between two columns.
"""
if weight_type is not None:
# Build the contingency table
c = len(values)
contingency_table = np.zeros((c, c))
for i, value_a in enumerate(values):
for j, value_b in enumerate(values):
contingency_table[i, j] = np.mean(
weight(i, j, c) * (colum_a == value_a) * (colum_b == value_b))
# Compute the agreement
return np.sum(contingency_table)
else:
return np.mean(colum_a == colum_b)
def expected_random_agreement(colum_a, colum_b, values=None):
"""
Compute the expected random agreement between two columns.
"""
if weight_type is not None:
# Build the contingency table
c = len(values)
contingency_table = np.zeros((c, c))
for i, value_a in enumerate(values):
for j, value_b in enumerate(values):
contingency_table[i, j] = np.sum((colum_a == value_a) * (colum_b == value_b))
# Compute row and column sums
row_sums = np.sum(contingency_table, axis=1)
col_sums = np.sum(contingency_table, axis=0)
# Build the expected contingency table if independent
expected_contingency_table = np.zeros((c, c))
for i in range(c):
for j in range(c):
expected_contingency_table[i, j] = weight(i,j,c) * (row_sums[i] * col_sums[j]) / n**2
# Compute the expected random agreement
return np.sum(expected_contingency_table)
else:
# For each potential value of the column, compute the marginal probability of each rater
unique_values = np.unique(np.concatenate((colum_a, colum_b)))
expected_independent_agreement = []
for value in unique_values:
marg_probabilities_a = np.mean(samples_a[:, col] == value)
marg_probabilities_b = np.mean(samples_b[:, col] == value)
expected_independent_agreement.append(marg_probabilities_a * marg_probabilities_b)
# Compute the expected random agreement
return np.sum(expected_independent_agreement)
# Compute accuracy (joint probability of agreement) and marginal probability of agreement on each column between samples_a and samples_b
accuracies = np.zeros(ncols)
marg_probabilities = np.zeros(ncols)
for col in range(ncols):
values = None if weight_type is None or questions is None else questions[col].values
accuracies[col] = agreement(samples_a[:, col], samples_b[:, col], values=values)
marg_probabilities[col] = expected_random_agreement(samples_a[:, col], samples_b[:, col], values=values)
# Compute pooled accuracy
average_accuracy = np.mean(accuracies)
# Compute pooled expected random agreement
average_expected_random_agreement = np.mean(marg_probabilities)
# Compute pooled Cohen's Kappa
pooled_cohen_kappa = (average_accuracy - average_expected_random_agreement) / (
1 - average_expected_random_agreement)
return pooled_cohen_kappa
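# --- Hedged usage sketch (illustration only; the ratings below are made up). ---
# Two hypothetical raters score four items (rows) on two questions (columns);
# with weight_type=None this is the plain, identity-weighted pooled kappa.
if __name__ == '__main__':
    rater_a = [[1, 0], [1, 1], [0, 0], [1, 0]]
    rater_b = [[1, 0], [0, 1], [0, 0], [1, 1]]
    print(pooled_cohen_kappa(rater_a, rater_b))  # pooled over both columns, 0.5 for these ratings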
|
[
"numpy.sum",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"numpy.concatenate"
] |
[((2022, 2041), 'numpy.array', 'np.array', (['samples_a'], {}), '(samples_a)\n', (2030, 2041), True, 'import numpy as np\n'), ((2058, 2077), 'numpy.array', 'np.array', (['samples_b'], {}), '(samples_b)\n', (2066, 2077), True, 'import numpy as np\n'), ((4942, 4957), 'numpy.zeros', 'np.zeros', (['ncols'], {}), '(ncols)\n', (4950, 4957), True, 'import numpy as np\n'), ((4983, 4998), 'numpy.zeros', 'np.zeros', (['ncols'], {}), '(ncols)\n', (4991, 4998), True, 'import numpy as np\n'), ((5377, 5396), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (5384, 5396), True, 'import numpy as np\n'), ((5484, 5511), 'numpy.mean', 'np.mean', (['marg_probabilities'], {}), '(marg_probabilities)\n', (5491, 5511), True, 'import numpy as np\n'), ((2667, 2683), 'numpy.zeros', 'np.zeros', (['(c, c)'], {}), '((c, c))\n', (2675, 2683), True, 'import numpy as np\n'), ((2985, 3010), 'numpy.sum', 'np.sum', (['contingency_table'], {}), '(contingency_table)\n', (2991, 3010), True, 'import numpy as np\n'), ((3044, 3071), 'numpy.mean', 'np.mean', (['(colum_a == colum_b)'], {}), '(colum_a == colum_b)\n', (3051, 3071), True, 'import numpy as np\n'), ((3368, 3384), 'numpy.zeros', 'np.zeros', (['(c, c)'], {}), '((c, c))\n', (3376, 3384), True, 'import numpy as np\n'), ((3652, 3685), 'numpy.sum', 'np.sum', (['contingency_table'], {'axis': '(1)'}), '(contingency_table, axis=1)\n', (3658, 3685), True, 'import numpy as np\n'), ((3709, 3742), 'numpy.sum', 'np.sum', (['contingency_table'], {'axis': '(0)'}), '(contingency_table, axis=0)\n', (3715, 3742), True, 'import numpy as np\n'), ((3851, 3867), 'numpy.zeros', 'np.zeros', (['(c, c)'], {}), '((c, c))\n', (3859, 3867), True, 'import numpy as np\n'), ((4112, 4146), 'numpy.sum', 'np.sum', (['expected_contingency_table'], {}), '(expected_contingency_table)\n', (4118, 4146), True, 'import numpy as np\n'), ((4744, 4782), 'numpy.sum', 'np.sum', (['expected_independent_agreement'], {}), '(expected_independent_agreement)\n', (4750, 4782), True, 'import numpy as np\n'), ((4300, 4334), 'numpy.concatenate', 'np.concatenate', (['(colum_a, colum_b)'], {}), '((colum_a, colum_b))\n', (4314, 4334), True, 'import numpy as np\n'), ((4463, 4498), 'numpy.mean', 'np.mean', (['(samples_a[:, col] == value)'], {}), '(samples_a[:, col] == value)\n', (4470, 4498), True, 'import numpy as np\n'), ((4538, 4573), 'numpy.mean', 'np.mean', (['(samples_b[:, col] == value)'], {}), '(samples_b[:, col] == value)\n', (4545, 4573), True, 'import numpy as np\n'), ((3534, 3585), 'numpy.sum', 'np.sum', (['((colum_a == value_a) * (colum_b == value_b))'], {}), '((colum_a == value_a) * (colum_b == value_b))\n', (3540, 3585), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .occ_targets_template import OccTargetsTemplate
from ....utils import common_utils, coords_utils, point_box_utils  # common_utils is needed for rotate_points_along_z below
class OccTargets3D(OccTargetsTemplate):
def __init__(
self,
model_cfg,
voxel_size,
point_cloud_range,
data_cfg,
grid_size,
num_class,
voxel_centers,
):
super().__init__(
model_cfg,
voxel_size,
point_cloud_range,
data_cfg,
grid_size,
num_class,
voxel_centers,
)
self.reg = model_cfg.PARAMS.get("REG", False) # default = True
def create_predict_area(
self, voxel_bnysynxsxnzsz, voxel_num_points_float, batch_size, batch_dict
):
return self.create_predict_area2d(
voxel_bnysynxsxnzsz, voxel_num_points_float, batch_size, batch_dict
)
def forward(self, batch_dict, **kwargs):
# voxels: [M, max_points, ndim] float tensor. only contain points.
# voxel_coords: [M, 3] int32 tensor. zyx format.
# voxel_num_points: [M] int32 tensor.
voxel_features, voxel_num_points, coords = (
batch_dict["voxels"],
batch_dict["voxel_num_points"],
batch_dict["voxel_coords"],
)
# print("voxel_features", voxel_features.shape)
voxel_count = voxel_features.shape[1]
# print("voxel_count", voxel_features.shape[0])
mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0)
batch_dict["voxel_point_mask"] = mask
batch_dict = (
self.create_voxel_res_label(batch_dict, mask)
if self.reg
else self.create_voxel_label(batch_dict, mask)
)
# if test inference speed
# if batch_dict["is_train"]:
# batch_dict = self.create_voxel_res_label(batch_dict, mask)
# else:
# batch_dict["point_dist_mask"] = torch.zeros((batch_dict["gt_boxes"].shape[0], self.ny, self.nx, self.nz * self.sz * self.sy * self.sx), device="cuda")
if "point_drop_inds" in batch_dict.keys():
inds = batch_dict["point_drop_inds"]
mask[inds[:, 0], inds[:, 1]] = torch.zeros_like(
inds[:, 0], dtype=torch.bool
)
batch_dict["final_point_mask"] = mask
return batch_dict
def create_voxel_res_label(self, batch_dict, valid_mask):
occ_pnts = torch.cat(
[
coords_utils.uvd2absxyz(
batch_dict["voxels"][..., 0],
batch_dict["voxels"][..., 1],
batch_dict["voxels"][..., 2],
self.data_cfg.OCC.COORD_TYPE,
),
batch_dict["voxels"][..., 3:],
],
dim=-1,
)
if self.point_coding == "absxyz" or self.point_coding == True:
batch_dict["voxels"] = occ_pnts
elif self.point_coding == "both":
batch_dict["voxels"] = torch.cat(
[occ_pnts[..., :3], batch_dict["voxels"]], dim=-1
)
voxel_features, voxel_coords, gt_boxes_num, gt_boxes, bs = (
occ_pnts,
batch_dict["voxel_coords"],
batch_dict["gt_boxes_num"],
batch_dict["gt_boxes"],
batch_dict["gt_boxes"].shape[0],
)
if self.num_class == 1:
gt_label = (gt_boxes[..., -1:] > 1e-2).to(torch.float32)
gt_boxes = torch.cat([gt_boxes[..., :-1], gt_label], dim=-1)
valid_coords_bnznynx, valid_voxel_features = self.get_valid(
valid_mask, voxel_coords, voxel_features
)
voxelwise_mask = self.get_voxelwise_mask(valid_coords_bnznynx, bs)
vcc_mask = self.create_predict_area3d(bs, valid_coords_bnznynx)
occ_voxelwise_mask = self.filter_occ(
self.occ_from_ocp(
vcc_mask,
batch_dict,
bs,
voxelwise_mask,
valid_voxel_features[..., :3],
valid_coords_bnznynx[..., 0],
empty_sur_thresh=self.data_cfg.OCC.EMPT_SUR_THRESH,
type=self.data_cfg.OCC.COORD_TYPE,
),
occ_pnts,
voxelwise_mask,
)
(
fore_voxelwise_mask,
fore_res_mtrx,
mirr_fore_voxelwise_mask,
mirr_res_mtrx,
) = self.get_fore_mirr_voxelwise_mask_res(
batch_dict,
bs,
valid_coords_bnznynx,
valid_voxel_features,
gt_boxes_num,
gt_boxes,
)
mirr_fore_voxelwise_mask = mirr_fore_voxelwise_mask * (
1 - voxelwise_mask
) # exclude original occupied
mirr_res_mtrx = mirr_res_mtrx * (1 - voxelwise_mask).unsqueeze(1)
if self.model_cfg.TARGETS.TMPLT: # default = True
bm_voxelwise_mask, bm_res_mtrx = self.get_bm_voxelwise_mask_res(
batch_dict, bs, gt_boxes_num, gt_boxes
)
bm_voxelwise_mask = (
bm_voxelwise_mask
* (1 - voxelwise_mask)
* (1 - mirr_fore_voxelwise_mask)
)
bm_res_mtrx = (
bm_res_mtrx
* (1 - voxelwise_mask).unsqueeze(1)
* (1 - mirr_fore_voxelwise_mask).unsqueeze(1)
)
else:
bm_voxelwise_mask = torch.zeros_like(
voxelwise_mask, dtype=voxelwise_mask.dtype, device=voxelwise_mask.device
)
##### forebox_label #####
forebox_label = None
if self.data_cfg.OCC.BOX_WEIGHT != 1.0:
bs, max_num_box, box_c = list(gt_boxes.shape)
forebox_label = torch.zeros(
[bs, self.nz, self.ny, self.nx], dtype=torch.int8, device="cuda"
)
shift = torch.tensor(
np.asarray([[0.0, 0.0, 0.0]]), device="cuda", dtype=torch.float32
)
for i in range(bs):
cur_gt_boxes = gt_boxes[i, : gt_boxes_num[i]]
all_voxel_centers_2d = (
point_box_utils.rotatez(
self.all_voxel_centers_2d, batch_dict["rot_z"][i]
)
if "rot_z" in batch_dict
else self.all_voxel_centers_2d
)
voxel_box_label2d = (
point_box_utils.torch_points_in_box_2d_mask(
all_voxel_centers_2d, cur_gt_boxes, shift=shift[..., :2]
)
.view(self.ny, self.nx)
.nonzero()
)
if voxel_box_label2d.shape[0] > 0:
all_voxel_centers_filtered = self.all_voxel_centers[
:, voxel_box_label2d[:, 0], voxel_box_label2d[:, 1], ...
].reshape(-1, 3)
if "rot_z" in batch_dict:
all_voxel_centers_filtered = point_box_utils.rotatez(
all_voxel_centers_filtered, batch_dict["rot_z"][i]
)
voxel_box_label = point_box_utils.torch_points_in_box_3d_label(
all_voxel_centers_filtered,
cur_gt_boxes,
gt_boxes_num[i],
shift=shift,
)[0]
forebox_label[
i, :, voxel_box_label2d[:, 0], voxel_box_label2d[:, 1]
] = voxel_box_label.view(self.nz, -1)
if self.data_cfg.OCC.DROPOUT_RATE > 1e-3 and batch_dict["is_train"]:
batch_dict = self.dropout(batch_dict, fore_voxelwise_mask)
batch_dict = self.prepare_cls_loss_map(
batch_dict,
vcc_mask,
voxelwise_mask,
occ_voxelwise_mask,
fore_voxelwise_mask,
mirr_fore_voxelwise_mask,
bm_voxelwise_mask,
forebox_label=forebox_label,
)
batch_dict = self.prepare_reg_loss_map(
batch_dict, fore_res_mtrx, mirr_res_mtrx, bm_res_mtrx
)
return batch_dict
def get_bm_voxelwise_mask_res(self, batch_dict, bs, gt_boxes_num, gt_boxes):
bm_voxelwise_mask = torch.zeros(
[bs, self.nz, self.ny, self.nx], dtype=torch.uint8, device="cuda"
)
# bm_points added during augmentation, see BtcDet/btcdet/datasets/augmentor/multi_best_match_querier.py
if "bm_points" in batch_dict and len(batch_dict["bm_points"]) > 0:
bm_binds, bm_carte_points = (
batch_dict["bm_points"][..., 0:1].to(torch.int64),
batch_dict["bm_points"][..., 1:],
)
label_array = torch.nonzero(
point_box_utils.torch_points_in_box_3d_label_batch(
bm_carte_points, bm_binds, gt_boxes, gt_boxes_num, bs
)
)[..., 0]
bm_binds = bm_binds[..., 0][label_array]
bm_carte_points = bm_carte_points[label_array, :]
occ_coords_bm_points = coords_utils.cartesian_occ_coords(
bm_carte_points, type=self.data_cfg.OCC.COORD_TYPE
)
if "rot_z" in batch_dict:
rot_z = batch_dict["rot_z"][bm_binds]
if self.data_cfg.OCC.COORD_TYPE == "cartesian":
noise_rotation = -rot_z * np.pi / 180
occ_coords_bm_points = common_utils.rotate_points_along_z(
occ_coords_bm_points.unsqueeze(1), noise_rotation
).squeeze(1)
else:
occ_coords_bm_points[..., 1] += rot_z
inrange_coords_bm, inrange_inds_bm = self.point2coords_inrange(
occ_coords_bm_points,
self.point_origin_tensor,
self.point_max_tensor,
self.max_grid_tensor,
self.min_grid_tensor,
self.voxel_size,
)
bm_coords = torch.cat(
[
bm_binds[inrange_inds_bm].unsqueeze(-1),
self.xyz2zyx(inrange_coords_bm),
],
dim=-1,
)
bm_res_mtrx = self.get_mean_res(
bm_carte_points[inrange_inds_bm],
bm_coords,
bs,
self.nz,
self.ny,
self.nx,
batch_dict,
rot=True,
)
bm_voxelwise_mask[
bm_coords[..., 0],
bm_coords[..., 1],
bm_coords[..., 2],
bm_coords[..., 3],
] = torch.ones_like(
bm_coords[..., 0], dtype=torch.uint8, device=bm_voxelwise_mask.device
) ##
else:
bm_res_mtrx = torch.zeros(
[bs, 3, self.nz, self.ny, self.nx], dtype=torch.float32, device="cuda"
)
return bm_voxelwise_mask, bm_res_mtrx
def get_mean_res(self, feat, coords, bs, nz, ny, nx, batch_dict, rot=False):
xyz_spatial = torch.zeros(
[bs, 3, nz, ny, nx], dtype=torch.float32, device="cuda"
)
if len(coords) > 0:
uni_coords, inverse_indices, labels_count = torch.unique(
coords, return_inverse=True, return_counts=True, dim=0
)
mean_xyz = (
torch.zeros(
[uni_coords.shape[0], 3], dtype=feat.dtype, device=feat.device
).scatter_add_(
0,
inverse_indices.view(inverse_indices.size(0), 1).expand(-1, 3),
feat[..., :3],
)
/ labels_count.float().unsqueeze(1)
)
# mean_xyz = torch_scatter.scatter_mean(feat[..., :3], inverse_indices, dim=0)
mean_xyz -= self.get_voxel_center_xyz(uni_coords, batch_dict, rot=rot)
xyz_spatial[
uni_coords[..., 0],
:,
uni_coords[..., 1],
uni_coords[..., 2],
uni_coords[..., 3],
] = mean_xyz
return xyz_spatial
def get_voxel_center_xyz(self, coords, batch_dict, rot=True):
voxel_centers = (
coords[:, [3, 2, 1]].float() + 0.5
) * self.voxel_size + self.point_origin_tensor
if self.data_cfg.OCC.COORD_TYPE == "cartesian":
if "rot_z" in batch_dict and rot:
rot_z = batch_dict["rot_z"][coords[:, 0]]
noise_rotation = rot_z * np.pi / 180
voxel_centers = common_utils.rotate_points_along_z(
voxel_centers.unsqueeze(1), noise_rotation
).squeeze(1)
else:
if "rot_z" in batch_dict and rot:
rot_z = batch_dict["rot_z"][coords[:, 0]]
voxel_centers[..., 1] -= rot_z
voxel_centers = coords_utils.uvd2absxyz(
voxel_centers[..., 0],
voxel_centers[..., 1],
voxel_centers[..., 2],
self.data_cfg.OCC.COORD_TYPE,
)
return voxel_centers
def get_fore_mirr_voxelwise_mask_res(
self,
batch_dict,
bs,
valid_coords_bnznynx,
valid_voxel_features,
gt_boxes_num,
gt_boxes,
):
fore_voxelwise_mask, mirr_fore_voxelwise_mask = [
torch.zeros(
[bs, self.nz, self.ny, self.nx], dtype=torch.uint8, device="cuda"
)
for i in range(2)
]
(
fore_inds,
mirr_inbox_point,
mirr_binds,
) = point_box_utils.torch_points_and_sym_in_box_3d_batch(
valid_voxel_features[..., :3],
valid_coords_bnznynx,
gt_boxes,
gt_boxes_num,
bs,
batch_dict["box_mirr_flag"],
)
fore_coords = valid_coords_bnznynx[fore_inds] # b zyx
fore_voxelwise_mask[
fore_coords[..., 0],
fore_coords[..., 1],
fore_coords[..., 2],
fore_coords[..., 3],
] = torch.ones_like(
fore_coords[..., 0], dtype=torch.uint8, device=fore_voxelwise_mask.device
)
fore_res_mtrx = self.get_mean_res(
valid_voxel_features[fore_inds],
fore_coords,
bs,
self.nz,
self.ny,
self.nx,
batch_dict,
rot=True,
)
mirr_res_mtrx = torch.zeros(
[bs, 3, self.nz, self.ny, self.nx],
device=fore_voxelwise_mask.device,
dtype=torch.float32,
)
if mirr_inbox_point is not None:
occ_coords_mirr_points = coords_utils.cartesian_occ_coords(
mirr_inbox_point, type=self.data_cfg.OCC.COORD_TYPE
) # sphere x y z
if "rot_z" in batch_dict:
rot_z = batch_dict["rot_z"][mirr_binds]
if self.data_cfg.OCC.COORD_TYPE == "cartesian":
noise_rotation = -rot_z * np.pi / 180
occ_coords_mirr_points = common_utils.rotate_points_along_z(
occ_coords_mirr_points.unsqueeze(1), noise_rotation
).squeeze(1)
else:
occ_coords_mirr_points[..., 1] += rot_z
inrange_coords_mirr, inrange_inds_mirr = self.point2coords_inrange(
occ_coords_mirr_points,
self.point_origin_tensor,
self.point_max_tensor,
self.max_grid_tensor,
self.min_grid_tensor,
self.voxel_size,
)
mirr_coords = torch.cat(
[
mirr_binds[inrange_inds_mirr].unsqueeze(-1),
self.xyz2zyx(inrange_coords_mirr),
],
dim=-1,
) # mirror sphere b z y x
mirr_res_mtrx = self.get_mean_res(
mirr_inbox_point[inrange_inds_mirr],
mirr_coords,
bs,
self.nz,
self.ny,
self.nx,
batch_dict,
rot=True,
)
mirr_fore_voxelwise_mask[
mirr_coords[..., 0],
mirr_coords[..., 1],
mirr_coords[..., 2],
mirr_coords[..., 3],
] = torch.ones_like(
mirr_coords[..., 0],
dtype=torch.uint8,
device=mirr_fore_voxelwise_mask.device,
)
return (
fore_voxelwise_mask,
fore_res_mtrx,
mirr_fore_voxelwise_mask,
mirr_res_mtrx,
)
|
[
"torch.ones_like",
"torch.unique",
"torch.zeros_like",
"numpy.asarray",
"torch.cat",
"torch.zeros"
] |
[((8412, 8490), 'torch.zeros', 'torch.zeros', (['[bs, self.nz, self.ny, self.nx]'], {'dtype': 'torch.uint8', 'device': '"""cuda"""'}), "([bs, self.nz, self.ny, self.nx], dtype=torch.uint8, device='cuda')\n", (8423, 8490), False, 'import torch\n'), ((11282, 11350), 'torch.zeros', 'torch.zeros', (['[bs, 3, nz, ny, nx]'], {'dtype': 'torch.float32', 'device': '"""cuda"""'}), "([bs, 3, nz, ny, nx], dtype=torch.float32, device='cuda')\n", (11293, 11350), False, 'import torch\n'), ((14353, 14448), 'torch.ones_like', 'torch.ones_like', (['fore_coords[..., 0]'], {'dtype': 'torch.uint8', 'device': 'fore_voxelwise_mask.device'}), '(fore_coords[..., 0], dtype=torch.uint8, device=\n fore_voxelwise_mask.device)\n', (14368, 14448), False, 'import torch\n'), ((14738, 14846), 'torch.zeros', 'torch.zeros', (['[bs, 3, self.nz, self.ny, self.nx]'], {'device': 'fore_voxelwise_mask.device', 'dtype': 'torch.float32'}), '([bs, 3, self.nz, self.ny, self.nx], device=fore_voxelwise_mask.\n device, dtype=torch.float32)\n', (14749, 14846), False, 'import torch\n'), ((2286, 2332), 'torch.zeros_like', 'torch.zeros_like', (['inds[:, 0]'], {'dtype': 'torch.bool'}), '(inds[:, 0], dtype=torch.bool)\n', (2302, 2332), False, 'import torch\n'), ((3564, 3613), 'torch.cat', 'torch.cat', (['[gt_boxes[..., :-1], gt_label]'], {'dim': '(-1)'}), '([gt_boxes[..., :-1], gt_label], dim=-1)\n', (3573, 3613), False, 'import torch\n'), ((5529, 5624), 'torch.zeros_like', 'torch.zeros_like', (['voxelwise_mask'], {'dtype': 'voxelwise_mask.dtype', 'device': 'voxelwise_mask.device'}), '(voxelwise_mask, dtype=voxelwise_mask.dtype, device=\n voxelwise_mask.device)\n', (5545, 5624), False, 'import torch\n'), ((5847, 5924), 'torch.zeros', 'torch.zeros', (['[bs, self.nz, self.ny, self.nx]'], {'dtype': 'torch.int8', 'device': '"""cuda"""'}), "([bs, self.nz, self.ny, self.nx], dtype=torch.int8, device='cuda')\n", (5858, 5924), False, 'import torch\n'), ((10856, 10947), 'torch.ones_like', 'torch.ones_like', (['bm_coords[..., 0]'], {'dtype': 'torch.uint8', 'device': 'bm_voxelwise_mask.device'}), '(bm_coords[..., 0], dtype=torch.uint8, device=\n bm_voxelwise_mask.device)\n', (10871, 10947), False, 'import torch\n'), ((11017, 11105), 'torch.zeros', 'torch.zeros', (['[bs, 3, self.nz, self.ny, self.nx]'], {'dtype': 'torch.float32', 'device': '"""cuda"""'}), "([bs, 3, self.nz, self.ny, self.nx], dtype=torch.float32, device\n ='cuda')\n", (11028, 11105), False, 'import torch\n'), ((11457, 11525), 'torch.unique', 'torch.unique', (['coords'], {'return_inverse': '(True)', 'return_counts': '(True)', 'dim': '(0)'}), '(coords, return_inverse=True, return_counts=True, dim=0)\n', (11469, 11525), False, 'import torch\n'), ((13623, 13701), 'torch.zeros', 'torch.zeros', (['[bs, self.nz, self.ny, self.nx]'], {'dtype': 'torch.uint8', 'device': '"""cuda"""'}), "([bs, self.nz, self.ny, self.nx], dtype=torch.uint8, device='cuda')\n", (13634, 13701), False, 'import torch\n'), ((16665, 16765), 'torch.ones_like', 'torch.ones_like', (['mirr_coords[..., 0]'], {'dtype': 'torch.uint8', 'device': 'mirr_fore_voxelwise_mask.device'}), '(mirr_coords[..., 0], dtype=torch.uint8, device=\n mirr_fore_voxelwise_mask.device)\n', (16680, 16765), False, 'import torch\n'), ((3087, 3147), 'torch.cat', 'torch.cat', (["[occ_pnts[..., :3], batch_dict['voxels']]"], {'dim': '(-1)'}), "([occ_pnts[..., :3], batch_dict['voxels']], dim=-1)\n", (3096, 3147), False, 'import torch\n'), ((6005, 6034), 'numpy.asarray', 'np.asarray', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (6015, 6034), True, 
'import numpy as np\n'), ((11597, 11672), 'torch.zeros', 'torch.zeros', (['[uni_coords.shape[0], 3]'], {'dtype': 'feat.dtype', 'device': 'feat.device'}), '([uni_coords.shape[0], 3], dtype=feat.dtype, device=feat.device)\n', (11608, 11672), False, 'import torch\n')]
|
import csv
from collections import defaultdict
import numpy as np
from PySAM.ResourceTools import SAM_CSV_to_solar_data
from hybrid.keys import get_developer_nrel_gov_key
from hybrid.log import hybrid_logger as logger
from hybrid.resource.resource import *
class SolarResource(Resource):
"""
Class to manage Solar Resource data
"""
def __init__(self, lat, lon, year, path_resource="", filepath="", **kwargs):
"""
:param lat: float
:param lon: float
:param year: int
:param path_resource: directory where to save downloaded files
:param filepath: file path of resource file to load
:param kwargs:
"""
super().__init__(lat, lon, year)
if os.path.isdir(path_resource):
self.path_resource = path_resource
self.solar_attributes = 'ghi,dhi,dni,wind_speed,air_temperature,solar_zenith_angle'
self.path_resource = os.path.join(self.path_resource, 'solar')
# Force override any internal definitions if passed in
self.__dict__.update(kwargs)
# resource_files files
if filepath == "":
filepath = os.path.join(self.path_resource,
str(lat) + "_" + str(lon) + "_psmv3_" + str(self.interval) + "_" + str(
year) + ".csv")
self.filename = filepath
self.check_download_dir()
if not os.path.isfile(self.filename):
self.download_resource()
self.format_data()
logger.info("SolarResource: {}".format(self.filename))
def download_resource(self):
url = 'https://developer.nrel.gov/api/nsrdb/v2/solar/psm3-download.csv?wkt=POINT({lon}+{lat})&names={year}&leap_day={leap}&interval={interval}&utc={utc}&full_name={name}&email={email}&affiliation={affiliation}&mailing_list={mailing_list}&reason={reason}&api_key={api}&attributes={attr}'.format(
year=self.year, lat=self.latitude, lon=self.longitude, leap=self.leap_year, interval=self.interval,
utc=self.utc, name=self.name, email=self.email,
mailing_list=self.mailing_list, affiliation=self.affiliation, reason=self.reason, api=get_developer_nrel_gov_key(),
attr=self.solar_attributes)
success = self.call_api(url, filename=self.filename)
return success
def format_data(self):
"""
Format as 'solar_resource_data' dictionary for use in PySAM.
"""
if not os.path.isfile(self.filename):
raise FileNotFoundError(self.filename + " does not exist. Try `download_resource` first.")
self.data = self.filename
@Resource.data.setter
def data(self, data_dict):
"""
Sets the solar resource data
For hourly resource, year, month, day, hour, and minute will be auto-filled if not provided.
:key tz: time zone, not UTC
:key elev: elevation in meters
:key year: array
:key month: array
:key day: array
:key hour: array
:key minute: array
:key dn: array, direct normal irradiance
        :key df: array, diffuse horizontal irradiance
:key wspd: array, wind speed [m/s]
:key tdry: array, dry bulb temp [C]
"""
self._data = SAM_CSV_to_solar_data(data_dict)
def roll_timezone(self, roll_hours, timezone):
"""
:param roll_hours:
:param timezone:
:return:
"""
rollable_keys = ['dn', 'df', 'gh', 'wspd', 'tdry']
        for key in self._data.keys():
if any(k == key for k in rollable_keys):
roll_range = range(0, -roll_hours + 1)
weather_array = np.array(self._data[key])
weather_array_rolled = np.delete(weather_array, roll_range)
weather_array_rolled = np.pad(weather_array_rolled, (0, -roll_hours + 1), 'constant')
self._data[key] = weather_array_rolled.tolist()
self._data['tz'] = timezone
logger.info('Rolled solar data by {} hours for timezone {}'.format(roll_hours, timezone))
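# --- Hedged usage sketch (illustration only; coordinates, year and directory are made up). ---
# Downloads (or reuses a cached copy of) an NSRDB PSM3 file for one site/year and
# exposes it in SAM's 'solar_resource_data' format via the inherited data property.
# A developer.nrel.gov API key must already be configured through hybrid.keys.
#
#   resource = SolarResource(lat=35.2, lon=-101.9, year=2012,
#                            path_resource='resource_files')
#   solar_data = resource.data   # dict produced by SAM_CSV_to_solar_data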
|
[
"numpy.pad",
"hybrid.keys.get_developer_nrel_gov_key",
"numpy.array",
"PySAM.ResourceTools.SAM_CSV_to_solar_data",
"numpy.delete"
] |
[((3314, 3346), 'PySAM.ResourceTools.SAM_CSV_to_solar_data', 'SAM_CSV_to_solar_data', (['data_dict'], {}), '(data_dict)\n', (3335, 3346), False, 'from PySAM.ResourceTools import SAM_CSV_to_solar_data\n'), ((2219, 2247), 'hybrid.keys.get_developer_nrel_gov_key', 'get_developer_nrel_gov_key', ([], {}), '()\n', (2245, 2247), False, 'from hybrid.keys import get_developer_nrel_gov_key\n'), ((3727, 3752), 'numpy.array', 'np.array', (['self._data[key]'], {}), '(self._data[key])\n', (3735, 3752), True, 'import numpy as np\n'), ((3793, 3829), 'numpy.delete', 'np.delete', (['weather_array', 'roll_range'], {}), '(weather_array, roll_range)\n', (3802, 3829), True, 'import numpy as np\n'), ((3869, 3931), 'numpy.pad', 'np.pad', (['weather_array_rolled', '(0, -roll_hours + 1)', '"""constant"""'], {}), "(weather_array_rolled, (0, -roll_hours + 1), 'constant')\n", (3875, 3931), True, 'import numpy as np\n')]
|
import os
from os import path
import numpy as np
import pytest
from astropy import cosmology as cosmo
import autofit as af
import autolens as al
from autolens.fit.fit import InterferometerFit
from test_autolens.mock import mock_pipeline
pytestmark = pytest.mark.filterwarnings(
"ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of "
"`arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result "
"either in an error or a different result."
)
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(scope="session", autouse=True)
def do_something():
af.conf.instance = af.conf.Config(
"{}/../test_files/config/phase_interferometer_7".format(directory)
)
def clean_images():
try:
os.remove("{}/source_lens_phase/source_image_0.fits".format(directory))
os.remove("{}/source_lens_phase/lens_image_0.fits".format(directory))
os.remove("{}/source_lens_phase/model_image_0.fits".format(directory))
except FileNotFoundError:
pass
af.conf.instance.dataset_path = directory
class TestPhase:
def test__make_analysis__masks_visibilities_and_noise_map_correctly(
self, phase_interferometer_7, interferometer_7, visibilities_mask_7x2
):
analysis = phase_interferometer_7.make_analysis(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
assert (
analysis.masked_interferometer.visibilities == interferometer_7.visibilities
).all()
assert (
analysis.masked_interferometer.noise_map == interferometer_7.noise_map
).all()
def test__make_analysis__phase_info_is_made(
self, phase_interferometer_7, interferometer_7, visibilities_mask_7x2
):
phase_interferometer_7.make_analysis(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
file_phase_info = "{}/{}".format(
phase_interferometer_7.optimizer.paths.phase_output_path, "phase.info"
)
phase_info = open(file_phase_info, "r")
optimizer = phase_info.readline()
sub_size = phase_info.readline()
primary_beam_shape_2d = phase_info.readline()
positions_threshold = phase_info.readline()
cosmology = phase_info.readline()
phase_info.close()
assert optimizer == "Optimizer = MockNLO \n"
assert sub_size == "Sub-grid size = 2 \n"
assert primary_beam_shape_2d == "Primary Beam shape = None \n"
assert positions_threshold == "Positions Threshold = None \n"
assert (
cosmology
== 'Cosmology = FlatLambdaCDM(name="Planck15", H0=67.7 km / (Mpc s), Om0=0.307, Tcmb0=2.725 K, '
"Neff=3.05, m_nu=[0. 0. 0.06] eV, Ob0=0.0486) \n"
)
def test__fit_using_interferometer(
self, interferometer_7, mask_7x7, visibilities_mask_7x2
):
phase_interferometer_7 = al.PhaseInterferometer(
optimizer_class=mock_pipeline.MockNLO,
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
),
real_space_mask=mask_7x7,
phase_name="test_phase_test_fit",
)
result = phase_interferometer_7.run(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
assert isinstance(result.instance.galaxies[0], al.Galaxy)
assert isinstance(result.instance.galaxies[0], al.Galaxy)
def test_modify_visibilities(
self, interferometer_7, mask_7x7, visibilities_mask_7x2
):
class MyPhase(al.PhaseInterferometer):
def modify_visibilities(self, visibilities, results):
assert interferometer_7.visibilities.shape_1d == visibilities.shape_1d
visibilities = al.visibilities.full(fill_value=20.0, shape_1d=(7,))
return visibilities
phase_interferometer_7 = MyPhase(
phase_name="phase_interferometer_7", real_space_mask=mask_7x7
)
analysis = phase_interferometer_7.make_analysis(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
assert (
analysis.masked_dataset.visibilities == 20.0 * np.ones(shape=(7, 2))
).all()
def test__phase_can_receive_hyper_image_and_noise_maps(self, mask_7x7):
phase_interferometer_7 = al.PhaseInterferometer(
galaxies=dict(
lens=al.GalaxyModel(redshift=al.Redshift),
lens1=al.GalaxyModel(redshift=al.Redshift),
),
real_space_mask=mask_7x7,
hyper_background_noise=al.hyper_data.HyperBackgroundNoise,
optimizer_class=af.MultiNest,
phase_name="test_phase",
)
instance = phase_interferometer_7.model.instance_from_physical_vector(
[0.1, 0.2, 0.3]
)
assert instance.galaxies[0].redshift == 0.1
assert instance.galaxies[1].redshift == 0.2
assert instance.hyper_background_noise.noise_scale == 0.3
def test__extended_with_hyper_and_pixelizations(self, phase_interferometer_7):
phase_extended = phase_interferometer_7.extend_with_multiple_hyper_phases(
hyper_galaxy=False, inversion=False
)
assert phase_extended == phase_interferometer_7
phase_extended = phase_interferometer_7.extend_with_multiple_hyper_phases(
inversion=True
)
assert type(phase_extended.hyper_phases[0]) == al.InversionPhase
phase_extended = phase_interferometer_7.extend_with_multiple_hyper_phases(
hyper_galaxy=True, inversion=False
)
assert type(phase_extended.hyper_phases[0]) == al.HyperGalaxyPhase
phase_extended = phase_interferometer_7.extend_with_multiple_hyper_phases(
hyper_galaxy=False, inversion=True
)
assert type(phase_extended.hyper_phases[0]) == al.InversionPhase
phase_extended = phase_interferometer_7.extend_with_multiple_hyper_phases(
hyper_galaxy=True, inversion=True
)
assert type(phase_extended.hyper_phases[0]) == al.HyperGalaxyPhase
assert type(phase_extended.hyper_phases[1]) == al.InversionPhase
def test__fit_figure_of_merit__matches_correct_fit_given_galaxy_profiles(
self, interferometer_7, mask_7x7, visibilities_mask_7x2
):
lens_galaxy = al.Galaxy(
redshift=0.5, light=al.lp.EllipticalSersic(intensity=0.1)
)
phase_interferometer_7 = al.PhaseInterferometer(
real_space_mask=mask_7x7,
galaxies=[lens_galaxy],
cosmology=cosmo.FLRW,
sub_size=2,
phase_name="test_phase",
)
analysis = phase_interferometer_7.make_analysis(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
instance = phase_interferometer_7.model.instance_from_unit_vector([])
fit_figure_of_merit = analysis.fit(instance=instance)
real_space_mask = phase_interferometer_7.meta_interferometer_fit.mask_with_phase_sub_size_from_mask(
mask=mask_7x7
)
masked_interferometer = al.masked.interferometer(
interferometer=interferometer_7,
visibilities_mask=visibilities_mask_7x2,
real_space_mask=real_space_mask,
)
tracer = analysis.tracer_for_instance(instance=instance)
fit = al.fit(masked_dataset=masked_interferometer, tracer=tracer)
assert fit.likelihood == fit_figure_of_merit
def test__fit_figure_of_merit__includes_hyper_image_and_noise__matches_fit(
self, interferometer_7, mask_7x7, visibilities_mask_7x2
):
hyper_background_noise = al.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
lens_galaxy = al.Galaxy(
redshift=0.5, light=al.lp.EllipticalSersic(intensity=0.1)
)
phase_interferometer_7 = al.PhaseInterferometer(
real_space_mask=mask_7x7,
galaxies=[lens_galaxy],
hyper_background_noise=hyper_background_noise,
cosmology=cosmo.FLRW,
sub_size=4,
phase_name="test_phase",
)
analysis = phase_interferometer_7.make_analysis(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
instance = phase_interferometer_7.model.instance_from_unit_vector([])
fit_figure_of_merit = analysis.fit(instance=instance)
real_space_mask = phase_interferometer_7.meta_interferometer_fit.mask_with_phase_sub_size_from_mask(
mask=mask_7x7
)
assert real_space_mask.sub_size == 4
masked_interferometer = al.masked.interferometer(
interferometer=interferometer_7,
visibilities_mask=visibilities_mask_7x2,
real_space_mask=real_space_mask,
)
tracer = analysis.tracer_for_instance(instance=instance)
fit = InterferometerFit(
masked_interferometer=masked_interferometer,
tracer=tracer,
hyper_background_noise=hyper_background_noise,
)
assert fit.likelihood == fit_figure_of_merit
|
[
"autolens.masked.interferometer",
"autolens.PhaseInterferometer",
"os.path.realpath",
"pytest.fixture",
"numpy.ones",
"autolens.visibilities.full",
"autolens.GalaxyModel",
"autolens.fit",
"autolens.hyper_data.HyperBackgroundNoise",
"autolens.fit.fit.InterferometerFit",
"pytest.mark.filterwarnings",
"autolens.lp.EllipticalSersic"
] |
[((253, 558), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result either in an error or a different result."""'], {}), "(\n 'ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result either in an error or a different result.'\n )\n", (279, 558), False, 'import pytest\n'), ((623, 668), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (637, 668), False, 'import pytest\n'), ((595, 618), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (608, 618), False, 'from os import path\n'), ((6730, 6865), 'autolens.PhaseInterferometer', 'al.PhaseInterferometer', ([], {'real_space_mask': 'mask_7x7', 'galaxies': '[lens_galaxy]', 'cosmology': 'cosmo.FLRW', 'sub_size': '(2)', 'phase_name': '"""test_phase"""'}), "(real_space_mask=mask_7x7, galaxies=[lens_galaxy],\n cosmology=cosmo.FLRW, sub_size=2, phase_name='test_phase')\n", (6752, 6865), True, 'import autolens as al\n'), ((7384, 7520), 'autolens.masked.interferometer', 'al.masked.interferometer', ([], {'interferometer': 'interferometer_7', 'visibilities_mask': 'visibilities_mask_7x2', 'real_space_mask': 'real_space_mask'}), '(interferometer=interferometer_7, visibilities_mask\n =visibilities_mask_7x2, real_space_mask=real_space_mask)\n', (7408, 7520), True, 'import autolens as al\n'), ((7643, 7702), 'autolens.fit', 'al.fit', ([], {'masked_dataset': 'masked_interferometer', 'tracer': 'tracer'}), '(masked_dataset=masked_interferometer, tracer=tracer)\n', (7649, 7702), True, 'import autolens as al\n'), ((7942, 7993), 'autolens.hyper_data.HyperBackgroundNoise', 'al.hyper_data.HyperBackgroundNoise', ([], {'noise_scale': '(1.0)'}), '(noise_scale=1.0)\n', (7976, 7993), True, 'import autolens as al\n'), ((8142, 8328), 'autolens.PhaseInterferometer', 'al.PhaseInterferometer', ([], {'real_space_mask': 'mask_7x7', 'galaxies': '[lens_galaxy]', 'hyper_background_noise': 'hyper_background_noise', 'cosmology': 'cosmo.FLRW', 'sub_size': '(4)', 'phase_name': '"""test_phase"""'}), "(real_space_mask=mask_7x7, galaxies=[lens_galaxy],\n hyper_background_noise=hyper_background_noise, cosmology=cosmo.FLRW,\n sub_size=4, phase_name='test_phase')\n", (8164, 8328), True, 'import autolens as al\n'), ((8901, 9037), 'autolens.masked.interferometer', 'al.masked.interferometer', ([], {'interferometer': 'interferometer_7', 'visibilities_mask': 'visibilities_mask_7x2', 'real_space_mask': 'real_space_mask'}), '(interferometer=interferometer_7, visibilities_mask\n =visibilities_mask_7x2, real_space_mask=real_space_mask)\n', (8925, 9037), True, 'import autolens as al\n'), ((9159, 9288), 'autolens.fit.fit.InterferometerFit', 'InterferometerFit', ([], {'masked_interferometer': 'masked_interferometer', 'tracer': 'tracer', 'hyper_background_noise': 'hyper_background_noise'}), '(masked_interferometer=masked_interferometer, tracer=\n tracer, hyper_background_noise=hyper_background_noise)\n', (9176, 9288), False, 'from autolens.fit.fit import InterferometerFit\n'), ((3995, 4047), 'autolens.visibilities.full', 'al.visibilities.full', ([], {'fill_value': '(20.0)', 'shape_1d': '(7,)'}), 
'(fill_value=20.0, shape_1d=(7,))\n', (4015, 4047), True, 'import autolens as al\n'), ((6648, 6685), 'autolens.lp.EllipticalSersic', 'al.lp.EllipticalSersic', ([], {'intensity': '(0.1)'}), '(intensity=0.1)\n', (6670, 6685), True, 'import autolens as al\n'), ((8060, 8097), 'autolens.lp.EllipticalSersic', 'al.lp.EllipticalSersic', ([], {'intensity': '(0.1)'}), '(intensity=0.1)\n', (8082, 8097), True, 'import autolens as al\n'), ((3153, 3211), 'autolens.GalaxyModel', 'al.GalaxyModel', ([], {'redshift': '(0.5)', 'light': 'al.lp.EllipticalSersic'}), '(redshift=0.5, light=al.lp.EllipticalSersic)\n', (3167, 3211), True, 'import autolens as al\n'), ((3236, 3294), 'autolens.GalaxyModel', 'al.GalaxyModel', ([], {'redshift': '(1.0)', 'light': 'al.lp.EllipticalSersic'}), '(redshift=1.0, light=al.lp.EllipticalSersic)\n', (3250, 3294), True, 'import autolens as al\n'), ((4420, 4441), 'numpy.ones', 'np.ones', ([], {'shape': '(7, 2)'}), '(shape=(7, 2))\n', (4427, 4441), True, 'import numpy as np\n'), ((4640, 4676), 'autolens.GalaxyModel', 'al.GalaxyModel', ([], {'redshift': 'al.Redshift'}), '(redshift=al.Redshift)\n', (4654, 4676), True, 'import autolens as al\n'), ((4700, 4736), 'autolens.GalaxyModel', 'al.GalaxyModel', ([], {'redshift': 'al.Redshift'}), '(redshift=al.Redshift)\n', (4714, 4736), True, 'import autolens as al\n')]
|
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
import functools
import numpy as np
from sklearn.metrics import recall_score
from fairlearn.metrics._annotated_metric_function import AnnotatedMetricFunction
def test_constructor_unnamed():
fc = AnnotatedMetricFunction(func=recall_score, name=None)
assert fc.name == recall_score.__name__
assert np.array_equal(fc.postional_argument_names, ["y_true", "y_pred"])
assert isinstance(fc.kw_argument_mapping, dict)
assert len(fc.kw_argument_mapping) == 0
def test_constructor_no_name(recwarn):
# Tests case where no name is given and the function has no __name__
my_func = functools.partial(recall_score, pos_label=0)
fc = AnnotatedMetricFunction(func=my_func, name=None)
assert fc.name == "metric"
assert np.array_equal(fc.postional_argument_names, ["y_true", "y_pred"])
assert isinstance(fc.kw_argument_mapping, dict)
assert len(fc.kw_argument_mapping) == 0
assert len(recwarn) == 1
assert str(recwarn[0].message) == "Supplied 'func' had no __name__ attribute"
def test_constructor_named():
fc = AnnotatedMetricFunction(func=recall_score, name="OverrideName")
assert fc.name == "OverrideName"
assert np.array_equal(fc.postional_argument_names, ["y_true", "y_pred"])
assert isinstance(fc.kw_argument_mapping, dict)
assert len(fc.kw_argument_mapping) == 0
|
[
"numpy.array_equal",
"functools.partial",
"fairlearn.metrics._annotated_metric_function.AnnotatedMetricFunction"
] |
[((304, 357), 'fairlearn.metrics._annotated_metric_function.AnnotatedMetricFunction', 'AnnotatedMetricFunction', ([], {'func': 'recall_score', 'name': 'None'}), '(func=recall_score, name=None)\n', (327, 357), False, 'from fairlearn.metrics._annotated_metric_function import AnnotatedMetricFunction\n'), ((413, 478), 'numpy.array_equal', 'np.array_equal', (['fc.postional_argument_names', "['y_true', 'y_pred']"], {}), "(fc.postional_argument_names, ['y_true', 'y_pred'])\n", (427, 478), True, 'import numpy as np\n'), ((703, 747), 'functools.partial', 'functools.partial', (['recall_score'], {'pos_label': '(0)'}), '(recall_score, pos_label=0)\n', (720, 747), False, 'import functools\n'), ((758, 806), 'fairlearn.metrics._annotated_metric_function.AnnotatedMetricFunction', 'AnnotatedMetricFunction', ([], {'func': 'my_func', 'name': 'None'}), '(func=my_func, name=None)\n', (781, 806), False, 'from fairlearn.metrics._annotated_metric_function import AnnotatedMetricFunction\n'), ((849, 914), 'numpy.array_equal', 'np.array_equal', (['fc.postional_argument_names', "['y_true', 'y_pred']"], {}), "(fc.postional_argument_names, ['y_true', 'y_pred'])\n", (863, 914), True, 'import numpy as np\n'), ((1163, 1226), 'fairlearn.metrics._annotated_metric_function.AnnotatedMetricFunction', 'AnnotatedMetricFunction', ([], {'func': 'recall_score', 'name': '"""OverrideName"""'}), "(func=recall_score, name='OverrideName')\n", (1186, 1226), False, 'from fairlearn.metrics._annotated_metric_function import AnnotatedMetricFunction\n'), ((1275, 1340), 'numpy.array_equal', 'np.array_equal', (['fc.postional_argument_names', "['y_true', 'y_pred']"], {}), "(fc.postional_argument_names, ['y_true', 'y_pred'])\n", (1289, 1340), True, 'import numpy as np\n')]
|
# coding: utf-8
""" demo on forward 2D """
# Copyright (c) <NAME>. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import numpy as np
import pyeit.eit.protocol as protocol
import pyeit.mesh as mesh
from pyeit.eit.fem import Forward
from pyeit.mesh.shape import thorax
from pyeit.mesh.wrapper import PyEITAnomaly_Circle
""" 0. build mesh """
n_el = 16 # nb of electrodes
use_customize_shape = False
if use_customize_shape:
    # Mesh shape is specified with fd parameter in the instantiation, e.g.: fd=thorax
mesh_obj = mesh.create(n_el, h0=0.1, fd=thorax)
else:
mesh_obj = mesh.create(n_el, h0=0.1)
el_pos = mesh_obj.el_pos
# extract node, element, alpha
pts = mesh_obj.node
tri = mesh_obj.element
x, y = pts[:, 0], pts[:, 1]
mesh_obj.print_stats()
# change permittivity
anomaly = PyEITAnomaly_Circle(center=[0.4, 0.5], r=0.2, perm=100.0)
mesh_new = mesh.set_perm(mesh_obj, anomaly=anomaly, background=1.0)
perm = mesh_new.perm
""" 1. FEM forward simulations """
# setup EIT scan conditions
protocol_obj = protocol.create(n_el, dist_exc=7, step_meas=1, parser_meas="std")
# Define electrode current sink and current source
ex_line = protocol_obj.ex_mat[0].ravel()
# calculate simulated data using FEM
fwd = Forward(mesh_new)
f = fwd.solve(ex_line)
f = np.real(f)
""" 2. plot """
fig = plt.figure()
ax1 = fig.add_subplot(111)
# draw equi-potential lines
vf = np.linspace(min(f), max(f), 32)
# vf = np.sort(f[el_pos])
# Draw contour lines on an unstructured triangular grid.
ax1.tricontour(x, y, tri, f, vf, cmap=plt.cm.viridis)
# draw mesh structure
# Create a pseudocolor plot of an unstructured triangular grid
ax1.tripcolor(
x,
y,
tri,
np.real(perm),
edgecolors="k",
shading="flat",
alpha=0.5,
cmap=plt.cm.Greys,
)
# draw electrodes
ax1.plot(x[el_pos], y[el_pos], "ro")
for i, e in enumerate(el_pos):
ax1.text(x[e], y[e], str(i + 1), size=12)
ax1.set_title("equi-potential lines")
# clean up
ax1.set_aspect("equal")
ax1.set_ylim([-1.2, 1.2])
ax1.set_xlim([-1.2, 1.2])
fig.set_size_inches(6, 6)
# fig.savefig('demo_bp.png', dpi=96)
plt.show()
|
[
"pyeit.mesh.wrapper.PyEITAnomaly_Circle",
"matplotlib.pyplot.show",
"pyeit.eit.protocol.create",
"pyeit.mesh.set_perm",
"pyeit.eit.fem.Forward",
"matplotlib.pyplot.figure",
"numpy.real",
"pyeit.mesh.create"
] |
[((938, 995), 'pyeit.mesh.wrapper.PyEITAnomaly_Circle', 'PyEITAnomaly_Circle', ([], {'center': '[0.4, 0.5]', 'r': '(0.2)', 'perm': '(100.0)'}), '(center=[0.4, 0.5], r=0.2, perm=100.0)\n', (957, 995), False, 'from pyeit.mesh.wrapper import PyEITAnomaly_Circle\n'), ((1007, 1063), 'pyeit.mesh.set_perm', 'mesh.set_perm', (['mesh_obj'], {'anomaly': 'anomaly', 'background': '(1.0)'}), '(mesh_obj, anomaly=anomaly, background=1.0)\n', (1020, 1063), True, 'import pyeit.mesh as mesh\n'), ((1164, 1229), 'pyeit.eit.protocol.create', 'protocol.create', (['n_el'], {'dist_exc': '(7)', 'step_meas': '(1)', 'parser_meas': '"""std"""'}), "(n_el, dist_exc=7, step_meas=1, parser_meas='std')\n", (1179, 1229), True, 'import pyeit.eit.protocol as protocol\n'), ((1367, 1384), 'pyeit.eit.fem.Forward', 'Forward', (['mesh_new'], {}), '(mesh_new)\n', (1374, 1384), False, 'from pyeit.eit.fem import Forward\n'), ((1412, 1422), 'numpy.real', 'np.real', (['f'], {}), '(f)\n', (1419, 1422), True, 'import numpy as np\n'), ((1446, 1458), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1456, 1458), True, 'import matplotlib.pyplot as plt\n'), ((2231, 2241), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2239, 2241), True, 'import matplotlib.pyplot as plt\n'), ((670, 706), 'pyeit.mesh.create', 'mesh.create', (['n_el'], {'h0': '(0.1)', 'fd': 'thorax'}), '(n_el, h0=0.1, fd=thorax)\n', (681, 706), True, 'import pyeit.mesh as mesh\n'), ((728, 753), 'pyeit.mesh.create', 'mesh.create', (['n_el'], {'h0': '(0.1)'}), '(n_el, h0=0.1)\n', (739, 753), True, 'import pyeit.mesh as mesh\n'), ((1816, 1829), 'numpy.real', 'np.real', (['perm'], {}), '(perm)\n', (1823, 1829), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.spatial.distance import cdist
# reference vector generation
def das_dennis(n_part, n_obj):
if n_part == 0:
return np.full((1, n_obj), 1 / n_obj)
else:
ref_dirs = []
ref_dir = np.full(n_obj, np.nan)
das_dennis_recursion(ref_dirs, ref_dir, n_part, n_part, 0)
return np.concatenate(ref_dirs, axis=0)
def das_dennis_recursion(ref_dirs, ref_dir, n_part, beta, depth):
if depth == len(ref_dir) - 1:
ref_dir[depth] = beta / (1.0 * n_part)
ref_dir = ref_dir / np.sqrt( np.sum(ref_dir ** 2) )
ref_dirs.append(ref_dir[None, :])
else:
for i in range(beta + 1):
ref_dir[depth] = 1.0 * i / (1.0 * n_part)
das_dennis_recursion(ref_dirs, np.copy(ref_dir), n_part, beta - i, depth + 1)
def neighboring_angle(ref_dirs):
cosine_refdirs = np.dot(ref_dirs, ref_dirs.T)
sorted_cosine_refdirs = - np.sort(- cosine_refdirs, axis=1)
arccosine_refdirs = np.arccos( np.clip(sorted_cosine_refdirs[:,1], 0, 1) )
return arccosine_refdirs
|
[
"numpy.full",
"numpy.sum",
"numpy.copy",
"numpy.clip",
"numpy.sort",
"numpy.dot",
"numpy.concatenate"
] |
[((869, 897), 'numpy.dot', 'np.dot', (['ref_dirs', 'ref_dirs.T'], {}), '(ref_dirs, ref_dirs.T)\n', (875, 897), True, 'import numpy as np\n'), ((157, 187), 'numpy.full', 'np.full', (['(1, n_obj)', '(1 / n_obj)'], {}), '((1, n_obj), 1 / n_obj)\n', (164, 187), True, 'import numpy as np\n'), ((238, 260), 'numpy.full', 'np.full', (['n_obj', 'np.nan'], {}), '(n_obj, np.nan)\n', (245, 260), True, 'import numpy as np\n'), ((343, 375), 'numpy.concatenate', 'np.concatenate', (['ref_dirs'], {'axis': '(0)'}), '(ref_dirs, axis=0)\n', (357, 375), True, 'import numpy as np\n'), ((928, 960), 'numpy.sort', 'np.sort', (['(-cosine_refdirs)'], {'axis': '(1)'}), '(-cosine_refdirs, axis=1)\n', (935, 960), True, 'import numpy as np\n'), ((997, 1039), 'numpy.clip', 'np.clip', (['sorted_cosine_refdirs[:, 1]', '(0)', '(1)'], {}), '(sorted_cosine_refdirs[:, 1], 0, 1)\n', (1004, 1039), True, 'import numpy as np\n'), ((561, 581), 'numpy.sum', 'np.sum', (['(ref_dir ** 2)'], {}), '(ref_dir ** 2)\n', (567, 581), True, 'import numpy as np\n'), ((767, 783), 'numpy.copy', 'np.copy', (['ref_dir'], {}), '(ref_dir)\n', (774, 783), True, 'import numpy as np\n')]
|
"""
Quintic Polynomials Planner
author: <NAME> (@Atsushi_twi)
Ref:
- [Local Path Planning And Motion Control For Agv In Positioning](http://ieeexplore.ieee.org/document/637936/)
"""
import numpy as np
import matplotlib.pyplot as plt
import math
# parameter
MAX_T = 100.0 # maximum time to the goal [s]
MIN_T = 5.0 # minimum time to the goal[s]
show_animation = True
class quinic_polynomial:
def __init__(self, xs, vxs, axs, xe, vxe, axe, T):
# calc coefficient of quinic polynomial
self.xs = xs
self.vxs = vxs
self.axs = axs
self.xe = xe
self.vxe = vxe
self.axe = axe
self.a0 = xs
self.a1 = vxs
self.a2 = axs / 2.0
A = np.array([[T**3, T**4, T**5],
[3 * T ** 2, 4 * T ** 3, 5 * T ** 4],
[6 * T, 12 * T ** 2, 20 * T ** 3]])
b = np.array([xe - self.a0 - self.a1 * T - self.a2 * T**2,
vxe - self.a1 - 2 * self.a2 * T,
axe - 2 * self.a2])
x = np.linalg.solve(A, b)
self.a3 = x[0]
self.a4 = x[1]
self.a5 = x[2]
def calc_point(self, t):
xt = self.a0 + self.a1 * t + self.a2 * t**2 + \
self.a3 * t**3 + self.a4 * t**4 + self.a5 * t**5
return xt
def calc_first_derivative(self, t):
xt = self.a1 + 2 * self.a2 * t + \
3 * self.a3 * t**2 + 4 * self.a4 * t**3 + 5 * self.a5 * t**4
return xt
def calc_second_derivative(self, t):
xt = 2 * self.a2 + 6 * self.a3 * t + 12 * self.a4 * t**2 + 20 * self.a5 * t**3
return xt
def calc_third_derivative(self, t):
xt = 6 * self.a3 + 24 * self.a4 * t + 60 * self.a5 * t**2
return xt
def quinic_polynomials_planner(sx, sy, syaw, sv, sa, gx, gy, gyaw, gv, ga, max_accel, max_jerk, dt):
"""
quinic polynomial planner
input
sx: start x position [m]
sy: start y position [m]
syaw: start yaw angle [rad]
sa: start accel [m/ss]
gx: goal x position [m]
gy: goal y position [m]
gyaw: goal yaw angle [rad]
ga: goal accel [m/ss]
max_accel: maximum accel [m/ss]
max_jerk: maximum jerk [m/sss]
dt: time tick [s]
return
time: time result
rx: x position result list
ry: y position result list
ryaw: yaw angle result list
rv: velocity result list
ra: accel result list
"""
vxs = sv * math.cos(syaw)
vys = sv * math.sin(syaw)
vxg = gv * math.cos(gyaw)
vyg = gv * math.sin(gyaw)
axs = sa * math.cos(syaw)
ays = sa * math.sin(syaw)
axg = ga * math.cos(gyaw)
ayg = ga * math.sin(gyaw)
for T in np.arange(MIN_T, MAX_T, MIN_T):
xqp = quinic_polynomial(sx, vxs, axs, gx, vxg, axg, T)
yqp = quinic_polynomial(sy, vys, ays, gy, vyg, ayg, T)
time, rx, ry, ryaw, rv, ra, rj = [], [], [], [], [], [], []
for t in np.arange(0.0, T + dt, dt):
time.append(t)
rx.append(xqp.calc_point(t))
ry.append(yqp.calc_point(t))
vx = xqp.calc_first_derivative(t)
vy = yqp.calc_first_derivative(t)
v = np.hypot(vx, vy)
yaw = math.atan2(vy, vx)
rv.append(v)
ryaw.append(yaw)
ax = xqp.calc_second_derivative(t)
ay = yqp.calc_second_derivative(t)
a = np.hypot(ax, ay)
if len(rv) >= 2 and rv[-1] - rv[-2] < 0.0:
a *= -1
ra.append(a)
jx = xqp.calc_third_derivative(t)
jy = yqp.calc_third_derivative(t)
j = np.hypot(jx, jy)
if len(ra) >= 2 and ra[-1] - ra[-2] < 0.0:
j *= -1
rj.append(j)
if max([abs(i) for i in ra]) <= max_accel and max([abs(i) for i in rj]) <= max_jerk:
print("find path!!")
break
if show_animation:
for i in range(len(rx)):
plt.cla()
plt.grid(True)
plt.axis("equal")
plot_arrow(sx, sy, syaw)
plot_arrow(gx, gy, gyaw)
plot_arrow(rx[i], ry[i], ryaw[i])
plt.title("Time[s]:" + str(time[i])[0:4] +
" v[m/s]:" + str(rv[i])[0:4] +
" a[m/ss]:" + str(ra[i])[0:4] +
" jerk[m/sss]:" + str(rj[i])[0:4],
)
plt.pause(0.001)
return time, rx, ry, ryaw, rv, ra, rj
def plot_arrow(x, y, yaw, length=1.0, width=0.5, fc="r", ec="k"):
"""
Plot arrow
"""
if not isinstance(x, float):
for (ix, iy, iyaw) in zip(x, y, yaw):
plot_arrow(ix, iy, iyaw)
else:
plt.arrow(x, y, length * math.cos(yaw), length * math.sin(yaw),
fc=fc, ec=ec, head_width=width, head_length=width)
plt.plot(x, y)
def main():
print(__file__ + " start!!")
sx = 10.0 # start x position [m]
sy = 10.0 # start y position [m]
syaw = math.radians(10.0) # start yaw angle [rad]
sv = 1.0 # start speed [m/s]
sa = 0.1 # start accel [m/ss]
gx = 30.0 # goal x position [m]
gy = -10.0 # goal y position [m]
gyaw = math.radians(20.0) # goal yaw angle [rad]
gv = 1.0 # goal speed [m/s]
ga = 0.1 # goal accel [m/ss]
max_accel = 1.0 # max accel [m/ss]
max_jerk = 0.5 # max jerk [m/sss]
dt = 0.1 # time tick [s]
time, x, y, yaw, v, a, j = quinic_polynomials_planner(
sx, sy, syaw, sv, sa, gx, gy, gyaw, gv, ga, max_accel, max_jerk, dt)
if show_animation:
plt.plot(x, y, "-r")
# plt.subplots()
# plt.plot(time, [math.degrees(i) for i in yaw], "-r")
# plt.xlabel("Time[s]")
# plt.ylabel("Yaw[deg]")
# plt.grid(True)
#
# plt.subplots()
# plt.plot(time, v, "-r")
# plt.xlabel("Time[s]")
# plt.ylabel("Speed[m/s]")
# plt.grid(True)
#
# plt.subplots()
# plt.plot(time, a, "-r")
# plt.xlabel("Time[s]")
# plt.ylabel("accel[m/ss]")
# plt.grid(True)
#
# plt.subplots()
# plt.plot(time, j, "-r")
# plt.xlabel("Time[s]")
# plt.ylabel("jerk[m/sss]")
# plt.grid(True)
plt.show()
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"math.atan2",
"math.radians",
"matplotlib.pyplot.axis",
"math.sin",
"numpy.hypot",
"numpy.arange",
"math.cos",
"numpy.array",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.pause",
"numpy.linalg.solve",
"matplotlib.pyplot.grid"
] |
[((2750, 2780), 'numpy.arange', 'np.arange', (['MIN_T', 'MAX_T', 'MIN_T'], {}), '(MIN_T, MAX_T, MIN_T)\n', (2759, 2780), True, 'import numpy as np\n'), ((5052, 5070), 'math.radians', 'math.radians', (['(10.0)'], {}), '(10.0)\n', (5064, 5070), False, 'import math\n'), ((5251, 5269), 'math.radians', 'math.radians', (['(20.0)'], {}), '(20.0)\n', (5263, 5269), False, 'import math\n'), ((727, 841), 'numpy.array', 'np.array', (['[[T ** 3, T ** 4, T ** 5], [3 * T ** 2, 4 * T ** 3, 5 * T ** 4], [6 * T, 12 *\n T ** 2, 20 * T ** 3]]'], {}), '([[T ** 3, T ** 4, T ** 5], [3 * T ** 2, 4 * T ** 3, 5 * T ** 4], [\n 6 * T, 12 * T ** 2, 20 * T ** 3]])\n', (735, 841), True, 'import numpy as np\n'), ((887, 1000), 'numpy.array', 'np.array', (['[xe - self.a0 - self.a1 * T - self.a2 * T ** 2, vxe - self.a1 - 2 * self.a2 *\n T, axe - 2 * self.a2]'], {}), '([xe - self.a0 - self.a1 * T - self.a2 * T ** 2, vxe - self.a1 - 2 *\n self.a2 * T, axe - 2 * self.a2])\n', (895, 1000), True, 'import numpy as np\n'), ((1051, 1072), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (1066, 1072), True, 'import numpy as np\n'), ((2510, 2524), 'math.cos', 'math.cos', (['syaw'], {}), '(syaw)\n', (2518, 2524), False, 'import math\n'), ((2540, 2554), 'math.sin', 'math.sin', (['syaw'], {}), '(syaw)\n', (2548, 2554), False, 'import math\n'), ((2570, 2584), 'math.cos', 'math.cos', (['gyaw'], {}), '(gyaw)\n', (2578, 2584), False, 'import math\n'), ((2600, 2614), 'math.sin', 'math.sin', (['gyaw'], {}), '(gyaw)\n', (2608, 2614), False, 'import math\n'), ((2631, 2645), 'math.cos', 'math.cos', (['syaw'], {}), '(syaw)\n', (2639, 2645), False, 'import math\n'), ((2661, 2675), 'math.sin', 'math.sin', (['syaw'], {}), '(syaw)\n', (2669, 2675), False, 'import math\n'), ((2691, 2705), 'math.cos', 'math.cos', (['gyaw'], {}), '(gyaw)\n', (2699, 2705), False, 'import math\n'), ((2721, 2735), 'math.sin', 'math.sin', (['gyaw'], {}), '(gyaw)\n', (2729, 2735), False, 'import math\n'), ((2995, 3021), 'numpy.arange', 'np.arange', (['(0.0)', '(T + dt)', 'dt'], {}), '(0.0, T + dt, dt)\n', (3004, 3021), True, 'import numpy as np\n'), ((4902, 4916), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (4910, 4916), True, 'import matplotlib.pyplot as plt\n'), ((5639, 5659), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-r"""'], {}), "(x, y, '-r')\n", (5647, 5659), True, 'import matplotlib.pyplot as plt\n'), ((6333, 6343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6341, 6343), True, 'import matplotlib.pyplot as plt\n'), ((3241, 3257), 'numpy.hypot', 'np.hypot', (['vx', 'vy'], {}), '(vx, vy)\n', (3249, 3257), True, 'import numpy as np\n'), ((3276, 3294), 'math.atan2', 'math.atan2', (['vy', 'vx'], {}), '(vy, vx)\n', (3286, 3294), False, 'import math\n'), ((3460, 3476), 'numpy.hypot', 'np.hypot', (['ax', 'ay'], {}), '(ax, ay)\n', (3468, 3476), True, 'import numpy as np\n'), ((3690, 3706), 'numpy.hypot', 'np.hypot', (['jx', 'jy'], {}), '(jx, jy)\n', (3698, 3706), True, 'import numpy as np\n'), ((4025, 4034), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4032, 4034), True, 'import matplotlib.pyplot as plt\n'), ((4047, 4061), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4055, 4061), True, 'import matplotlib.pyplot as plt\n'), ((4074, 4091), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (4082, 4091), True, 'import matplotlib.pyplot as plt\n'), ((4467, 4483), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (4476, 4483), True, 
'import matplotlib.pyplot as plt\n'), ((4786, 4799), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (4794, 4799), False, 'import math\n'), ((4810, 4823), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (4818, 4823), False, 'import math\n')]
|