"""Classification methods."""
import numpy as np
from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED
from machine_learning.utilities import k_fold_split_indexes, get_k_nn
def classification(method, error_func, train, test, **kwargs):
"""Perform classification for data and return error.
Arguments:
method {function} -- Classification method.
error_func {function} -- Error function.
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
All extra keyword arguments are passed to method.
Returns:
float -- Error value returned by error_func.
"""
y_pred = method(train, test, **kwargs)
return error_func(y_pred, test.y.values)
def max_classifier(train, test):
"""Maximum classifier.
Classifies using the most common class in training data.
Arguments:
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
Returns:
ndarray -- Predicted values.
"""
max_category = max_classifier_fit(train.X, train.y)
y_pred = max_classifier_predict(test.X, max_category)
return y_pred
def max_classifier_fit(X, y):
"""Determines the most common class in input.
Arguments:
        X {DataFrame} -- Independent variables.
y {DataFrame} -- Dependent variable.
Returns:
int -- Most common class.
"""
y = y.values
max_category = np.bincount(y.astype(int)).argmax()
return max_category
def max_classifier_predict(X, max_category):
"""Classify using max classifier.
Arguments:
X {DataFrame} -- Independent variables.
max_category {int} -- Class to classify to.
Returns:
ndarray -- Predicted values.
"""
    y_pred = np.ones((X.shape[0], 1), dtype=int) * max_category
return y_pred
def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES):
"""Multinomial naive bayes classifier.
See more at:
https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes
Arguments:
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
Keyword Arguments:
n_classes {int} -- Number of classes. (default: {N_CLASSES})
Returns:
ndarray -- Predicted values.
"""
train_X = train.X.values
train_y = train.y.values
test_X = test.X.values
class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y,
n_classes)
y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods)
return y_pred
def mnb_classifier_fit(X, y, n_classes):
"""Fit MNB classifier.
Calculates class priors and feature likelihoods.
Arguments:
X {ndarray} -- Independent variables.
y {ndarray} -- Dependent variables.
n_classes {int} -- Number of classes.
Returns:
ndarray -- Class priors.
ndarray -- Feature likelihoods.
"""
class_priors = mnb_class_priors(y, n_classes)
feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes)
return class_priors, feature_likelihoods
def mnb_class_priors(y, n_classes):
"""Calculates the logaritm of the probability of belonging to each class.
Arguments:
y {ndarray} -- Class labels.
n_classes {int} -- Number of class labels.
Returns:
ndarray -- Log of prior probabilities.
"""
priors = np.zeros(n_classes)
for c in range(n_classes):
priors[c] = np.log(np.sum(y == c) / y.size)
return priors
def mnb_feature_likelihoods(X, y, n_classes):
"""Calculates the probability of feature j, given class k, using Laplace smoothing.
Arguments:
X {ndarray} -- Features.
y {ndarray} -- Class labels.
n_classes {int} -- Number of classes.
Returns:
ndarray -- Logs of feature likelihoods.
"""
n_features = X.shape[1]
p_ij = np.zeros((n_classes, n_features))
for c in range(n_classes):
Fc_sum = np.sum(X[y == c, :])
for j in range(n_features):
Fnc = np.sum(X[y == c, j])
p_ij[c, j] = np.log((1.0 + Fnc) / (n_features + Fc_sum))
return p_ij
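# Note (added): the expression above is Laplace (add-one) smoothing in log space:
# log P(feature j | class c) = log((1 + F_cj) / (n_features + sum_j F_cj)),
# where F_cj is the total count of feature j over the training rows of class c.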
def mnb_classifier_predict(X, class_priors, feature_likelihoods):
"""Classify using MNB classifier.
Arguments:
X {ndarray} -- Independent variables.
class_priors {ndarray} -- Class priors.
feature_likelihoods {ndarray} -- Feature likelihoods.
Returns:
ndarray -- Predicted values.
"""
n_classes = class_priors.size
N = X.shape[0]
posterior = np.zeros((N, n_classes))
for i in range(N):
posterior[i, :] = feature_likelihoods.dot(X[i, :])
for c in range(n_classes):
posterior[:, c] = posterior[:, c] + class_priors[c]
y_pred = np.argmax(posterior, axis=1)
return y_pred
def k_nn_classifier(train, test, k):
"""K-nearest neighbors classifier.
Arguments:
train {DataTuple} -- Training data.
test {DataTuple} -- Test data.
k {int} -- Value for k.
Returns:
ndarray -- Predicted values.
"""
y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k)
return y_pred
def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K):
"""'Fit' K-nearest neighbors classifier by finding optimal value for k using cross validation.
Arguments:
train {DataTuple} -- Training data.
Keyword Arguments:
n_folds {int} -- Number of folds to use for validation. (default: {FOLDS})
max_k {int} -- Maximum value for k. (default: {MAX_K})
Returns:
int -- Optimal value for k.
float -- Error for selected k.
"""
# TODO: combine with k_nn_regression_fit()?
X = train.X.values
y = train.y.values
N = X.shape[0]
folds = k_fold_split_indexes(N, n_folds)
    min_error = np.inf
best_k = 1
    for k in range(1, max_k + 1):  # include max_k itself as a candidate
errors = np.zeros(n_folds)
for i in range(n_folds):
tmp_folds = folds[:]
valid_ix = tmp_folds.pop(i)
train_ix = np.concatenate(tmp_folds)
y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :],
y[train_ix], k)
error = classification_error(y_pred, y[valid_ix])
errors[i] = (valid_ix.size * error)
mean_error = np.sum(errors) / N
if mean_error < min_error:
min_error = mean_error
best_k = k
return int(best_k), min_error
def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES):
"""Classify using K-nearest neighbors classifier.
Assigns class labels based on the most common class in k-nearest neighbors.
Arguments:
X {DataFrame} -- Independent variables.
X_train {DataFrame} -- Independent training variables.
y_train {DataFrame} -- Dependent training variables.
k {int} -- Value of k.
Keyword Arguments:
n_classes {int} -- Number of classes. (default: {N_CLASSES})
Returns:
ndarray -- Predicted variables.
"""
try:
X = X.values
except AttributeError:
pass
try:
X_train = X_train.values
except AttributeError:
pass
try:
y_train = y_train.values
except AttributeError:
pass
assert X.shape[1] == X_train.shape[1]
N = X.shape[0]
y_pred = np.zeros((N, 1))
for i in range(N):
point = X[i, :]
neighbors, _ = get_k_nn(point, X_train, k)
train_labels = y_train[neighbors]
class_sums = [np.sum(train_labels == i) for i in range(n_classes)]
y_pred[i] = k_nn_assign_label(class_sums)
return y_pred
def k_nn_assign_label(class_sums):
"""Assing label according the most common class.
If there are multiple candidates, pick one randomly.
Arguments:
class_sums {list} -- Class frequencies.
Returns:
int -- Assinged class label.
"""
order = np.argsort(class_sums)[::-1]
candidates = [x for x in order if x == order[0]]
return np.random.RandomState(RANDOM_SEED).choice(candidates)
def classification_error(y_pred, y_true):
"""Return classification error.
    Sum of incorrectly assigned classes divided by the number of points.
Arguments:
y_pred {ndarray} -- Predicted values.
y_true {ndarray} -- True values.
Returns:
float -- Error.
"""
y_true = y_true.reshape(y_pred.shape)
    return np.sum(y_pred.astype(int)
                  != y_true.astype(int)) / float(y_pred.size)
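# --- Hedged usage sketch (added; not part of the original module) -----------
# A minimal demo of the classifiers above. It assumes DataTuple is a simple
# namedtuple-like container with pandas fields X and y (that is how the
# functions above access their inputs; the real definition lives elsewhere in
# the package), and it relies on the package's get_k_nn utility and N_CLASSES
# constant already imported at the top of this module.
if __name__ == "__main__":
    from collections import namedtuple
    import pandas as pd

    DataTuple = namedtuple("DataTuple", ["X", "y"])  # assumed container shape
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.rand(100, 5))
    y = pd.Series(rng.randint(0, 2, size=100))
    train = DataTuple(X.iloc[:80], y.iloc[:80])
    test = DataTuple(X.iloc[80:], y.iloc[80:])

    # Majority-class baseline, scored with classification_error.
    err_max = classification(max_classifier, classification_error, train, test)
    print("max classifier error:", err_max)

    # 3-nearest-neighbours classifier (extra kwargs are forwarded to the method).
    err_knn = classification(k_nn_classifier, classification_error, train, test, k=3)
    print("3-NN classifier error:", err_knn)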
APIs used:
[
"numpy.sum",
"numpy.log",
"numpy.argmax",
"machine_learning.utilities.k_fold_split_indexes",
"numpy.zeros",
"numpy.ones",
"numpy.random.RandomState",
"numpy.argsort",
"machine_learning.utilities.get_k_nn",
"numpy.concatenate"
]
import numpy as np
import pandas as pd
from . import ResError
def remove_leap_day(timeseries):
"""Removes leap days from a given timeseries
Parameters
----------
timeseries : array_like
The time series data to remove leap days from
        * If something array_like is given, the length must be 8760 (returned
          unchanged) or 8784 (hourly data covering a leap year)
* If a pandas DataFrame or Series is given, time indexes will be used
directly
Returns
-------
Array
"""
if isinstance(timeseries, np.ndarray):
if timeseries.shape[0] == 8760:
return timeseries
elif timeseries.shape[0] == 8784:
times = pd.date_range("01-01-2000 00:00:00",
"12-31-2000 23:00:00", freq="H")
sel = np.logical_and((times.day == 29), (times.month == 2))
if len(timeseries.shape) == 1:
return timeseries[~sel]
else:
return timeseries[~sel, :]
else:
raise ResError('Cannot handle array shape ' + str(timeseries.shape))
elif isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame):
times = timeseries.index
sel = np.logical_and((times.day == 29), (times.month == 2))
if isinstance(timeseries, pd.Series):
return timeseries[~sel]
else:
return timeseries.loc[~sel]
else:
return remove_leap_day(np.array(timeseries))
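# --- Hedged usage sketch (added; not part of the original module) -----------
# Illustrates the supported inputs. The hourly index below is an assumption
# chosen to match the 8784-hour leap-year case handled above.
#
#     hours = pd.date_range("2000-01-01 00:00:00", periods=8784, freq="H")
#     series = pd.Series(np.arange(8784), index=hours)
#     assert len(remove_leap_day(series)) == 8760       # pandas path, index-based
#
#     arr = np.ones(8784)
#     assert remove_leap_day(arr).shape[0] == 8760       # ndarray path, position-based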
APIs used:
[
"numpy.array",
"pandas.date_range",
"numpy.logical_and"
]
from joblib import Memory
import math
import music21 as m21
import numpy as np
import os
from scipy.fftpack import fft, ifft
def get_composers():
return ["Haydn", "Mozart"]
def get_data_dir():
return "/scratch/vl1019/nemisig2018_data"
def get_dataset_name():
return "nemisig2018"
def concatenate_layers(Sx, depth):
layers = []
for m in range(depth+1):
layers.append(Sx[m].flatten())
return np.concatenate(layers)
def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16):
N = 2**J_fr
filterbank = np.zeros((N, 1, 2*(J_fr-2)+1))
for j in range(J_fr-2):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi = morlet(center, den, N, n_periods=4)
filterbank[:, 0, j] = psi
for j in range(J_fr-2, 2*(J_fr-2)):
psi = filterbank[:, 0, j - (J_fr-2)]
rev_psi = np.concatenate((psi[0:1], psi[1:][::-1]))
filterbank[:, 0, j] = rev_psi
sigma_phi = 2.0 * sigma * 2**(-(J_fr-2))
center_phi = 0.0
den_phi = sigma_phi * sigma_phi * N * N
phi = gabor(center_phi, den_phi, N)
rev_phi = np.concatenate((phi[0:1], phi[1:][::-1]))
phi = phi + rev_phi
phi[0] = 1.0
filterbank[:, 0, -1] = phi
for m in range(dim):
filterbank = np.expand_dims(filterbank, axis=2)
return filterbank
def gabor(center, den, N):
omegas = np.array(range(N))
return gauss(omegas - center, den)
def gauss(omega, den):
return np.exp(- omega*omega / den)
def is_even(n):
return (n%2 == 0)
def morlet(center, den, N, n_periods):
half_N = N >> 1
p_start = - ((n_periods-1) >> 1) - is_even(n_periods)
p_stop = ((n_periods-1) >> 1) + 1
omega_start = p_start * N
omega_stop = p_stop * N
omegas = np.array(range(omega_start, omega_stop))
gauss_center = gauss(omegas - center, den)
corrective_gaussians = np.zeros((N*n_periods, n_periods))
for p in range(n_periods):
offset = (p_start + p) * N
corrective_gaussians[:, p] = gauss(omegas - offset, den)
p_range = range(p_start, p_stop)
b = np.array([gauss(p*N - center, den) for p in p_range])
A = np.array([gauss((q-p)*N, den)
for p in range(n_periods)
for q in range(n_periods)]).reshape(n_periods, n_periods)
corrective_factors = np.linalg.solve(A, b)
y = gauss_center - np.dot(corrective_gaussians, corrective_factors)
y = np.fft.fftshift(y)
y = np.reshape(y, (n_periods, N))
y = np.sum(y, axis=0)
return y
def scatter(U, filterbank, dim):
U_ft = fft(U, axis=dim)
U_ft = np.expand_dims(U_ft, axis=-1)
Y_ft = U_ft * filterbank
Y = ifft(Y_ft, axis=dim)
return Y
def setup_timefrequency_scattering(J_tm, J_fr, depth):
filterbanks_tm = []
filterbanks_fr = []
for m in range(depth):
filterbank_tm = temporal_filterbank(2*m, J_tm)
filterbank_fr = frequential_filterbank(2*m+1, J_fr)
filterbanks_tm.append(filterbank_tm)
filterbanks_fr.append(filterbank_fr)
return (filterbanks_tm, filterbanks_fr)
def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16):
N = 2**J_tm
filterbank = np.zeros((1, N, J_tm-2))
for j in range(J_tm-2):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi = morlet(center, den, N, n_periods=4)
filterbank[0, :, j] = psi
for m in range(dim):
filterbank = np.expand_dims(filterbank, axis=2)
return filterbank
def temporal_scattering(pianoroll, filterbanks, nonlinearity):
depth = len(filterbanks)
Us = [pianoroll]
Ss = []
for m in range(depth):
U = Us[m]
S = np.sum(U, axis=(0, 1))
filterbank = filterbanks[m]
Y = scatter(U, filterbank, 1)
if nonlinearity == "abs":
U = np.abs(Y)
else:
raise NotImplementedError
Us.append(U)
Ss.append(S)
S = np.sum(U, axis=(0, 1))
Ss.append(S)
return Ss
def timefrequency_scattering(pianoroll, filterbanks, nonlinearity):
filterbanks_tm = filterbanks[0]
filterbanks_fr = filterbanks[1]
depth = len(filterbanks_tm)
Us = [pianoroll]
Ss = []
for m in range(depth):
U = Us[m]
S = np.sum(U, axis=(0,1))
filterbank_tm = filterbanks_tm[m]
filterbank_fr = filterbanks_fr[m]
Y_tm = scatter(U, filterbank_tm, 1)
Y_fr = scatter(Y_tm, filterbank_fr, 0)
if nonlinearity == "abs":
U = np.abs(Y_fr)
else:
raise NotImplementedError
Us.append(U)
Ss.append(S)
S = np.sum(U, axis=(0, 1))
Ss.append(S)
return Ss
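# --- Hedged usage sketch (added; not part of the original module) -----------
# Illustrates how the helpers above fit together: build the time/frequency
# filterbanks once, then scatter a (frequency x time) "pianoroll" array and
# flatten the per-order outputs into one feature vector. The random array and
# the J/depth values below are arbitrary illustrative choices; in the original
# pipeline the pianoroll would presumably come from a parsed score.
if __name__ == "__main__":
    J_tm, J_fr, depth = 6, 5, 2
    filterbanks = setup_timefrequency_scattering(J_tm, J_fr, depth)
    pianoroll = np.random.rand(2 ** J_fr, 2 ** J_tm)  # assumed shape: (n_freq_bins, n_time_frames)
    Ss = timefrequency_scattering(pianoroll, filterbanks, nonlinearity="abs")
    print("scattering output shapes per order:", [np.shape(S) for S in Ss])
    features = concatenate_layers(Ss, depth)
    print("flattened feature vector length:", features.size)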
APIs used:
[
"numpy.sum",
"numpy.abs",
"numpy.zeros",
"numpy.expand_dims",
"scipy.fftpack.fft",
"scipy.fftpack.ifft",
"numpy.fft.fftshift",
"numpy.exp",
"numpy.reshape",
"numpy.dot",
"numpy.linalg.solve",
"numpy.concatenate"
]
## Comorbidities:
## Asthma, Obesity, Smoking, Diabetes, Heart disease, Hypertension
## Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache,
# Pneumonia, Stomach, Myocarditis, Blood-Clots, Death
## Mild symptoms: Taste, Fever, Headache, Stomach
## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots
import numpy as np
import pickle
class Person:
def __init__(self, pop):
self.genes = np.random.choice(2, size=pop.n_genes)
self.gender = np.random.choice(2, 1)
self.age = np.random.gamma(3, 11)
self.age_adj = self.age / 100 # age affects everything
self.income = np.random.gamma(1, 10000)
self.comorbidities = [0] * pop.n_comorbidities
self.comorbidities[0] = pop.asthma
self.comorbidities[1] = pop.obesity * self.age_adj
self.comorbidities[2] = pop.smoking
self.diab = pop.diabetes + self.comorbidities[1] * 0.5
self.HT = pop.htension + self.comorbidities[2] * 0.5
self.comorbidities[3] = self.diab
self.comorbidities[4] = pop.heart * self.age_adj
self.comorbidities[5] = self.HT * self.age_adj
for i in range(pop.n_comorbidities):
if (np.random.uniform() < self.comorbidities[i]):
self.comorbidities[i] = 1
else:
self.comorbidities[i] = 0
self.symptom_baseline = np.array(
[pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01,
0.02, 0.001, 0.001, 0.001]);
self.symptom_baseline = np.array(
np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline
self.symptom_baseline[0] = pop.historical_prevalence;
self.symptom_baseline[1] = pop.prevalence;
if (self.gender == 1):
self.symptom_baseline[8] += 0.01
else:
self.symptom_baseline[7] += 0.01
self.symptom_baseline[9] += 0.01
# Initially no symptoms apart from Covid+/CovidPre
self.symptoms = [0] * pop.n_symptoms
if (np.random.uniform() <= self.symptom_baseline[0]):
self.symptoms[0] = 1
# increase symptom probabilities for symptoms when covid+
if (np.random.uniform() <= self.symptom_baseline[1]):
self.symptoms[1] = 1
self.symptom_baseline = np.array(
[pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05,
0.2, 0.02, 0.05, 0.2, 0.1]);
self.vaccines = [0] * pop.n_vaccines
# use vaccine = -1 if no vaccine is given
def vaccinate(self, vaccine_array, pop):
## Vaccinated
        # an all-zero vaccine_array means no vaccine was given (vaccine = -1,
        # as noted in the comment above), so require a strictly positive sum
        if (sum(vaccine_array) > 0):
            vaccinated = True
        else:
            vaccinated = False
        vaccine = -1
        if (vaccinated):
            vaccine = np.argmax(vaccine_array)
            self.vaccines = vaccine_array
            self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine]
if (vaccinated and self.symptoms[1] == 1):
self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine]
self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine]
self.symptom_baseline[9] *= pop.death_efficacy[vaccine]
if (self.symptoms[0] == 1):
self.symptom_baseline *= 0.5
# baseline symptoms of non-covid patients
if (self.symptoms[0] == 0 and self.symptoms[1] == 0):
self.symptom_baseline = np.array(
[0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001])
## Common side-effects
if (vaccine == 1):
self.symptom_baseline[8] += 0.01
self.symptom_baseline[9] += 0.001
if (vaccine == 2):
self.symptom_baseline[7] += 0.01
if (vaccine >= 0):
self.symptom_baseline[3] += 0.2
self.symptom_baseline[4] += 0.1
# model long covid sufferers by increasing the chances of various
# symptoms slightly
if (self.symptoms[0] == 1 and self.symptoms[1] == 0):
self.symptom_baseline += np.array(
[0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]);
# genetic factors
self.symptom_baseline = np.array(
np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline
# print("V:", vaccine, symptom_baseline)
for s in range(2, pop.n_symptoms):
if (np.random.uniform() < self.symptom_baseline[s]):
self.symptoms[s] = 1
class Population:
def __init__(self, n_genes, n_vaccines, n_treatments):
self.n_genes = n_genes
self.n_comorbidities = 6;
self.n_symptoms = 10
self.n_vaccines = n_vaccines
self.n_treatments = n_treatments
self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms])
self.G /= sum(self.G)
self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms])
self.asthma = 0.08
self.obesity = 0.3
self.smoking = 0.2
self.diabetes = 0.1
self.heart = 0.15
self.htension = 0.3
self.baseline_efficacy = [0.5, 0.6, 0.7]
self.mild_efficacy = [0.6, 0.7, 0.8]
self.critical_efficacy = [0.8, 0.75, 0.85]
self.death_efficacy = [0.9, 0.95, 0.9]
self.vaccination_rate = [0.7, 0.1, 0.1, 0.1]
self.prevalence = 0.1
self.historical_prevalence = 0.1
## Generates data with the following structure:
## X: characteristics before treatment, including whether or not they
# were vaccinated
## The generated population may already be vaccinated.
def generate(self, n_individuals):
"""Generate a population.
Call this function before anything else is done.
Calling this function again generates a completely new population
sample, purging the previous one from memory.
:param int n_individuals: the number of individuals to generate
"""
self.n_individuals = n_individuals
X = np.zeros([n_individuals,
3 + self.n_genes + self.n_comorbidities
+ self.n_vaccines + self.n_symptoms])
Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms])
self.persons = []
for t in range(n_individuals):
person = Person(self)
vaccine = np.random.choice(4, p=self.vaccination_rate) - 1
vaccine_array = np.zeros(self.n_vaccines)
if (vaccine >= 0):
vaccine_array[vaccine] = 1
person.vaccinate(vaccine_array, self)
self.persons.append(person)
x_t = np.concatenate(
[person.symptoms, [person.age, person.gender, person.income],
person.genes, person.comorbidities, person.vaccines])
X[t, :] = x_t
self.X = X
return X
def vaccinate(self, person_index, vaccine_array):
""" Give a vaccine to a specific person.
Args:
person_index (int array), indices of person in the population
vaccine_array (n*|A| array), array indicating which vaccines are to
be given to each patient
Returns:
The symptoms of the selected individuals
Notes:
Currently only one vaccine dose is implemented, but in the future
multiple doses may be modelled.
"""
outcome = np.zeros([len(person_index), self.n_symptoms])
i = 0
for t in person_index:
self.persons[t].vaccinate(vaccine_array[i], self)
            outcome[i] = self.persons[t].symptoms
i += 1
return outcome
def treat(self, person_index, treatment):
""" Treat a patient.
Args:
person_index (int array), indices of persons in the population to treat
treatment_array (n*|A| array), array indicating which treatments are
to be given to each patient
Returns:
The symptoms of the selected individuals
"""
N = len(person_index)
result = np.zeros([N, self.n_symptoms])
# use i to index the treated
# use t to index the original population
# print(treatment)
for i in range(N):
t = person_index[i]
r = np.array(np.matrix(treatment[i]) * self.A).flatten()
for k in range(self.n_symptoms):
if (k <= 1):
result[i, k] = self.X[t, k]
else:
if (np.random.uniform() < r[k]):
result[i, k] = 0
else:
result[i, k] = self.X[t, k]
return result
def get_features(self, person_index):
x_t = np.concatenate([self.persons[t].symptoms,
[self.persons[t].age, self.persons[t].gender,
self.persons[t].income], self.persons[t].genes,
self.persons[t].comorbidities,
self.persons[t].vaccines])
return x_t
## Treats a population
def treatment(self, X, policy):
treatments = np.zeros([X.shape[0], self.n_treatments])
result = np.zeros([X.shape[0], self.n_symptoms])
for t in range(X.shape[0]):
# print ("X:", result[t])
treatments[t][policy.get_action(X[t])] = 1
r = np.array(np.matrix(treatments[t]) * self.A).flatten()
for k in range(self.n_symptoms):
if (k <= 1):
result[t, k] = X[t, k]
else:
if (np.random.uniform() < r[k]):
result[t, k] = 0
else:
result[t, k] = X[t, k]
##print("X:", X[t,:self.n_symptoms] , "Y:", result[t])
return treatments, result
# main
if __name__ == "__main__":
import pandas
    try:
        import policy
    except ImportError:
        import project2.src.covid.policy as policy
n_symptoms = 10
n_genes = 128
n_vaccines = 3
n_treatments = 4
pop = Population(n_genes, n_vaccines, n_treatments)
n_observations = 1000
X_observation = pop.generate(n_observations)
pandas.DataFrame(X_observation).to_csv('observation_features.csv',
header=False, index=False)
n_treated = 1000
X_treatment = pop.generate(n_treated)
X_treatment = X_treatment[X_treatment[:, 1] == 1]
print("Generating treatment outcomes")
a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments))
pandas.DataFrame(X_treatment).to_csv('treatment_features.csv',
header=False, index=False)
pandas.DataFrame(a).to_csv('treatment_actions.csv', header=False,
index=False)
pandas.DataFrame(y).to_csv('treatment_outcomes.csv', header=False,
index=False)
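# --- Hedged sketch of the assumed `policy` interface (added; not original) --
# The main block above only needs an object exposing get_action(features);
# RandomPolicy is assumed to pick one of n_treatments uniformly at random.
# A minimal implementation compatible with that usage could look like:
#
#     class RandomPolicy:
#         def __init__(self, n_actions):
#             self.n_actions = n_actions
#         def get_action(self, features):
#             return np.random.choice(self.n_actions)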
APIs used:
[
"pandas.DataFrame",
"numpy.random.uniform",
"numpy.matrix",
"policy.RandomPolicy",
"numpy.argmax",
"numpy.zeros",
"policy.get_action",
"numpy.random.gamma",
"numpy.array",
"numpy.random.choice",
"numpy.concatenate"
]
# <NAME>
# <EMAIL>
# MIT License
# As-simple-as-possible training loop for an autoencoder.
import torch
import numpy as np
import torch.optim as optim
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from model.shallow_autoencoder import ConvAutoencoder
# load model definition
model = ConvAutoencoder()
model = model.double()  # use float64 weights to match the float64 NumPy data below (avoids a dtype mismatch error)
# define loss and optimizer
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Toy data:
# Using separate input and output variables to cover all cases,
# since Y could differ from X (e.g. for denoising autoencoders).
X = np.random.random((300, 1, 100))
Y = X
# prepare pytorch dataloader
dataset = TensorDataset(torch.tensor(X), torch.tensor(Y))
dataloader = DataLoader(dataset, batch_size=256, shuffle=True)
# Training loop
for epoch in range(200):
for x, y in dataloader:
optimizer.zero_grad()
# forward and backward pass
out = model(x)
loss = criterion(out, y)
loss.backward()
optimizer.step()
print(loss.item()) # loss should be decreasing
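# --- Hedged sketch (added; not part of the original script) ------------------
# The real ConvAutoencoder lives in model/shallow_autoencoder.py and is not
# shown here. The class below is only an illustration of a shallow 1-D conv
# autoencoder that would be shape-compatible with the (batch, 1, 100) toy
# input above; the actual architecture may differ.
class ConvAutoencoderSketch(nn.Module):
    def __init__(self):
        super().__init__()
        # encoder: 1 input channel -> 8 feature maps, halving the sequence length
        self.encoder = nn.Sequential(
            nn.Conv1d(1, 8, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
        )
        # decoder: mirror the encoder back to 1 channel at the original length
        self.decoder = nn.Sequential(
            nn.ConvTranspose1d(8, 1, kernel_size=3, stride=2,
                               padding=1, output_padding=1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return self.decoder(self.encoder(x))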
APIs used:
[
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"numpy.random.random",
"model.shallow_autoencoder.ConvAutoencoder",
"torch.tensor"
]
import cv2
from PIL import Image
import numpy as np
import constants
import os
import math
import matplotlib.pyplot as plt
import time
def hammingDistance(v1, v2):
t = 0
for i in range(len(v1)):
if v1[i] != v2[i]:
t += 1
return t
# read thresholds from thresholds.txt and then store them into thresholds list
thresholds = []
with open('./thresholds.txt', 'r') as f:
threshold = f.readline()
while threshold:
threshold = threshold.rstrip("\n")
thresholds.append(float(threshold))
threshold = f.readline()
f.close()
# read barcode and image location from barcodes.txt file
imageLocations = []
barcodes = []
with open("barcodes.txt", 'r') as f:
line = f.readline()
while line:
line = line.rstrip("\n")
line = line.split(",")
imageLocation = line.pop()
barcode = []
for bit in line:
barcode.append(int(bit))
imageLocations.append(imageLocation)
barcodes.append(barcode)
line = f.readline()
f.close()
def create_barcode(imagePath):
barcode = []
opcv = cv2.imread(imagePath, 0) # read image file as cv2 image
# ret2, th2 = cv2.threshold(opcv, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) # apply threshold it just makes pixel values either black or white
img = Image.fromarray(opcv) # create image from thresholded 2d image array
barcode = []
degree = constants.MIN_DEGREE
while degree < constants.MAX_DEGREE: # loop through MIN_DEGREE to MAX_DEGREE by STEP_DEGREE
currentProjectionThreshold = int(degree / constants.STEP_DEGREE) # find the appropriate threshold index
rotated_image = img.rotate(degree) # rotate the image
image2d = np.array(rotated_image) # get 2d representation of the rotated image
for row in image2d: # loop through each row in thresholded image
row_sum = 0 # initialize row pixel counter
for pixel in row: # loop through each pixel in the row
                pixel = pixel / 255  # scale so a white (255) pixel counts as 1 and a black (0) pixel as 0
row_sum+=pixel # sum of pixels across a single row
# thresholds the sum of the row to 1 or 0 based on calculated threshold
if row_sum >= thresholds[currentProjectionThreshold]:
barcode.append(1)
else:
barcode.append(0)
degree += constants.STEP_DEGREE
return barcode
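# --- Hedged sketch (added; not part of the original script) ------------------
# barcodes.txt is read above but its creation is not shown. A helper along
# these lines could regenerate it by running create_barcode over every image;
# the "<digit>_<filename>" location format is inferred from how imageLoc is
# split elsewhere in this script, so treat it as an assumption.
def build_barcode_index(output_path="barcodes.txt"):
    with open(output_path, "w") as out:
        for digit in range(constants.NUMBER_OF_DIGITS):
            directory = r'./MNIST_DS/{}'.format(digit)
            for imageName in os.listdir(directory):
                barcode = create_barcode(os.path.join(directory, imageName))
                location = "{}_{}".format(digit, imageName)
                out.write(",".join(str(bit) for bit in barcode)
                          + "," + location + "\n")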
class CalculateAccuracyHitRatio:
def __init__(self, barcodes, imageLocations):
self.barcodes = barcodes
self.imageLocations = imageLocations
def calculateAccuracy(self):
accuracy = lambda x : x / 100
successCount = 0
for currDigit in range(constants.NUMBER_OF_DIGITS): # loop through 0 to NUMBER_OF_DIGITS-1
directory = r'./MNIST_DS/{}'.format(currDigit) # digit folder path
            for imageName in os.listdir(directory):  # loop through every file in the directory
print("Checking image {}".format(os.path.join(directory, imageName)))
searchBarcode = create_barcode(os.path.join(directory, imageName))
s, hd, resultImgLoc, resultImgBarcode = self.checkSuccess(searchBarcode, currDigit)
print("\tHamming Distance: {}\n\tResult Image: {}".format(hd, resultImgLoc))
# time.sleep(0.5/4)
if s:
successCount += 1
hitRatio = accuracy(successCount)
return hitRatio
def checkSuccess(self, searchBarcode, searchDigitGroup):
success = False # variable for holding the success information
minHMD = (constants.IMAGE_SIZE*constants.NUM_PROJECTIONS)+1 # Minimum Hamming Distance. It is (maxiumum hamming distance + 1) by default
minBarcode = None # barcode that corresponds to the minimum hamming distance
imageLoc = None # result image location
for i, barcode in enumerate(self.barcodes): # loop through every barcode in the barcodes list
currentHMD = hammingDistance( barcode, searchBarcode) # check each bit in both barcodes and calculate how many of these not same
if currentHMD == 0: # hamming distance 0 means the barcodes are identical which means they are the same image
continue # skip
elif currentHMD < minHMD: # if the current calculated hamming distance is less than the minimum hamming distance
minHMD = currentHMD # then set minimum hamming distance to current calculated hamming distance
minBarcode = barcode # set the current barcode as
imageLoc = self.imageLocations[i]
resultDigitGroup = imageLoc.split("_", 1)[0]
if int(resultDigitGroup) == int(searchDigitGroup):
success = True
return success, minHMD, imageLoc, minBarcode
class SearchSimilar:
def __init__(self):
self.digitSelectMenu()
def findSimilar(self, inputBarcode):
minHMD = (constants.IMAGE_SIZE*constants.NUM_PROJECTIONS)+1
print(minHMD)
minBarcode = None
imageLoc = None
for i, barcode in enumerate(barcodes):
print(imageLocations[i])
currentHMD = hammingDistance( barcode, inputBarcode)
print(currentHMD)
if currentHMD == 0:
continue
elif currentHMD < minHMD:
minHMD = currentHMD
minBarcode = barcode
imageLoc = imageLocations[i]
return minHMD, minBarcode, imageLoc
def digitSelectMenu(self):
digitFolder = int(input("enter a digit (0 - 9): "))
while digitFolder >= 0 and digitFolder <= 9:
directory = r'.\MNIST_DS\{}'.format(digitFolder)
for c, imageName in enumerate(os.listdir(directory)):
print(c , " - ", imageName)
selectImage = int(input("select image from above list: "))
selectedImagePath = os.path.join(directory, os.listdir(directory)[selectImage])
print(selectedImagePath)
selectedImageBarcode = create_barcode(selectedImagePath)
minHMD = (constants.IMAGE_SIZE*constants.NUM_PROJECTIONS)+1
print(minHMD)
minBarcode = None
imageLoc = None
for i, barcode in enumerate(barcodes):
print(imageLocations[i])
currentHMD = hammingDistance( barcode,selectedImageBarcode)
print(currentHMD)
if currentHMD == 0:
continue
elif currentHMD < minHMD:
minHMD = currentHMD
minBarcode = barcode
imageLoc = imageLocations[i]
print("Result:")
print("\tHD: {}".format(minHMD))
print("\tImage Location: {}".format(imageLoc))
print("\tBarcode: {}".format(minBarcode))
fig = plt.figure(figsize=(10, 7))
fig.suptitle("Hamming Distance: {}".format(minHMD))
rows, columns = 2, 2
selectedImage = cv2.imread(selectedImagePath)
resultImageRelativePath = imageLoc.split("_", 1)
resultImagePath = os.path.join(r".\MNIST_DS", r"{}\{}".format(resultImageRelativePath[0], resultImageRelativePath[1]))
resultImage = cv2.imread(resultImagePath)
from create_barcode_image import BarcodeImageGenerator as big
big.generate_barcode_image(selectedImageBarcode, r".\temp\searchImage.png")
big.generate_barcode_image(minBarcode, r".\temp\resultImage.png")
searchBarcodeImage = cv2.imread(r".\temp\searchImage.png")
resultBarcodeImage = cv2.imread(r".\temp\resultImage.png")
fig.add_subplot(rows, columns, 1)
plt.imshow(selectedImage)
plt.axis("off")
plt.title("Search Image")
fig.add_subplot(rows, columns, 2)
plt.imshow(resultImage)
plt.axis("off")
plt.title("Result Image")
fig.add_subplot(rows, columns, 3)
plt.imshow(searchBarcodeImage)
plt.axis("off")
plt.title("Search Barcode")
fig.add_subplot(rows, columns, 4)
plt.imshow(resultBarcodeImage)
plt.axis("off")
plt.title("Result Barcode")
plt.show()
digitFolder = int(input("enter a digit (0 - 9): "))
def showAllResults(self):
fig = plt.figure(figsize=(16,100), dpi=100)
rows, cols = constants.NUMBER_OF_DIGITS*constants.NUMBER_IMAGES, 2
for currDigit in range(constants.NUMBER_OF_DIGITS): # loop through 0 to NUMBER_OF_DIGITS-1
directory = r'./MNIST_DS/{}'.format(currDigit) # digit folder path
            for i, imageName in zip((i for i in range(1, 20, 2)), os.listdir(directory)):  # loop through every file in the directory
selectedImagePath = os.path.join(directory, imageName)
print("Checking image {}".format(os.path.join(directory, imageName)))
searchBarcode = create_barcode(os.path.join(directory, imageName))
hmd, resultBarcode, resultImgLoc = self.findSimilar(searchBarcode)
selectedImage = cv2.imread(selectedImagePath)
resultImageRelativePath = resultImgLoc.split("_", 1)
resultImagePath = os.path.join(r".\MNIST_DS", r"{}\{}".format(resultImageRelativePath[0], resultImageRelativePath[1]))
resultImage = cv2.imread(resultImagePath)
sii = currDigit*20+i
fig.add_subplot(rows, cols, sii)
plt.imshow(selectedImage)
plt.axis("off")
plt.title(selectedImagePath, fontsize=9, y=0.90)
fig.add_subplot(rows, cols, sii+1)
plt.imshow(resultImage)
plt.axis("off")
plt.title(resultImagePath, fontsize=9, y=0.90)
return fig
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from PyQt5 import QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
class ScrollableWindow(QtWidgets.QMainWindow):
def __init__(self, fig):
self.qapp = QtWidgets.QApplication([])
QtWidgets.QMainWindow.__init__(self)
self.widget = QtWidgets.QWidget()
self.setCentralWidget(self.widget)
self.widget.setLayout(QtWidgets.QVBoxLayout())
self.widget.layout().setContentsMargins(0,0,0,0)
self.widget.layout().setSpacing(0)
self.fig = fig
self.canvas = FigureCanvas(self.fig)
self.canvas.draw()
self.scroll = QtWidgets.QScrollArea(self.widget)
self.scroll.setWidget(self.canvas)
self.nav = NavigationToolbar(self.canvas, self.widget)
self.widget.layout().addWidget(self.nav)
self.widget.layout().addWidget(self.scroll)
self.show()
exit(self.qapp.exec_())
if __name__ == "__main__":
print("Search Menu")
print("Calculate Accuracy Hit Ratio")
print("Show All Results at Once")
input("Yes I have read the above notes. Press Enter to continue...")
print("\n\n\nEnter a number between 0 and 9 to search image")
print("Enter a number smaller than 0 or greater than 9 to exit the search menu")
print("Once you exit Search Menu you will get Calculate Accuracy Hit Ratio ")
input("Yes I have read the above notes. Press Enter to continue...")
si = SearchSimilar() # search menu
print("\n\n\nCalculating accuracy hit ratio...")
cahr = CalculateAccuracyHitRatio(barcodes, imageLocations) # accuracy calculator
print("Accuracy is {}".format(cahr.calculateAccuracy())) # calculate and display the accuracy
input("Yes I have read the above notes. Press Enter to DISPLAY ALL THE RESULTS at Once...")
print("\n\n\nSearching all the images in the dataset and finding results...")
print("Once you get the window maximize the window and scrolldown to see the results")
input("Yes I have read the above notes. Press Enter to continue...")
fig = si.showAllResults()
a = ScrollableWindow(fig)
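# --- Hedged sketch of the assumed constants module (added; not original) -----
# constants.py is imported at the top but not shown. The values below are only
# guesses consistent with how the names are used above (IMAGE_SIZE *
# NUM_PROJECTIONS bounds the Hamming distance; the accuracy lambda divides by
# NUMBER_OF_DIGITS * NUMBER_IMAGES = 100):
#
#     MIN_DEGREE = 0          # first projection angle
#     MAX_DEGREE = 180        # projections cover half a turn
#     STEP_DEGREE = 45        # angular step between projections
#     NUM_PROJECTIONS = int((MAX_DEGREE - MIN_DEGREE) / STEP_DEGREE)
#     IMAGE_SIZE = 28         # MNIST images are 28 x 28
#     NUMBER_OF_DIGITS = 10   # folders 0..9
#     NUMBER_IMAGES = 10      # images per digit folder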
APIs used:
[
"matplotlib.pyplot.title",
"PyQt5.QtWidgets.QMainWindow.__init__",
"PyQt5.QtWidgets.QVBoxLayout",
"matplotlib.pyplot.figure",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QApplication",
"os.path.join",
"PyQt5.QtWidgets.QWidget",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"create_barcode_image.BarcodeImageGenerator.generate_barcode_image",
"matplotlib.use",
"os.listdir",
"matplotlib.pyplot.axis",
"PyQt5.QtWidgets.QScrollArea",
"cv2.imread",
"numpy.array",
"PIL.Image.fromarray"
]
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 15:39:43 2019
@author: Manu
"""
import mne
from mne import io
import sys
sys.path.append('C:/_MANU/_U821/Python_Dev/')
import scipy
from util import tools,asr,raw_asrcalibration
import numpy as np
import matplotlib.pyplot as plt
from mne.viz import plot_evoked_topo
fname = 'C:/_MANU/_U821/_wip/ContextOdd/raw/ANDNI_0001.vhdr'
raw = io.read_raw_brainvision(fname, preload = False)
picks_eeg = mne.pick_types(raw.info, meg=False, eeg=True, eog=False,stim=False, exclude='bads')
ListChannels = np.array(raw.info['ch_names'])
montage = mne.channels.read_montage(kind='standard_1020',ch_names=ListChannels[picks_eeg])
raw = io.read_raw_brainvision(fname, montage=montage, preload = True)
picks_eeg = mne.pick_types(raw.info, meg=False, eeg=True, eog=False,stim=False, exclude='bads')
raw = raw.pick_types(meg=False, eeg=True, eog=False, stim=True, exclude='bads')
# ASR Calibration
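# Note (added): ASR = Artifact Subspace Reconstruction. Roughly, the
# calibration step below estimates clean-data statistics (a component basis
# and per-component rejection thresholds controlled by `cutoff`) from a quiet
# stretch of the recording; asr_process_on_epoch later uses that `state` to
# reconstruct artifact-dominated subspaces in each epoch instead of rejecting it.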
raworig_Data= raw._data
l_freq = 2
h_freq = 20
Wn = [l_freq/(raw.info['sfreq']/2.), h_freq/(raw.info['sfreq']/2.) ]
b, a = scipy.signal.iirfilter(N=2, Wn=Wn, btype = 'bandpass', analog = False, ftype = 'butter', output = 'ba')
raw._data[picks_eeg, :] = scipy.signal.lfilter(b, a, raworig_Data[picks_eeg, :], axis=1, zi=None)
rawCalibAsr=raw.copy()
tmin = 30
tmax = 60 #s
rawCalibAsr = rawCalibAsr.crop(tmin=tmin,tmax=tmax)
ChanName4VEOG = ['Fp1','Fp2'] # 2 VEOG
cutoff = 5 # Makoto preprocessing says best between 10 and 20 https://sccn.ucsd.edu/wiki/Makoto%27s_preprocessing_pipeline#Alternatively.2C_cleaning_continuous_data_using_ASR_.2803.2F26.2F2019_updated.29
Yule_Walker_filtering = True
state = raw_asrcalibration.raw_asrcalibration(rawCalibAsr,ChanName4VEOG, cutoff,Yule_Walker_filtering)
# ASR process on epoch
event_id = {'Std': 1, 'Dev': 2}
events_orig,_ = mne.events_from_annotations(raw)
ixdev = np.array(np.where(events_orig[:,2]==2))
ixstd= ixdev-1
events = events_orig[np.sort(np.array(np.hstack((ixstd , ixdev)))),:]
events = np.squeeze(events, axis=0)
tmin, tmax = -0.2, 0.5
raw4detect = raw.copy()
raw4detect._data,iirstate = asr.YW_filter(raw._data,raw.info['sfreq'],None) ## HERE
epochs4Detect = mne.Epochs(raw4detect, events=events, event_id=event_id, tmin=tmin,tmax=tmax, proj=True,baseline=None, reject=None, picks=picks_eeg)
epochs_filt = mne.Epochs(raw, events=events, event_id=event_id, tmin=tmin,tmax=tmax, proj=None,baseline=None, reject=None, picks=picks_eeg)
Data4detect = epochs4Detect.get_data()
Data2Correct = epochs_filt.get_data()
DataClean = np.zeros((Data2Correct.shape))
for i_epoch in range(Data4detect.shape[0]):
EpochYR = Data4detect[i_epoch,:,:]
Epoch2Corr = Data2Correct[i_epoch,:,:]
DataClean[i_epoch,:,:] = asr.asr_process_on_epoch(EpochYR,Epoch2Corr,state)
epochs_clean = mne.EpochsArray(DataClean,info=epochs_filt.info,events=events,event_id=event_id)
srate = raw.info['sfreq']
evoked_std = epochs_filt['Std'].average(picks=picks_eeg)
evoked_dev = epochs_filt['Dev'].average(picks=picks_eeg)
evoked_clean_std = epochs_clean['Std'].average(picks=picks_eeg)
evoked_clean_dev = epochs_clean['Dev'].average(picks=picks_eeg)
evoked_clean_std.first=-200
evoked_clean_std.last= tmax*srate
evoked_clean_dev.first=-200
evoked_clean_dev.last= tmax*srate
evoked_clean_std.times= np.around(np.linspace(-0.2, tmax, num=DataClean.shape[2]),decimals=3)
evoked_clean_dev.times= np.around(np.linspace(-0.2, tmax, num=DataClean.shape[2]),decimals=3)
evokeds = [evoked_std, evoked_dev, evoked_clean_std, evoked_clean_dev]
colors = 'blue', 'red','steelblue','magenta'
plot_evoked_topo(evokeds, color=colors, title='Std Dev', background_color='w')
plt.show()
evoked_clean_MMN=evoked_clean_std.copy()
evoked_clean_MMN.data = (evoked_clean_dev.data - evoked_clean_std.data)
evoked_MMN =evoked_clean_MMN.copy()
evoked_MMN.data = (evoked_dev.data-evoked_std.data)
evokeds_MMN= [evoked_clean_MMN,evoked_MMN]
colors = 'red', 'black'
plot_evoked_topo(evokeds_MMN, color=colors, title='MMN', background_color='w')
plt.show()
kwargs = dict(times=np.arange(-0.1, 0.40, 0.025), vmin=-1.5, vmax=1.5, layout='auto',
head_pos=dict(center=(0., 0.), scale=(1., 1.)))
evoked_MMN.plot_topomap(**kwargs)
evoked_clean_MMN.plot_topomap(**kwargs)
|
[
"mne.pick_types",
"util.asr.YW_filter",
"mne.io.read_raw_brainvision",
"util.raw_asrcalibration.raw_asrcalibration",
"numpy.arange",
"sys.path.append",
"mne.events_from_annotations",
"scipy.signal.lfilter",
"mne.channels.read_montage",
"scipy.signal.iirfilter",
"numpy.linspace",
"matplotlib.pyplot.show",
"mne.viz.plot_evoked_topo",
"numpy.hstack",
"numpy.squeeze",
"util.asr.asr_process_on_epoch",
"numpy.zeros",
"mne.Epochs",
"numpy.where",
"numpy.array",
"mne.EpochsArray"
] |
[((126, 171), 'sys.path.append', 'sys.path.append', (['"""C:/_MANU/_U821/Python_Dev/"""'], {}), "('C:/_MANU/_U821/Python_Dev/')\n", (141, 171), False, 'import sys\n'), ((391, 436), 'mne.io.read_raw_brainvision', 'io.read_raw_brainvision', (['fname'], {'preload': '(False)'}), '(fname, preload=False)\n', (414, 436), False, 'from mne import io\n'), ((451, 539), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '(False)', 'eeg': '(True)', 'eog': '(False)', 'stim': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg=False, eeg=True, eog=False, stim=False,\n exclude='bads')\n", (465, 539), False, 'import mne\n'), ((551, 581), 'numpy.array', 'np.array', (["raw.info['ch_names']"], {}), "(raw.info['ch_names'])\n", (559, 581), True, 'import numpy as np\n'), ((592, 678), 'mne.channels.read_montage', 'mne.channels.read_montage', ([], {'kind': '"""standard_1020"""', 'ch_names': 'ListChannels[picks_eeg]'}), "(kind='standard_1020', ch_names=ListChannels[\n picks_eeg])\n", (617, 678), False, 'import mne\n'), ((679, 740), 'mne.io.read_raw_brainvision', 'io.read_raw_brainvision', (['fname'], {'montage': 'montage', 'preload': '(True)'}), '(fname, montage=montage, preload=True)\n', (702, 740), False, 'from mne import io\n'), ((756, 844), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '(False)', 'eeg': '(True)', 'eog': '(False)', 'stim': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg=False, eeg=True, eog=False, stim=False,\n exclude='bads')\n", (770, 844), False, 'import mne\n'), ((1069, 1169), 'scipy.signal.iirfilter', 'scipy.signal.iirfilter', ([], {'N': '(2)', 'Wn': 'Wn', 'btype': '"""bandpass"""', 'analog': '(False)', 'ftype': '"""butter"""', 'output': '"""ba"""'}), "(N=2, Wn=Wn, btype='bandpass', analog=False, ftype=\n 'butter', output='ba')\n", (1091, 1169), False, 'import scipy\n'), ((1196, 1267), 'scipy.signal.lfilter', 'scipy.signal.lfilter', (['b', 'a', 'raworig_Data[picks_eeg, :]'], {'axis': '(1)', 'zi': 'None'}), '(b, a, raworig_Data[picks_eeg, :], axis=1, zi=None)\n', (1216, 1267), False, 'import scipy\n'), ((1656, 1756), 'util.raw_asrcalibration.raw_asrcalibration', 'raw_asrcalibration.raw_asrcalibration', (['rawCalibAsr', 'ChanName4VEOG', 'cutoff', 'Yule_Walker_filtering'], {}), '(rawCalibAsr, ChanName4VEOG, cutoff,\n Yule_Walker_filtering)\n', (1693, 1756), False, 'from util import tools, asr, raw_asrcalibration\n'), ((1836, 1868), 'mne.events_from_annotations', 'mne.events_from_annotations', (['raw'], {}), '(raw)\n', (1863, 1868), False, 'import mne\n'), ((2012, 2038), 'numpy.squeeze', 'np.squeeze', (['events'], {'axis': '(0)'}), '(events, axis=0)\n', (2022, 2038), True, 'import numpy as np\n'), ((2114, 2163), 'util.asr.YW_filter', 'asr.YW_filter', (['raw._data', "raw.info['sfreq']", 'None'], {}), "(raw._data, raw.info['sfreq'], None)\n", (2127, 2163), False, 'from util import tools, asr, raw_asrcalibration\n'), ((2186, 2325), 'mne.Epochs', 'mne.Epochs', (['raw4detect'], {'events': 'events', 'event_id': 'event_id', 'tmin': 'tmin', 'tmax': 'tmax', 'proj': '(True)', 'baseline': 'None', 'reject': 'None', 'picks': 'picks_eeg'}), '(raw4detect, events=events, event_id=event_id, tmin=tmin, tmax=\n tmax, proj=True, baseline=None, reject=None, picks=picks_eeg)\n', (2196, 2325), False, 'import mne\n'), ((2333, 2464), 'mne.Epochs', 'mne.Epochs', (['raw'], {'events': 'events', 'event_id': 'event_id', 'tmin': 'tmin', 'tmax': 'tmax', 'proj': 'None', 'baseline': 'None', 'reject': 'None', 'picks': 'picks_eeg'}), '(raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,\n 
proj=None, baseline=None, reject=None, picks=picks_eeg)\n', (2343, 2464), False, 'import mne\n'), ((2550, 2578), 'numpy.zeros', 'np.zeros', (['Data2Correct.shape'], {}), '(Data2Correct.shape)\n', (2558, 2578), True, 'import numpy as np\n'), ((2822, 2910), 'mne.EpochsArray', 'mne.EpochsArray', (['DataClean'], {'info': 'epochs_filt.info', 'events': 'events', 'event_id': 'event_id'}), '(DataClean, info=epochs_filt.info, events=events, event_id=\n event_id)\n', (2837, 2910), False, 'import mne\n'), ((3611, 3689), 'mne.viz.plot_evoked_topo', 'plot_evoked_topo', (['evokeds'], {'color': 'colors', 'title': '"""Std Dev"""', 'background_color': '"""w"""'}), "(evokeds, color=colors, title='Std Dev', background_color='w')\n", (3627, 3689), False, 'from mne.viz import plot_evoked_topo\n'), ((3690, 3700), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3698, 3700), True, 'import matplotlib.pyplot as plt\n'), ((3972, 4050), 'mne.viz.plot_evoked_topo', 'plot_evoked_topo', (['evokeds_MMN'], {'color': 'colors', 'title': '"""MMN"""', 'background_color': '"""w"""'}), "(evokeds_MMN, color=colors, title='MMN', background_color='w')\n", (3988, 4050), False, 'from mne.viz import plot_evoked_topo\n'), ((4051, 4061), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4059, 4061), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1918), 'numpy.where', 'np.where', (['(events_orig[:, 2] == 2)'], {}), '(events_orig[:, 2] == 2)\n', (1894, 1918), True, 'import numpy as np\n'), ((2740, 2792), 'util.asr.asr_process_on_epoch', 'asr.asr_process_on_epoch', (['EpochYR', 'Epoch2Corr', 'state'], {}), '(EpochYR, Epoch2Corr, state)\n', (2764, 2792), False, 'from util import tools, asr, raw_asrcalibration\n'), ((3337, 3384), 'numpy.linspace', 'np.linspace', (['(-0.2)', 'tmax'], {'num': 'DataClean.shape[2]'}), '(-0.2, tmax, num=DataClean.shape[2])\n', (3348, 3384), True, 'import numpy as np\n'), ((3431, 3478), 'numpy.linspace', 'np.linspace', (['(-0.2)', 'tmax'], {'num': 'DataClean.shape[2]'}), '(-0.2, tmax, num=DataClean.shape[2])\n', (3442, 3478), True, 'import numpy as np\n'), ((4086, 4113), 'numpy.arange', 'np.arange', (['(-0.1)', '(0.4)', '(0.025)'], {}), '(-0.1, 0.4, 0.025)\n', (4095, 4113), True, 'import numpy as np\n'), ((1971, 1996), 'numpy.hstack', 'np.hstack', (['(ixstd, ixdev)'], {}), '((ixstd, ixdev))\n', (1980, 1996), True, 'import numpy as np\n')]
|
import numpy as np
import string
import re
import nltk
nltk.download('stopwords')
stop_words = nltk.corpus.stopwords.words('english')
class word_inform():
def __init__(self):
self.inform = {}
def wordinput(self):
        WI = input('Please enter a sentence: ') # Read in the text. WI = word input.
        WI = WI.replace('\n',' ') # If the paragraph contains line breaks, replace them with spaces
        #be = {'am', 'is', 'are', 'be' , 'was', 'were'} # store the be-verbs.
        WI = WI.lower()
        #WI = WI.replace("i'm",'i am') # expand the contraction so the be-verb can be detected.
        #WI = WI.replace("he's",'he is') # expand the contraction so the be-verb can be detected.
        #WI = WI.replace("she's",'she is') # expand the contraction so the be-verb can be detected.
        #WI = WI.replace("that's",'that is') # expand the contraction so the be-verb can be detected
        #WI = WI.replace("what's",'what is') # expand the contraction so the be-verb can be detected.
        #WI = WI.replace("it's",'it is') # expand the contraction so the be-verb can be detected. (spell out the 'is' contraction.)
        #WI = WI.replace("you're",'you are') # expand the contraction so the be-verb can be detected.
        #WI = WI.replace("they're",'they are') # expand the contraction so the be-verb can be detected.
        #WI = WI.replace("we're",'we are') # expand the contraction so the be-verb can be detected.
        #Auxiliary_verb = {'will','would','can','could','shall','should','may','might','must'}
        #WI = WI.replace("i'll",'i will') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("you'll",'you will') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("they'll",'they will') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("we'll",'we will') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("he'll",'he will') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("she'll",'she will') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("it'll",'it will') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("that'll",'that will') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("i'd",'i would') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("you'd",'you would') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("they'd",'they would') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("we'd",'we would') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("he'd",'he would') # expand the contraction so the auxiliary verb can be detected.
        #WI = WI.replace("she'd",'she would') # expand the contraction so the auxiliary verb can be detected.
        #WI = re.sub("[.]{2,}",'',WI) # remove runs of two or more periods
        WI = re.sub('[\\w.]+@[\\w.]+',' ',WI)
        WI = re.sub("[?!'.]{1,}",'.',WI)
        WI = re.sub("[^\w\s'.]+",'',WI) # remove special characters; apostrophes are kept because stop words include forms like you'll.
        WI = re.sub("[.]{1,}",'.',WI)
        sentence = WI.strip(string.punctuation).split('.') # If the paragraph contains periods, split it into sentences; strip the trailing punctuation.
        sentence_words = [s.split() for s in sentence] # Split each sentence into its words.
self.inform['sentence_words'] = sentence_words
def word_voc(self,voc):
before_voc_length = len(voc)
        sentence_words = self.inform['sentence_words'] # the sentences exactly as they were entered.
for length in range(len(sentence_words)):
for vocab in sentence_words[length]:
                if vocab.isdigit() == False: # digits keep ending up in the training sentences and seem to hurt training, so exclude them.
if vocab not in stop_words:
if vocab not in voc:
voc.append(vocab)
self.inform['voc'] = voc
after_voc_length = len(voc)
self.inform['voc_length_diff'] = (after_voc_length - before_voc_length)
self.inform['voc_length'] = after_voc_length
word_vector = [[] for i in sentence_words]
word_sentence = [[] for i in sentence_words]
voc_vectors = []
for word in voc:
            voc_vector = np.zeros_like(voc, dtype = int)# create a new vector the size of the vocabulary.
            index_of_input_word = voc.index(word)
            voc_vector[index_of_input_word] += 1 # record which index of the vocabulary this word occupies.
voc_vectors.append(voc_vector)
self.inform['voc_vectors'] = voc_vectors
        # word_vector >> list of the input sentences split into their individual words.
for length in range(len(sentence_words)):
for word in sentence_words[length]:
                if word.isdigit() == False: # digits keep ending up in the training sentences and seem to hurt training, so exclude them.
                    if word not in stop_words:
                        voc_vector = np.zeros_like(voc, dtype = int)# create a new vector the size of the vocabulary.
                        index_of_input_word = voc.index(word)
                        voc_vector[index_of_input_word] += 1 # record which index of the vocabulary this word occupies.
word_vector[length].append(voc_vector)
word_sentence[length].append(word)
self.inform['sentence_words'] = word_sentence
self.inform['word_vector'] = word_vector
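# Minimal usage sketch (illustrative): wordinput() reads one paragraph
# interactively from stdin; word_voc() then grows the given vocabulary and
# builds the one-hot word vectors for each sentence.
if __name__ == '__main__':
    wi = word_inform()
    wi.wordinput()
    wi.word_voc([])  # start from an empty vocabulary
    print(wi.inform['voc'])
    print(wi.inform['word_vector'])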
|
[
"nltk.download",
"numpy.zeros_like",
"re.sub",
"nltk.corpus.stopwords.words"
] |
[((55, 81), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (68, 81), False, 'import nltk\n'), ((95, 133), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (122, 133), False, 'import nltk\n'), ((2335, 2369), 're.sub', 're.sub', (['"""[\\\\w.]+@[\\\\w.]+"""', '""" """', 'WI'], {}), "('[\\\\w.]+@[\\\\w.]+', ' ', WI)\n", (2341, 2369), False, 'import re\n'), ((2381, 2410), 're.sub', 're.sub', (['"""[?!\'.]{1,}"""', '"""."""', 'WI'], {}), '("[?!\'.]{1,}", \'.\', WI)\n', (2387, 2410), False, 'import re\n'), ((2422, 2452), 're.sub', 're.sub', (['"""[^\\\\w\\\\s\'.]+"""', '""""""', 'WI'], {}), '("[^\\\\w\\\\s\'.]+", \'\', WI)\n', (2428, 2452), False, 'import re\n'), ((2530, 2556), 're.sub', 're.sub', (['"""[.]{1,}"""', '"""."""', 'WI'], {}), "('[.]{1,}', '.', WI)\n", (2536, 2556), False, 'import re\n'), ((3745, 3774), 'numpy.zeros_like', 'np.zeros_like', (['voc'], {'dtype': 'int'}), '(voc, dtype=int)\n', (3758, 3774), True, 'import numpy as np\n'), ((4394, 4423), 'numpy.zeros_like', 'np.zeros_like', (['voc'], {'dtype': 'int'}), '(voc, dtype=int)\n', (4407, 4423), True, 'import numpy as np\n')]
|
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
import sys
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import ParameterGrid, GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
#print(__doc__)
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=Warning) #DeprecationWarning)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
class ItemSelector(BaseEstimator, TransformerMixin):
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class CustomFeatures(BaseEstimator):
def __init__(self):
pass
def get_feature_names(self):
return np.array(['sent_len']) #, 'lang_prob'])
def fit(self, documents, y=None):
return self
def transform(self, x_dataset):
X_num_token = list()
#X_count_nouns = list()
for sentence in x_dataset:
# takes raw text and calculates type token ratio
X_num_token.append(len(sentence))
# takes pos tag text and counts number of noun pos tags (NN, NNS etc.)
# X_count_nouns.append(count_nouns(sentence))
X = np.array([X_num_token]).T #, X_count_nouns]).T
if not hasattr(self, 'scalar'):
self.scalar = StandardScaler().fit(X)
return self.scalar.transform(X)
class FeatureExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('words', object), ('meta', object)]) #('length', object), ('condscore', object), ('score', object), ('normscore', object), ('langpred', bool)])
for i, text in enumerate(posts):
elems = text.split('\t')
words, cs, s, ns, lp = elems[:5]
#print(elems)
features['words'][i] = words
features['meta'][i] = {'length': len(words.split()),
'condscore': float(cs), 'score': float(s),
'normscore': float(ns), 'langpred': bool(lp)}
if len(elems) > 5:
ecs, es, ens, ep = elems[5:]
features['meta'][i].update({'event_condscore': float(ecs),
'event_score': float(es), 'event_normscore': float(ens), 'event_pred': bool(ep)})
return features
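# Assumed input line format (inferred from FeatureExtractor.transform and
# load_data below; illustrative only, the real files may differ):
#   "<text>\t<condscore>\t<score>\t<normscore>\t<langpred>"
# optionally followed by
#   "\t<event_condscore>\t<event_score>\t<event_normscore>\t<event_pred>"
# e.g. "the knight rode into the forest .\t-42.1\t-40.3\t-0.97\tTrue"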
# #############################################################################
# Load data test
def load_data(filename, suffix):
contents, labels = [], []
#data = StoryData()
with open(filename+'.true.'+suffix) as tinf, open(filename+'.false.'+suffix) as finf:
for line in tinf:
elems = line.strip()#.split('\t')
contents.append(elems)
labels.append(1)
for line in finf:
elems = line.strip()#.split('\t')
contents.append(elems)
labels.append(0)
print("data size:", len(contents))
return [contents, labels]
def event_orig_mapping(orig_idx_file, event_idx_file):
orig_idx_array = []
event_idx_dict = {}
with open(orig_idx_file) as oinf, open(event_idx_file) as einf:
oinf.readline()
einf.readline()
for line in oinf:
elems = line.strip().split()
orig_idx_array.append(elems[0])
counter = 0
for line in einf:
elems = line.strip().split()
event_idx_dict[elems[0]] = counter
counter += 1
origin_to_event = {}
for i, oidx in enumerate(orig_idx_array):
if oidx in event_idx_dict:
origin_to_event[i] = event_idx_dict[oidx]
print ('map dictionary size:', len(origin_to_event))
return origin_to_event
def add_e2e_scores(original_data_array, event_data_array, origin_to_event):
assert len(event_data_array) == 2 * len(origin_to_event), (len(event_data_array), len(origin_to_event))
assert len(original_data_array) >= len(event_data_array)
    half_len = len(original_data_array) // 2
for i, elems in enumerate(original_data_array):
if i in origin_to_event:
original_data_array[i] = elems + '\t' + event_data_array[origin_to_event[i]]
if i - half_len in origin_to_event:
#print(i, origin_to_event[i-half_len], len(origin_to_event))
original_data_array[i] = elems + '\t' + event_data_array[origin_to_event[i-half_len] + len(origin_to_event)]
return original_data_array
def pairwise_eval(probs):
mid = int(len(probs) / 2)
print('middle point: %d' % mid)
pos = probs[:mid]
neg = probs[mid:]
assert len(pos) == len(neg)
count = 0.0
for p, n in zip(pos, neg):
if p[1] > n[1]:
count += 1.0
# print('True')
# else:
# print('False')
acc = count/mid
print('Test result: %.3f' % acc)
return acc
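# Note: pairwise_eval assumes the ordering produced by load_data (all true
# examples first, then all false ones) and compares the two halves of the
# probability list position by position.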
train_data = load_data(sys.argv[1], sys.argv[3])
test_data = load_data(sys.argv[2], sys.argv[3])
#train_event = load_data(sys.argv[4], sys.argv[6])
#test_event = load_data(sys.argv[5], sys.argv[6])
#train_e2o = event_orig_mapping(sys.argv[7], sys.argv[8])
#test_e2o = event_orig_mapping(sys.argv[9], sys.argv[10])
# add event-to-event info
#train_data[0] = add_e2e_scores(train_data[0], train_event[0], train_e2o)
#test_data[0] = add_e2e_scores(test_data[0], test_event[0], test_e2o)
print('Finished data loading!!')
for elem in train_data[0][:10]:
print (elem)
# #############################################################################
# Define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('featextract', FeatureExtractor()),
('union', FeatureUnion(
transformer_list=[
('meta', Pipeline([
('selector', ItemSelector(key='meta')),
('vect', DictVectorizer()),
('scale', StandardScaler(with_mean=False)),
])),
('word', Pipeline([
('selector', ItemSelector(key='words')),
('vect', CountVectorizer(ngram_range=(1,5), max_df=0.9)),
('tfidf', TfidfTransformer()),
])),
('char', Pipeline([
('selector', ItemSelector(key='words')),
('vect', CountVectorizer(ngram_range=(1,5), analyzer='char', max_df=0.8)),
('tfidf', TfidfTransformer()),
])),
],
transformer_weights={
'meta': 0.3,
'word': 1.0,
'char': 1.0,
},
)),
('clf', SGDClassifier(loss='log', alpha=0.0005, tol=0.005, random_state=0)),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'union__transformer_weights': ({'meta': 0.6, 'word': 1.0, 'char': 1.0},
# {'meta': 1.0, 'word': 1.0, 'char': 0.75},
# {'meta': 1.0, 'word': 1.0, 'char': 0.5},
# {'meta': 1.0, 'word': 0.75, 'char': 1.0},
# {'meta': 1.0, 'word': 0.75, 'char': 0.75},
# {'meta': 1.0, 'word': 0.75, 'char': 0.5},
# {'meta': 1.0, 'word': 0.5, 'char': 1.0},
# {'meta': 1.0, 'word': 0.5, 'char': 0.75},
# {'meta': 1.0, 'word': 0.5, 'char': 0.5},
{'meta': 0.7, 'word': 1.0, 'char': 1.0},
{'meta': 0.5, 'word': 1.0, 'char': 1.0},
{'meta': 0.4, 'word': 1.0, 'char': 1.0},
{'meta': 0.3, 'word': 1.0, 'char': 1.0},
# {'meta': 0.75, 'word': 1.0, 'char': 0.75},
# {'meta': 0.75, 'word': 1.0, 'char': 0.5},
# {'meta': 0.75, 'word': 0.75, 'char': 1.0},
# {'meta': 0.75, 'word': 0.75, 'char': 0.75},
# {'meta': 0.75, 'word': 0.75, 'char': 0.5},
# {'meta': 0.75, 'word': 0.5, 'char': 1.0},
# {'meta': 0.75, 'word': 0.5, 'char': 0.75},
# {'meta': 0.75, 'word': 0.5, 'char': 0.5},
# {'meta': 0.5, 'word': 1.0, 'char': 1.0},
# {'meta': 0.5, 'word': 1.0, 'char': 0.75},
# {'meta': 0.5, 'word': 1.0, 'char': 0.5},
# {'meta': 0.5, 'word': 0.75, 'char': 1.0},
# {'meta': 0.5, 'word': 0.75, 'char': 0.75},
# {'meta': 0.5, 'word': 0.75, 'char': 0.5},
# {'meta': 0.5, 'word': 0.5, 'char': 1.0},
# {'meta': 0.5, 'word': 0.5, 'char': 0.75},
# {'meta': 0.5, 'word': 0.5, 'char': 0.5},
),
'union__word__vect__max_df': (0.7, 0.8, 0.9, 1.0), #0.5,
'union__char__vect__max_df': (0.7, 0.8, 0.9, 1.0), #0.5,
#'vect__max_features': (None, 5000, 10000, 50000),
#'union__word__vect__ngram_range': ((1, 4), (1, 5)), # trigram or 5-grams (1, 4),
#'union__char__vect__ngram_range': ((1, 4), (1, 5)), # trigram or 5-grams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.001, 0.0005, 0.0001),
#'clf__penalty': ('l2', 'l1'),
'clf__tol': (5e-3, 1e-3, 5e-4),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
# pipeline.fit(train_data[0], train_data[1])
# probs = pipeline.predict_proba(test_data[0])
# acc = pairwise_eval(probs)
# exit(0)
#grid_params = list(ParameterGrid(parameters))
grid_search = GridSearchCV(pipeline, parameters, cv=5, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
#pipeline.fit(train_data[0], train_data[1]) #.contents, train_data.labels)
'''for params in grid_params:
print('Current parameters:', params)
pipeline.set_params(**params)
pipeline.fit(train_data[0], train_data[1])
probs = pipeline.predict_proba(test_data[0])
acc = pairwise_eval(probs)
exit(0)
'''
grid_search.fit(train_data[0], train_data[1])
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
print('predicting on the test data...')
score = grid_search.score(test_data[0], test_data[1])
print('Test score: %.3f' % score)
probs = grid_search.predict_proba(test_data[0])
pairwise_eval(probs)
|
[
"sklearn.model_selection.GridSearchCV",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.preprocessing.StandardScaler",
"logging.basicConfig",
"warnings.filterwarnings",
"sklearn.linear_model.SGDClassifier",
"time.time",
"warnings.catch_warnings",
"pprint.pprint",
"numpy.array",
"sklearn.feature_extraction.DictVectorizer",
"sklearn.feature_extraction.text.TfidfTransformer"
] |
[((2196, 2288), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s %(message)s')\n", (2215, 2288), False, 'import logging\n'), ((2058, 2083), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2081, 2083), False, 'import warnings\n'), ((2089, 2140), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'Warning'}), "('ignore', category=Warning)\n", (2112, 2140), False, 'import warnings\n'), ((11551, 11613), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['pipeline', 'parameters'], {'cv': '(5)', 'n_jobs': '(-1)', 'verbose': '(1)'}), '(pipeline, parameters, cv=5, n_jobs=-1, verbose=1)\n', (11563, 11613), False, 'from sklearn.model_selection import ParameterGrid, GridSearchCV\n'), ((11744, 11762), 'pprint.pprint', 'pprint', (['parameters'], {}), '(parameters)\n', (11750, 11762), False, 'from pprint import pprint\n'), ((11772, 11778), 'time.time', 'time', ([], {}), '()\n', (11776, 11778), False, 'from time import time\n'), ((2660, 2682), 'numpy.array', 'np.array', (["['sent_len']"], {}), "(['sent_len'])\n", (2668, 2682), True, 'import numpy as np\n'), ((3155, 3178), 'numpy.array', 'np.array', (['[X_num_token]'], {}), '([X_num_token])\n', (3163, 3178), True, 'import numpy as np\n'), ((8663, 8729), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""', 'alpha': '(0.0005)', 'tol': '(0.005)', 'random_state': '(0)'}), "(loss='log', alpha=0.0005, tol=0.005, random_state=0)\n", (8676, 8729), False, 'from sklearn.linear_model import SGDClassifier\n'), ((12214, 12220), 'time.time', 'time', ([], {}), '()\n', (12218, 12220), False, 'from time import time\n'), ((3270, 3286), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3284, 3286), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8009, 8025), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {}), '()\n', (8023, 8025), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((8050, 8081), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (8064, 8081), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8199, 8246), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'ngram_range': '(1, 5)', 'max_df': '(0.9)'}), '(ngram_range=(1, 5), max_df=0.9)\n', (8214, 8246), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((8270, 8288), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (8286, 8288), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((8406, 8470), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'ngram_range': '(1, 5)', 'analyzer': '"""char"""', 'max_df': '(0.8)'}), "(ngram_range=(1, 5), analyzer='char', max_df=0.8)\n", (8421, 8470), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((8494, 8512), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (8510, 8512), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n')]
|
"""!
All functions providing plotting functionalities.
"""
import matplotlib.pylab as plt
import matplotlib.dates as mdates
import matplotlib.image as image
import pandas as pd
import re
import argparse
import datetime as dt
import numpy as np
from pandas.plotting import register_matplotlib_converters
from datetime import datetime
register_matplotlib_converters()
plt.rcParams.update({'font.size': 22})
environment_sensor_pattern = re.compile(r"([0-9-]+)\s([0-9:.]+):\stemperature:\s([0-9.]+),\sgas:\s([0-9]+),\shumidity:\s([0-9.]+),\spressure:\s([0-9.]+),\saltitude:\s([0-9.]+)", re.MULTILINE)
soil_moisture_pattern = re.compile(r"([0-9-]+)\s([0-9.:]+):\s\[([0-9]+),\s([0-9.]+),\s([0-9.]+)\]", re.MULTILINE)
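# Illustrative log lines the two patterns above are written to match (formats
# inferred from the regex groups; real log lines may differ slightly):
#   environment sensor: "2020-03-01 12:00:00.123: temperature: 21.5, gas: 120345, humidity: 40.2, pressure: 1012.5, altitude: 12.3"
#   soil moisture:      "2020-03-01 12:00:00.123: [512, 1.61, 48.7]"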
def plot_soil_moisture(dict, past24):
"""!
Plots soil moisture data in simple line chart
    @param dict: Dictionary containing timestamps and associated readings.
    @param past24: If True, limit the plot to the most recent 24 hours.
"""
lists = sorted(dict.items())
x, y = zip(*lists)
fig, ax = plt.subplots()
ax.plot(x, y, 'k', linewidth=2)
fig.autofmt_xdate()
hours6 = mdates.HourLocator(interval=6)
hours3 = mdates.HourLocator(interval=3)
# im = image.imread('./icons/Grow_Space_Logo.png')
# fig.figimage(im, 300, 0, zorder=3, alpha=0.2)
ax.xaxis.set_minor_locator(hours3)
ax.tick_params(which='major', length=7, width=2, color='black')
ax.tick_params(which='minor', length=4, width=2, color='black')
ax.xaxis.set_major_locator(hours6)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d - %H'))
ax.grid()
plt.xlabel("Day - Hour")
plt.ylabel("Moisture Percentage (%)")
plt.title("Soil Moisture % vs Time")
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
if past24:
datemin = np.datetime64(x[-1], 'h') - np.timedelta64(24, 'h')
datemax = np.datetime64(x[-1], 'h')
ax.set_xlim(datemin, datemax)
plt.xlabel("Hour")
plt.title("Soil Moisture % Past 24 Hrs")
ax.xaxis.set_major_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.savefig('Moisture_vs_Time_24H.png', dpi=500)
plt.savefig('Moisture_vs_Time.png', dpi=500)
# plt.show()
def plot_temperature(dict, past24):
"""!
Plots temperature data in simple line chart
    @param dict: Dictionary containing timestamps and associated readings.
    @param past24: If True, limit the plot to the most recent 24 hours.
"""
lists = sorted(dict.items())
x, y = zip(*lists)
fig, ax = plt.subplots()
ax.plot(x, y, 'k', linewidth=2)
fig.autofmt_xdate()
hours6 = mdates.HourLocator(interval=6)
hours3 = mdates.HourLocator(interval=3)
# im = image.imread('./icons/Grow_Space_Logo.png')
# fig.figimage(im, 650, 0, zorder=3, alpha=0.2)
ax.xaxis.set_major_locator(hours6)
ax.xaxis.set_minor_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d - %H'))
ax.tick_params(which='major', length=7, width=2, color='black')
ax.tick_params(which='minor', length=4, width=2, color='black')
ax.grid()
plt.title("Temperature Over Time")
plt.xlabel("Time (Month-Day Hour)")
plt.ylabel("Temperature (°C)")
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
if past24:
datemin = np.datetime64(x[-1], 'h') - np.timedelta64(24, 'h')
datemax = np.datetime64(x[-1], 'h')
ax.set_xlim(datemin, datemax)
plt.xlabel("Hour")
plt.title('Temperature Past 24 Hrs')
ax.xaxis.set_major_locator(hours3)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.savefig('Temperature_vs_Time_24H.png', dpi=500)
plt.savefig('Temperature_vs_Time.png', dpi=500)
# plt.show()
def boxplot_environment(df):
"""!
Creates a boxplot of all the relevant environment sensor data.
What is a boxplot?
Text from https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.boxplot.html:
The box extends from the Q1 to Q3 quartile values of the data, with a line at the median (Q2).
The whiskers extend from the edges of box to show the range of the data.
The position of the whiskers is set by default to 1.5 * IQR (IQR = Q3 - Q1) from the edges of the box.
Outlier points are those past the end of the whiskers.
@param df: dataframe object from which we generate a boxplot.
"""
df['VOC'] = df['VOC'].div(1000)
# with plt.style.context("seaborn"):
fig, ax = plt.subplots(1, 3)
fig.suptitle('Environment Sensor Data')
df.boxplot('Temperature', ax=ax[0])
df.boxplot('VOC', ax=ax[1], fontsize=12)
df.boxplot('Humidity', ax=ax[2])
ax[0].set_ylabel("Temperature (°C)")
ax[1].set_ylabel("VOC (kΩ)")
ax[2].set_ylabel("Humidity (%)")
plt.subplots_adjust(top=0.95)
DPI = fig.get_dpi()
fig.set_size_inches(2400.0/float(DPI),1220.0/float(DPI))
plt.savefig('Environment_Boxplot.png', dpi=500)
# plt.show()
def extract_data_from_log(data, pattern):
"""!
Function for extracting data out of a log file using regex matching.
Returns all regex match objects.
@param data: Raw data from the log file.
@param pattern: Regex pattern to use for matching.
"""
matches = list()
for line in data:
matches.append(re.match(pattern, line))
return matches
def generate_plots(root="./logs/", soil_sensor_log="soil_moisture_sensor_1.txt", environment_sensor_log="environment_sensor.txt"):
# Plot soil moisture data
with open(root+soil_sensor_log, "r") as myfile:
data = myfile.readlines()
matches = extract_data_from_log(data, soil_moisture_pattern)
data_dict = dict()
for match in matches:
# current_val = float(match.group(4)) # Raw voltage reading
current_val = float(match.group(5)) # Percentage reading
index_time = match.group(1) + " " + match.group(2)
index_dt = dt.datetime.strptime(index_time, "%Y-%m-%d %H:%M:%S.%f")
data_dict[index_dt] = current_val
plot_soil_moisture(data_dict, True)
plot_soil_moisture(data_dict, False)
# Plot temperature data
with open(root+environment_sensor_log, "r") as myfile:
data = myfile.readlines()
matches = extract_data_from_log(data, environment_sensor_pattern)
data_dict = dict()
temperature_dict = dict()
data_dict['Temperature'] = {}
data_dict['VOC'] = {}
data_dict['Humidity'] = {}
for match in matches:
index_time = match.group(1) + " " + match.group(2)
index_dt = dt.datetime.strptime(index_time, "%Y-%m-%d %H:%M:%S.%f")
data_dict['Temperature'][index_dt] = float(match.group(3))
data_dict['VOC'][index_dt] = float(match.group(4))
data_dict['Humidity'][index_dt] = float(match.group(5))
plot_temperature(data_dict['Temperature'], True)
plot_temperature(data_dict['Temperature'], False)
# Plot environment sensor data
df = pd.DataFrame.from_dict(data_dict, orient='columns')
df.reset_index(inplace=True)
boxplot_environment(df)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-r', '--root', type=str, default="", help='Root filepath of the log data')
parser.add_argument('-s', '--soil', type=str, default="soil_moisture_sensor_1.txt", help='Name of soil moisture sensor log file')
parser.add_argument('-e', '--environment', type=str, default="environment_sensor.txt", help='Name of the envrionment sensor log file')
args = parser.parse_args()
if args.root:
root_folder = "./logs/"+args.root+"/"
else:
root_folder = "./logs/"
generate_plots(root_folder, args.soil, args.environment)
|
[
"matplotlib.pylab.savefig",
"pandas.DataFrame.from_dict",
"argparse.ArgumentParser",
"numpy.datetime64",
"matplotlib.pylab.title",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pylab.ylabel",
"re.match",
"matplotlib.pylab.rcParams.update",
"matplotlib.dates.HourLocator",
"matplotlib.pylab.subplots_adjust",
"matplotlib.dates.DateFormatter",
"matplotlib.pylab.xlabel",
"datetime.datetime.strptime",
"numpy.timedelta64",
"matplotlib.pylab.subplots",
"re.compile"
] |
[((336, 368), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (366, 368), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((369, 407), 'matplotlib.pylab.rcParams.update', 'plt.rcParams.update', (["{'font.size': 22}"], {}), "({'font.size': 22})\n", (388, 407), True, 'import matplotlib.pylab as plt\n'), ((437, 619), 're.compile', 're.compile', (['"""([0-9-]+)\\\\s([0-9:.]+):\\\\stemperature:\\\\s([0-9.]+),\\\\sgas:\\\\s([0-9]+),\\\\shumidity:\\\\s([0-9.]+),\\\\spressure:\\\\s([0-9.]+),\\\\saltitude:\\\\s([0-9.]+)"""', 're.MULTILINE'], {}), "(\n '([0-9-]+)\\\\s([0-9:.]+):\\\\stemperature:\\\\s([0-9.]+),\\\\sgas:\\\\s([0-9]+),\\\\shumidity:\\\\s([0-9.]+),\\\\spressure:\\\\s([0-9.]+),\\\\saltitude:\\\\s([0-9.]+)'\n , re.MULTILINE)\n", (447, 619), False, 'import re\n'), ((624, 723), 're.compile', 're.compile', (['"""([0-9-]+)\\\\s([0-9.:]+):\\\\s\\\\[([0-9]+),\\\\s([0-9.]+),\\\\s([0-9.]+)\\\\]"""', 're.MULTILINE'], {}), "('([0-9-]+)\\\\s([0-9.:]+):\\\\s\\\\[([0-9]+),\\\\s([0-9.]+),\\\\s([0-9.]+)\\\\]'\n , re.MULTILINE)\n", (634, 723), False, 'import re\n'), ((971, 985), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {}), '()\n', (983, 985), True, 'import matplotlib.pylab as plt\n'), ((1059, 1089), 'matplotlib.dates.HourLocator', 'mdates.HourLocator', ([], {'interval': '(6)'}), '(interval=6)\n', (1077, 1089), True, 'import matplotlib.dates as mdates\n'), ((1103, 1133), 'matplotlib.dates.HourLocator', 'mdates.HourLocator', ([], {'interval': '(3)'}), '(interval=3)\n', (1121, 1133), True, 'import matplotlib.dates as mdates\n'), ((1539, 1563), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Day - Hour"""'], {}), "('Day - Hour')\n", (1549, 1563), True, 'import matplotlib.pylab as plt\n'), ((1568, 1605), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Moisture Percentage (%)"""'], {}), "('Moisture Percentage (%)')\n", (1578, 1605), True, 'import matplotlib.pylab as plt\n'), ((1610, 1646), 'matplotlib.pylab.title', 'plt.title', (['"""Soil Moisture % vs Time"""'], {}), "('Soil Moisture % vs Time')\n", (1619, 1646), True, 'import matplotlib.pylab as plt\n'), ((2147, 2191), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Moisture_vs_Time.png"""'], {'dpi': '(500)'}), "('Moisture_vs_Time.png', dpi=500)\n", (2158, 2191), True, 'import matplotlib.pylab as plt\n'), ((2458, 2472), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {}), '()\n', (2470, 2472), True, 'import matplotlib.pylab as plt\n'), ((2546, 2576), 'matplotlib.dates.HourLocator', 'mdates.HourLocator', ([], {'interval': '(6)'}), '(interval=6)\n', (2564, 2576), True, 'import matplotlib.dates as mdates\n'), ((2590, 2620), 'matplotlib.dates.HourLocator', 'mdates.HourLocator', ([], {'interval': '(3)'}), '(interval=3)\n', (2608, 2620), True, 'import matplotlib.dates as mdates\n'), ((3026, 3060), 'matplotlib.pylab.title', 'plt.title', (['"""Temperature Over Time"""'], {}), "('Temperature Over Time')\n", (3035, 3060), True, 'import matplotlib.pylab as plt\n'), ((3065, 3100), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Time (Month-Day Hour)"""'], {}), "('Time (Month-Day Hour)')\n", (3075, 3100), True, 'import matplotlib.pylab as plt\n'), ((3105, 3135), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Temperature (°C)"""'], {}), "('Temperature (°C)')\n", (3115, 3135), True, 'import matplotlib.pylab as plt\n'), ((3635, 3682), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Temperature_vs_Time.png"""'], {'dpi': '(500)'}), "('Temperature_vs_Time.png', dpi=500)\n", (3646, 3682), 
True, 'import matplotlib.pylab as plt\n'), ((4450, 4468), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (4462, 4468), True, 'import matplotlib.pylab as plt\n'), ((4750, 4779), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)'}), '(top=0.95)\n', (4769, 4779), True, 'import matplotlib.pylab as plt\n'), ((4869, 4916), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Environment_Boxplot.png"""'], {'dpi': '(500)'}), "('Environment_Boxplot.png', dpi=500)\n", (4880, 4916), True, 'import matplotlib.pylab as plt\n'), ((6907, 6958), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_dict'], {'orient': '"""columns"""'}), "(data_dict, orient='columns')\n", (6929, 6958), True, 'import pandas as pd\n'), ((7063, 7107), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (7086, 7107), False, 'import argparse\n'), ((1488, 1519), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d - %H"""'], {}), "('%d - %H')\n", (1508, 1519), True, 'import matplotlib.dates as mdates\n'), ((1835, 1860), 'numpy.datetime64', 'np.datetime64', (['x[-1]', '"""h"""'], {}), "(x[-1], 'h')\n", (1848, 1860), True, 'import numpy as np\n'), ((1907, 1925), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Hour"""'], {}), "('Hour')\n", (1917, 1925), True, 'import matplotlib.pylab as plt\n'), ((1934, 1974), 'matplotlib.pylab.title', 'plt.title', (['"""Soil Moisture % Past 24 Hrs"""'], {}), "('Soil Moisture % Past 24 Hrs')\n", (1943, 1974), True, 'import matplotlib.pylab as plt\n'), ((2094, 2142), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Moisture_vs_Time_24H.png"""'], {'dpi': '(500)'}), "('Moisture_vs_Time_24H.png', dpi=500)\n", (2105, 2142), True, 'import matplotlib.pylab as plt\n'), ((2839, 2870), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d - %H"""'], {}), "('%d - %H')\n", (2859, 2870), True, 'import matplotlib.dates as mdates\n'), ((3324, 3349), 'numpy.datetime64', 'np.datetime64', (['x[-1]', '"""h"""'], {}), "(x[-1], 'h')\n", (3337, 3349), True, 'import numpy as np\n'), ((3396, 3414), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Hour"""'], {}), "('Hour')\n", (3406, 3414), True, 'import matplotlib.pylab as plt\n'), ((3423, 3459), 'matplotlib.pylab.title', 'plt.title', (['"""Temperature Past 24 Hrs"""'], {}), "('Temperature Past 24 Hrs')\n", (3432, 3459), True, 'import matplotlib.pylab as plt\n'), ((3579, 3630), 'matplotlib.pylab.savefig', 'plt.savefig', (['"""Temperature_vs_Time_24H.png"""'], {'dpi': '(500)'}), "('Temperature_vs_Time_24H.png', dpi=500)\n", (3590, 3630), True, 'import matplotlib.pylab as plt\n'), ((5888, 5944), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['index_time', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(index_time, '%Y-%m-%d %H:%M:%S.%f')\n", (5908, 5944), True, 'import datetime as dt\n'), ((6508, 6564), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['index_time', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(index_time, '%Y-%m-%d %H:%M:%S.%f')\n", (6528, 6564), True, 'import datetime as dt\n'), ((1765, 1790), 'numpy.datetime64', 'np.datetime64', (['x[-1]', '"""h"""'], {}), "(x[-1], 'h')\n", (1778, 1790), True, 'import numpy as np\n'), ((1793, 1816), 'numpy.timedelta64', 'np.timedelta64', (['(24)', '"""h"""'], {}), "(24, 'h')\n", (1807, 1816), True, 'import numpy as np\n'), ((2055, 2084), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (2075, 2084), True, 'import 
matplotlib.dates as mdates\n'), ((3254, 3279), 'numpy.datetime64', 'np.datetime64', (['x[-1]', '"""h"""'], {}), "(x[-1], 'h')\n", (3267, 3279), True, 'import numpy as np\n'), ((3282, 3305), 'numpy.timedelta64', 'np.timedelta64', (['(24)', '"""h"""'], {}), "(24, 'h')\n", (3296, 3305), True, 'import numpy as np\n'), ((3540, 3569), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (3560, 3569), True, 'import matplotlib.dates as mdates\n'), ((5271, 5294), 're.match', 're.match', (['pattern', 'line'], {}), '(pattern, line)\n', (5279, 5294), False, 'import re\n')]
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function
from caffe2.python import core, workspace
from ngraph.frontends.caffe2.c2_importer.importer import C2Importer
from ngraph.testing import ExecutorFactory
import numpy as np
import random as random
def run_all_close_compare_initiated_with_random_gauss(c2_op_name,
shape=None,
data=None,
expected=None):
workspace.ResetWorkspace()
if not shape:
shape = [2, 7]
if not data:
data = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]
net = core.Net("net")
net.GivenTensorFill([], "X", shape=shape, values=data, name="X")
getattr(net, c2_op_name)(["X"], ["Y"], name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
c2_y = workspace.FetchBlob("Y")
# compare Caffe2 and ngraph results
assert(np.allclose(f_result, c2_y, atol=1e-4, rtol=0, equal_nan=False))
# compare expected results and ngraph results
if expected:
assert(np.allclose(f_result, expected, atol=1e-3, rtol=0, equal_nan=False))
def test_relu():
run_all_close_compare_initiated_with_random_gauss('Relu',
shape=[10, 10])
def test_softmax():
shape = [2, 7]
data = [
1., 2., 3., 4., 1., 2., 3.,
1., 2., 3., 4., 1., 2., 3.
]
expected = [
[0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0.175],
[0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0.175],
]
run_all_close_compare_initiated_with_random_gauss('Softmax',
shape=shape,
data=data,
expected=expected)
def test_negative():
run_all_close_compare_initiated_with_random_gauss('Negative')
def test_sigmoid():
run_all_close_compare_initiated_with_random_gauss('Sigmoid')
def test_tanh():
run_all_close_compare_initiated_with_random_gauss('Tanh')
def test_exp():
workspace.ResetWorkspace()
shape = [2, 7]
data = [
1., 2., 3., 4., 1., 2., 3.,
1., 2., 3., 4., 1., 2., 3.
]
expected = [
[2.71828, 7.3890, 20.08553, 54.59815, 2.71828, 7.3890, 20.08553],
[2.71828, 7.3890, 20.08553, 54.59815, 2.71828, 7.3890, 20.08553],
]
run_all_close_compare_initiated_with_random_gauss('Exp',
shape=shape,
data=data,
expected=expected)
def test_NCHW2NHWC():
workspace.ResetWorkspace()
# NCHW
shape = [2, 3, 4, 5]
data1 = [float(i) for i in range(np.prod(shape))]
net = core.Net("net")
X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
X.NCHW2NHWC([], ["Y"], name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
# compare Caffe2 and ngraph results
assert(np.array_equal(f_result, workspace.FetchBlob("Y")))
def test_NHWC2NCHW():
workspace.ResetWorkspace()
# NHWC
shape = [2, 3, 4, 5]
data1 = [float(i) for i in range(np.prod(shape))]
net = core.Net("net")
X = net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
    X.NHWC2NCHW([], ["Y"], name="Y")
# Execute via Caffe2
workspace.RunNetOnce(net)
# Import caffe2 network into ngraph
importer = C2Importer()
importer.parse_net_def(net.Proto(), verbose=False)
# Get handle
f_ng = importer.get_op_handle("Y")
# Execute
with ExecutorFactory() as ex:
f_result = ex.executor(f_ng)()
# compare Caffe2 and ngraph results
assert(np.array_equal(f_result, workspace.FetchBlob("Y")))
|
[
"caffe2.python.workspace.FetchBlob",
"caffe2.python.core.Net",
"numpy.allclose",
"caffe2.python.workspace.RunNetOnce",
"numpy.prod",
"ngraph.frontends.caffe2.c2_importer.importer.C2Importer",
"caffe2.python.workspace.ResetWorkspace",
"random.gauss",
"ngraph.testing.ExecutorFactory"
] |
[((1256, 1282), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (1280, 1282), False, 'from caffe2.python import core, workspace\n'), ((1429, 1444), 'caffe2.python.core.Net', 'core.Net', (['"""net"""'], {}), "('net')\n", (1437, 1444), False, 'from caffe2.python import core, workspace\n'), ((1597, 1622), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['net'], {}), '(net)\n', (1617, 1622), False, 'from caffe2.python import core, workspace\n'), ((1679, 1691), 'ngraph.frontends.caffe2.c2_importer.importer.C2Importer', 'C2Importer', ([], {}), '()\n', (1689, 1691), False, 'from ngraph.frontends.caffe2.c2_importer.importer import C2Importer\n'), ((3194, 3220), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (3218, 3220), False, 'from caffe2.python import core, workspace\n'), ((3796, 3822), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (3820, 3822), False, 'from caffe2.python import core, workspace\n'), ((3925, 3940), 'caffe2.python.core.Net', 'core.Net', (['"""net"""'], {}), "('net')\n", (3933, 3940), False, 'from caffe2.python import core, workspace\n'), ((4084, 4109), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['net'], {}), '(net)\n', (4104, 4109), False, 'from caffe2.python import core, workspace\n'), ((4166, 4178), 'ngraph.frontends.caffe2.c2_importer.importer.C2Importer', 'C2Importer', ([], {}), '()\n', (4176, 4178), False, 'from ngraph.frontends.caffe2.c2_importer.importer import C2Importer\n'), ((4519, 4545), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (4543, 4545), False, 'from caffe2.python import core, workspace\n'), ((4648, 4663), 'caffe2.python.core.Net', 'core.Net', (['"""net"""'], {}), "('net')\n", (4656, 4663), False, 'from caffe2.python import core, workspace\n'), ((4807, 4832), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['net'], {}), '(net)\n', (4827, 4832), False, 'from caffe2.python import core, workspace\n'), ((4889, 4901), 'ngraph.frontends.caffe2.c2_importer.importer.C2Importer', 'C2Importer', ([], {}), '()\n', (4899, 4901), False, 'from ngraph.frontends.caffe2.c2_importer.importer import C2Importer\n'), ((1828, 1845), 'ngraph.testing.ExecutorFactory', 'ExecutorFactory', ([], {}), '()\n', (1843, 1845), False, 'from ngraph.testing import ExecutorFactory\n'), ((1908, 1932), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Y"""'], {}), "('Y')\n", (1927, 1932), False, 'from caffe2.python import core, workspace\n'), ((1993, 2058), 'numpy.allclose', 'np.allclose', (['f_result', 'c2_y'], {'atol': '(0.0001)', 'rtol': '(0)', 'equal_nan': '(False)'}), '(f_result, c2_y, atol=0.0001, rtol=0, equal_nan=False)\n', (2004, 2058), True, 'import numpy as np\n'), ((4315, 4332), 'ngraph.testing.ExecutorFactory', 'ExecutorFactory', ([], {}), '()\n', (4330, 4332), False, 'from ngraph.testing import ExecutorFactory\n'), ((5038, 5055), 'ngraph.testing.ExecutorFactory', 'ExecutorFactory', ([], {}), '()\n', (5053, 5055), False, 'from ngraph.testing import ExecutorFactory\n'), ((1357, 1385), 'random.gauss', 'random.gauss', ([], {'mu': '(0)', 'sigma': '(10)'}), '(mu=0, sigma=10)\n', (1369, 1385), True, 'import random as random\n'), ((2153, 2221), 'numpy.allclose', 'np.allclose', (['f_result', 'expected'], {'atol': '(0.001)', 'rtol': '(0)', 'equal_nan': '(False)'}), '(f_result, expected, atol=0.001, rtol=0, equal_nan=False)\n', (2164, 2221), True, 'import numpy as 
np\n'), ((4464, 4488), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Y"""'], {}), "('Y')\n", (4483, 4488), False, 'from caffe2.python import core, workspace\n'), ((5187, 5211), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['"""Y"""'], {}), "('Y')\n", (5206, 5211), False, 'from caffe2.python import core, workspace\n'), ((3897, 3911), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3904, 3911), True, 'import numpy as np\n'), ((4620, 4634), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (4627, 4634), True, 'import numpy as np\n'), ((1401, 1415), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1408, 1415), True, 'import numpy as np\n')]
|
# This is a module with functions that can be used to calculate the Froude
# number in a simple 2D system
# <NAME>, 2015
import numpy as np
import datetime
from salishsea_tools.nowcast import analyze
def find_mixed_depth_indices(n2, n2_thres=5e-6):
"""Finds the index of the mixed layer depth for each x-position.
The mixed layer depth is chosen based on the lowest near-surface vertical
grid cell where n2 >= n2_thres
    A reasonable value for n2_thres is 5e-6.
If n2_thres = 'None' then the index of the maximum n2 is returned.
n2 is the masked array of buoyancy frequencies with dimensions (depth, x)
returns a list of indices of mixed layer depth cell for each x-position
"""
if n2_thres == 'None':
dinds = np.argmax(n2, axis=0)
else:
dinds = []
for ii in np.arange(n2.shape[-1]):
inds = np.where(n2[:, ii] >= n2_thres)
            # exclude vertical indices <= 1 because the
# buoyancy frequency is hard to define there
if inds[0].size:
                inds = [i for i in inds[0] if i > 1]  # a list (not a lazy filter) so the emptiness check below also works in Python 3
if inds:
dinds.append(min(inds))
else:
dinds.append(0) # if no mixed layer depth found, set to 0
else:
dinds.append(0) # if no mixed layer depth found, set it to 0
return dinds
def average_mixed_layer_depth(mixed_depths, xmin, xmax):
"""Averages the mixed layer depths over indices xmin and xmax
mixed_depths is a 1d array of mixed layer depths
returns the mean mixed layer depth in the defined region
"""
mean_md = np.mean(mixed_depths[xmin:xmax+1])
return mean_md
def mld_time_series(n2, deps, times, time_origin,
xmin=300, xmax=700, n2_thres=5e-6):
"""Calculates the mean mixed layer depth in a region defined by
xmin and xmax over time
n2 is the buoyancy frequency array with dimensions (time, depth, x)
deps is the model depth array
times is the model time_counter array
time_origin is the model's time_origin as a datetime
returns a list of mixed layer depths mlds and dates
"""
mlds = []
dates = []
for t in np.arange(n2.shape[0]):
dinds = find_mixed_depth_indices(n2[t, ...], n2_thres=n2_thres)
mld = average_mixed_layer_depth(deps[dinds], xmin, xmax,)
mlds.append(mld)
dates.append(time_origin + datetime.timedelta(seconds=times[t]))
return mlds, dates
def calculate_density(t, s):
"""Caluclates the density given temperature in deg C (t)
and salinity in psu (s).
returns the density as an array (rho)
"""
rho = (
999.842594 + 6.793952e-2 * t
- 9.095290e-3 * t*t + 1.001685e-4 * t*t*t
- 1.120083e-6 * t*t*t*t + 6.536332e-9 * t*t*t*t*t
+ 8.24493e-1 * s - 4.0899e-3 * t*s
+ 7.6438e-5 * t*t*s - 8.2467e-7 * t*t*t*s
+ 5.3875e-9 * t*t*t*t*s - 5.72466e-3 * s**1.5
+ 1.0227e-4 * t*s**1.5 - 1.6546e-6 * t*t*s**1.5
+ 4.8314e-4 * s*s
)
return rho
def calculate_internal_wave_speed(rho, deps, dinds):
"""Calculates the internal wave speed
c = sqrt(g*(rho2-rho1)/rho2*h1)
where g is acceleration due to gravity, rho2 is denisty of lower layer,
rho1 is density of upper layer and h1 is thickness of upper layer.
rho is the model density (shape is depth, x), deps is the array of depths
and dinds is a list of indices that define the mixed layer depth.
rho must be a masked array
returns c, an array of internal wave speeds at each x-index in rho
"""
# acceleration due to gravity (m/s^2)
g = 9.81
# calculate average density in upper and lower layers
rho_1 = np.zeros((rho.shape[-1]))
rho_2 = np.zeros((rho.shape[-1]))
for ind, d in enumerate(dinds):
rho_1[ind] = analyze.depth_average(rho[0:d+1, ind],
deps[0:d+1], depth_axis=0)
rho_2[ind] = analyze.depth_average(rho[d+1:, ind],
deps[d+1:], depth_axis=0)
# calculate mixed layer depth
h_1 = deps[dinds]
    # calculate the wave speed
c = np.sqrt(g*(rho_2-rho_1)/rho_2*h_1)
return c
def depth_averaged_current(u, deps):
"""Calculates the depth averaged current
u is the array with current speeds (shape is depth, x).
u must be a masked array
deps is the array of depths
returns u_avg, the depths averaged current (shape x)
"""
u_avg = analyze.depth_average(u, deps, depth_axis=0)
return u_avg
def calculate_froude_number(n2, rho, u, deps, depsU, n2_thres=5e-6):
"""Calculates the Froude number
n2, rho, u are buoyancy frequency, density and current arrays
(shape depth, x)
deps is the depth array
    depsU is the depth array at U points
returns: Fr, c, u_avg - the Froude number, wave speed, and depth averaged
velocity for each x-index
"""
# calculate mixed layers
dinds = find_mixed_depth_indices(n2, n2_thres=n2_thres)
# calculate internal wave speed
c = calculate_internal_wave_speed(rho, deps, dinds)
# calculate depth averaged currents
u_avg = depth_averaged_current(u, depsU)
    # Froude number
Fr = np.abs(u_avg)/c
return Fr, c, u_avg
def froude_time_series(n2, rho, u, deps, depsU, times, time_origin,
xmin=300, xmax=700, n2_thres=5e-6):
"""Calculates the Froude number time series
n2, rho, u are buoyancy frequency, density and current arrays
(shape time, depth, x)
deps is the model depth array
depsU is the model deps array at U points
times is the model time_counter array
    time_origin is the model's time_origin as a datetime
xmin,xmax define the averaging area
returns: Frs, cs, u_avgs, dates
the Froude number, internal wave speed, and depth averaged current
for each time associated with dates
"""
Frs = []
cs = []
u_avgs = []
dates = []
for t in np.arange(n2.shape[0]):
Fr, c, u_avg = calculate_froude_number(n2[t, ...], rho[t, ...],
u[t, ...], deps, depsU,
n2_thres=n2_thres)
Frs.append(np.mean(Fr[xmin:xmax+1]))
cs.append(np.mean(c[xmin:xmax+1]))
u_avgs.append(np.mean(u_avg[xmin:xmax+1]))
dates.append(time_origin + datetime.timedelta(seconds=times[t]))
return Frs, cs, u_avgs, dates
def calculate_buoyancy_frequency(temp, sal, e3, depth_axis=1):
""" Calculate the squared buoyancy frequency (n2) given temperature and
    salinity profiles. N2 is set to g*drho/dz/rho. Note that NEMO uses a definition based on an equation of state: g * (alpha dk[T] + beta dk[S]) / e3w
temp and sal are the temperature and salinity arrays
e3 is an array of the vertical scale factors (grid spacing). Use e3w for
    consistency with NEMO.
depth_axis defines the axis which corresponds to depth in the temp/sal
arrays
    returns n2, an array of squared buoyancy frequencies at each point in temp/sal.
"""
# acceleration due to gravity
g = 9.80665
# First calculate density.
rho = calculate_density(temp, sal)
# Density gradient
drho = np.zeros(rho.shape)
# roll depth axis in rho and drho to first axis
# assume e3 already has depth axis in first axis
drho_r = np.rollaxis(drho, depth_axis)
rho_r = np.rollaxis(rho, depth_axis)
for k in np.arange(1, drho.shape[depth_axis]-1):
drho_r[k, ...] = 1/e3[k, ...]*(rho_r[k+1, ...] - rho_r[k, ...])
# Unroll drho
drho = np.rollaxis(drho_r, 0, depth_axis+1)
rho = np.rollaxis(rho_r, 0, depth_axis+1)
# Define N2
    n2 = g*drho/rho  # no negative sign because depth increases with increasing k
return n2
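# Illustrative sketch (added for clarity, not part of the original module): a toy
# stratified profile run through calculate_buoyancy_frequency. All toy_ values are
# invented; shapes follow the (time, depth, x) convention, with e3 carrying depth
# on its first axis as the function above assumes.
def _example_buoyancy_frequency():
    toy_temp = np.linspace(15., 8., 40).reshape(1, 40, 1)   # deg C, warm water over cold
    toy_sal = np.linspace(28., 31., 40).reshape(1, 40, 1)    # psu, fresh water over salty
    toy_e3 = np.full((40, 1), 10.)                           # 10 m vertical grid spacing
    return calculate_buoyancy_frequency(toy_temp, toy_sal, toy_e3, depth_axis=1)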
|
[
"numpy.abs",
"numpy.argmax",
"numpy.zeros",
"numpy.mean",
"numpy.arange",
"numpy.where",
"datetime.timedelta",
"numpy.rollaxis",
"salishsea_tools.nowcast.analyze.depth_average",
"numpy.sqrt"
] |
[((1656, 1692), 'numpy.mean', 'np.mean', (['mixed_depths[xmin:xmax + 1]'], {}), '(mixed_depths[xmin:xmax + 1])\n', (1663, 1692), True, 'import numpy as np\n'), ((2228, 2250), 'numpy.arange', 'np.arange', (['n2.shape[0]'], {}), '(n2.shape[0])\n', (2237, 2250), True, 'import numpy as np\n'), ((3763, 3786), 'numpy.zeros', 'np.zeros', (['rho.shape[-1]'], {}), '(rho.shape[-1])\n', (3771, 3786), True, 'import numpy as np\n'), ((3801, 3824), 'numpy.zeros', 'np.zeros', (['rho.shape[-1]'], {}), '(rho.shape[-1])\n', (3809, 3824), True, 'import numpy as np\n'), ((4211, 4253), 'numpy.sqrt', 'np.sqrt', (['(g * (rho_2 - rho_1) / rho_2 * h_1)'], {}), '(g * (rho_2 - rho_1) / rho_2 * h_1)\n', (4218, 4253), True, 'import numpy as np\n'), ((4544, 4588), 'salishsea_tools.nowcast.analyze.depth_average', 'analyze.depth_average', (['u', 'deps'], {'depth_axis': '(0)'}), '(u, deps, depth_axis=0)\n', (4565, 4588), False, 'from salishsea_tools.nowcast import analyze\n'), ((6043, 6065), 'numpy.arange', 'np.arange', (['n2.shape[0]'], {}), '(n2.shape[0])\n', (6052, 6065), True, 'import numpy as np\n'), ((7319, 7338), 'numpy.zeros', 'np.zeros', (['rho.shape'], {}), '(rho.shape)\n', (7327, 7338), True, 'import numpy as np\n'), ((7457, 7486), 'numpy.rollaxis', 'np.rollaxis', (['drho', 'depth_axis'], {}), '(drho, depth_axis)\n', (7468, 7486), True, 'import numpy as np\n'), ((7499, 7527), 'numpy.rollaxis', 'np.rollaxis', (['rho', 'depth_axis'], {}), '(rho, depth_axis)\n', (7510, 7527), True, 'import numpy as np\n'), ((7541, 7581), 'numpy.arange', 'np.arange', (['(1)', '(drho.shape[depth_axis] - 1)'], {}), '(1, drho.shape[depth_axis] - 1)\n', (7550, 7581), True, 'import numpy as np\n'), ((7682, 7720), 'numpy.rollaxis', 'np.rollaxis', (['drho_r', '(0)', '(depth_axis + 1)'], {}), '(drho_r, 0, depth_axis + 1)\n', (7693, 7720), True, 'import numpy as np\n'), ((7729, 7766), 'numpy.rollaxis', 'np.rollaxis', (['rho_r', '(0)', '(depth_axis + 1)'], {}), '(rho_r, 0, depth_axis + 1)\n', (7740, 7766), True, 'import numpy as np\n'), ((758, 779), 'numpy.argmax', 'np.argmax', (['n2'], {'axis': '(0)'}), '(n2, axis=0)\n', (767, 779), True, 'import numpy as np\n'), ((827, 850), 'numpy.arange', 'np.arange', (['n2.shape[-1]'], {}), '(n2.shape[-1])\n', (836, 850), True, 'import numpy as np\n'), ((3884, 3953), 'salishsea_tools.nowcast.analyze.depth_average', 'analyze.depth_average', (['rho[0:d + 1, ind]', 'deps[0:d + 1]'], {'depth_axis': '(0)'}), '(rho[0:d + 1, ind], deps[0:d + 1], depth_axis=0)\n', (3905, 3953), False, 'from salishsea_tools.nowcast import analyze\n'), ((4014, 4081), 'salishsea_tools.nowcast.analyze.depth_average', 'analyze.depth_average', (['rho[d + 1:, ind]', 'deps[d + 1:]'], {'depth_axis': '(0)'}), '(rho[d + 1:, ind], deps[d + 1:], depth_axis=0)\n', (4035, 4081), False, 'from salishsea_tools.nowcast import analyze\n'), ((5285, 5298), 'numpy.abs', 'np.abs', (['u_avg'], {}), '(u_avg)\n', (5291, 5298), True, 'import numpy as np\n'), ((871, 902), 'numpy.where', 'np.where', (['(n2[:, ii] >= n2_thres)'], {}), '(n2[:, ii] >= n2_thres)\n', (879, 902), True, 'import numpy as np\n'), ((6295, 6321), 'numpy.mean', 'np.mean', (['Fr[xmin:xmax + 1]'], {}), '(Fr[xmin:xmax + 1])\n', (6302, 6321), True, 'import numpy as np\n'), ((6339, 6364), 'numpy.mean', 'np.mean', (['c[xmin:xmax + 1]'], {}), '(c[xmin:xmax + 1])\n', (6346, 6364), True, 'import numpy as np\n'), ((6386, 6415), 'numpy.mean', 'np.mean', (['u_avg[xmin:xmax + 1]'], {}), '(u_avg[xmin:xmax + 1])\n', (6393, 6415), True, 'import numpy as np\n'), ((2450, 2486), 'datetime.timedelta', 
'datetime.timedelta', ([], {'seconds': 'times[t]'}), '(seconds=times[t])\n', (2468, 2486), False, 'import datetime\n'), ((6450, 6486), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'times[t]'}), '(seconds=times[t])\n', (6468, 6486), False, 'import datetime\n')]
|
import argparse
import torch
import sys
import os
import json
from collections import defaultdict
import h5py
from sentence_transformers import SentenceTransformer, util
import numpy
import tqdm
from itertools import zip_longest
from utils import grouper, load_sentences, load_bnids, load_visualsem_bnids
def retrieve_nodes_given_sentences(out_fname, batch_size, all_input_sentences, glosses_bnids, glosses_feats, topk):
"""
out_fname(str): Output file to write retrieved node ids to.
batch_size(int): Batch size for Sentence BERT.
all_input_sentences(list[str]): All input sentences loaded from `input_file`.
glosses_bnids(list[str]): All gloss BNids loaded from `args.glosses_bnids`. Aligned with `glosses_feats`.
glosses_feats(numpy.array): Numpy array with VisualSem gloss features computed with Sentence BERT.
topk(int): Number of nodes to retrieve for each input sentence.
"""
if os.path.isfile(out_fname):
raise Exception("File already exists: '%s'. Please remove it manually to avoid tampering."%out_fname)
n_examples = len(all_input_sentences)
print("Number of input examples to extract BNIDs for: ", n_examples)
model_name = "paraphrase-multilingual-mpnet-base-v2"
model = SentenceTransformer(model_name)
with open(out_fname, 'w', encoding='utf8') as fh_out:
ranks_predicted = []
for idxs_ in grouper(batch_size, range(n_examples)):
idxs = []
queries = []
for i in idxs_:
if not i is None:
idxs.append(i)
queries.append( all_input_sentences[i] )
queries_embs = model.encode(queries, convert_to_tensor=True)
if torch.cuda.is_available():
queries_embs = queries_embs.cuda()
scores = util.pytorch_cos_sim(queries_embs, glosses_feats)
scores = scores.cpu().numpy()
ranks = numpy.argsort(scores) # sort scores by cosine similarity (low to high)
ranks = ranks[:,::-1] # sort by cosine similarity (high to low)
for rank_idx in range(len(idxs[:ranks.shape[0]])):
bnids_predicted = []
for rank_predicted in range(topk*10):
bnid_pred = glosses_bnids[ ranks[rank_idx,rank_predicted] ]
bnid_pred_score = scores[rank_idx, ranks[rank_idx, rank_predicted]]
if not bnid_pred in bnids_predicted:
bnids_predicted.append((bnid_pred,bnid_pred_score))
if len(bnids_predicted)>=topk:
break
# write top-k predicted BNids
for iii, (bnid, score) in enumerate(bnids_predicted[:topk]):
fh_out.write(bnid+"\t"+"%.4f"%score)
if iii < topk-1:
fh_out.write("\t")
else: # iii == topk-1
fh_out.write("\n")
def encode_query(out_fname, batch_size, all_sentences):
"""
out_fname(str): Output file to write SBERT features for query.
batch_size(int): Batch size for Sentence BERT.
all_sentences(list[str]): Sentences to be used for retrieval.
"""
n_lines = len(all_sentences)
model_name = "paraphrase-multilingual-mpnet-base-v2"
model = SentenceTransformer(model_name)
shape_features = (n_lines, 768)
with h5py.File(out_fname, 'w') as fh_out:
fh_out.create_dataset("features", shape_features, dtype='float32', chunks=(1,768), maxshape=(None, 768), compression="gzip")
for from_idx in tqdm.trange(0,n_lines,batch_size):
to_idx = from_idx+batch_size if from_idx+batch_size <= n_lines else n_lines
batch_sentences = all_sentences[ from_idx: to_idx ]
emb_sentences = model.encode(batch_sentences, convert_to_tensor=True)
#test_queries(emb_sentences, all_sentences, model)
fh_out["features"][from_idx:to_idx] = emb_sentences.cpu().numpy()
if __name__=="__main__":
visualsem_path = os.path.dirname(os.path.realpath(__file__))
visualsem_nodes_path = "%s/dataset/nodes.v2.json"%visualsem_path
visualsem_images_path = "%s/dataset/images/"%visualsem_path
glosses_sentence_bert_path = "%s/dataset/gloss_files/glosses.en.txt.sentencebert.h5"%visualsem_path
glosses_bnids_path = "%s/dataset/gloss_files/glosses.en.txt.bnids"%visualsem_path
os.makedirs("%s/dataset/gloss_files/"%visualsem_path, exist_ok=True)
p = argparse.ArgumentParser()
p.add_argument('--input_files', type=str, nargs="+", default=["example_data/queries.txt"],
help="""Input file(s) to use for retrieval. Each line in each file should contain a detokenized sentence.""")
p.add_argument('--topk', type=int, default=1, help="Retrieve topk nodes for each input sentence.")
p.add_argument('--batch_size', type=int, default=1000)
p.add_argument('--visualsem_path', type=str, default=visualsem_path,
help="Path to directory containing VisualSem knowledge graph.")
p.add_argument('--visualsem_nodes_path', type=str, default=visualsem_nodes_path,
help="Path to file containing VisualSem nodes.")
p.add_argument('--visualsem_images_path', type=str, default=visualsem_images_path,
help="Path to directory containing VisualSem images.")
p.add_argument('--glosses_sentence_bert_path', type=str, default=glosses_sentence_bert_path,
help="""HDF5 file containing glosses index computed with Sentence BERT (computed with `extract_glosses_visualsem.py`).""")
p.add_argument('--glosses_bnids_path', type=str, default=glosses_bnids_path,
help="""Text file containing glosses BabelNet ids, one per line (computed with `extract_glosses_visualsem.py`).""")
p.add_argument('--input_valid', action='store_true',
help="""Perform retrieval for the glosses in the validation set. (See paper for reference)""")
p.add_argument('--input_test', action='store_true',
help="""Perform retrieval for the glosses in the test set. (See paper for reference)""")
args = p.parse_args()
# load all nodes in VisualSem
all_bnids = load_visualsem_bnids(args.visualsem_nodes_path, args.visualsem_images_path)
gloss_bnids = load_bnids( args.glosses_bnids_path )
gloss_bnids = numpy.array(gloss_bnids, dtype='object')
with h5py.File(args.glosses_sentence_bert_path, 'r') as fh_glosses:
glosses_feats = fh_glosses["features"][:]
glosses_feats = torch.tensor(glosses_feats)
if torch.cuda.is_available():
glosses_feats = glosses_feats.cuda()
# load train/valid/test gloss splits
glosses_splits = fh_glosses["split_idxs"][:]
train_idxs = (glosses_splits==0).nonzero()[0]
valid_idxs = (glosses_splits==1).nonzero()[0]
test_idxs = (glosses_splits==2).nonzero()[0]
# load gloss language splits
language_splits = fh_glosses["language_idxs"][:]
for input_file in args.input_files:
print("Processing input file: %s ..."%input_file)
sbert_out_fname = input_file+".sentencebert.h5"
if os.path.isfile( sbert_out_fname ):
raise Exception("File already exists: '%s'. Please remove it manually to avoid tampering."%sbert_out_fname)
input_sentences = load_sentences( input_file )
encode_query(sbert_out_fname, args.batch_size, input_sentences)
out_fname = input_file+".bnids"
retrieve_nodes_given_sentences(out_fname, args.batch_size, input_sentences, gloss_bnids, glosses_feats, args.topk)
# remove temporary SBERT index created for input file(s)
os.remove( sbert_out_fname )
print("Retrieved glosses: %s"%out_fname)
|
[
"utils.load_sentences",
"h5py.File",
"os.remove",
"os.makedirs",
"argparse.ArgumentParser",
"tqdm.trange",
"os.path.realpath",
"numpy.argsort",
"sentence_transformers.util.pytorch_cos_sim",
"os.path.isfile",
"utils.load_bnids",
"numpy.array",
"torch.cuda.is_available",
"utils.load_visualsem_bnids",
"sentence_transformers.SentenceTransformer",
"torch.tensor"
] |
[((1034, 1059), 'os.path.isfile', 'os.path.isfile', (['out_fname'], {}), '(out_fname)\n', (1048, 1059), False, 'import os\n'), ((1356, 1387), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_name'], {}), '(model_name)\n', (1375, 1387), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((3493, 3524), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_name'], {}), '(model_name)\n', (3512, 3524), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((4625, 4695), 'os.makedirs', 'os.makedirs', (["('%s/dataset/gloss_files/' % visualsem_path)"], {'exist_ok': '(True)'}), "('%s/dataset/gloss_files/' % visualsem_path, exist_ok=True)\n", (4636, 4695), False, 'import os\n'), ((4703, 4728), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4726, 4728), False, 'import argparse\n'), ((6396, 6471), 'utils.load_visualsem_bnids', 'load_visualsem_bnids', (['args.visualsem_nodes_path', 'args.visualsem_images_path'], {}), '(args.visualsem_nodes_path, args.visualsem_images_path)\n', (6416, 6471), False, 'from utils import grouper, load_sentences, load_bnids, load_visualsem_bnids\n'), ((6490, 6525), 'utils.load_bnids', 'load_bnids', (['args.glosses_bnids_path'], {}), '(args.glosses_bnids_path)\n', (6500, 6525), False, 'from utils import grouper, load_sentences, load_bnids, load_visualsem_bnids\n'), ((6546, 6586), 'numpy.array', 'numpy.array', (['gloss_bnids'], {'dtype': '"""object"""'}), "(gloss_bnids, dtype='object')\n", (6557, 6586), False, 'import numpy\n'), ((3570, 3595), 'h5py.File', 'h5py.File', (['out_fname', '"""w"""'], {}), "(out_fname, 'w')\n", (3579, 3595), False, 'import h5py\n'), ((3765, 3800), 'tqdm.trange', 'tqdm.trange', (['(0)', 'n_lines', 'batch_size'], {}), '(0, n_lines, batch_size)\n', (3776, 3800), False, 'import tqdm\n'), ((4251, 4277), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4267, 4277), False, 'import os\n'), ((6597, 6644), 'h5py.File', 'h5py.File', (['args.glosses_sentence_bert_path', '"""r"""'], {}), "(args.glosses_sentence_bert_path, 'r')\n", (6606, 6644), False, 'import h5py\n'), ((6736, 6763), 'torch.tensor', 'torch.tensor', (['glosses_feats'], {}), '(glosses_feats)\n', (6748, 6763), False, 'import torch\n'), ((6775, 6800), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6798, 6800), False, 'import torch\n'), ((1831, 1856), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1854, 1856), False, 'import torch\n'), ((1930, 1979), 'sentence_transformers.util.pytorch_cos_sim', 'util.pytorch_cos_sim', (['queries_embs', 'glosses_feats'], {}), '(queries_embs, glosses_feats)\n', (1950, 1979), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((2043, 2064), 'numpy.argsort', 'numpy.argsort', (['scores'], {}), '(scores)\n', (2056, 2064), False, 'import numpy\n'), ((7389, 7420), 'os.path.isfile', 'os.path.isfile', (['sbert_out_fname'], {}), '(sbert_out_fname)\n', (7403, 7420), False, 'import os\n'), ((7579, 7605), 'utils.load_sentences', 'load_sentences', (['input_file'], {}), '(input_file)\n', (7593, 7605), False, 'from utils import grouper, load_sentences, load_bnids, load_visualsem_bnids\n'), ((7936, 7962), 'os.remove', 'os.remove', (['sbert_out_fname'], {}), '(sbert_out_fname)\n', (7945, 7962), False, 'import os\n')]
|
import hcat.lib.functional
import hcat.lib.functional as functional
from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter
from hcat.lib.cell import Cell
from hcat.lib.cochlea import Cochlea
from hcat.backends.detection import FasterRCNN_from_url
from hcat.backends.detection import HairCellFasterRCNN
from hcat.lib.utils import warn
import torch
from torch import Tensor
from tqdm import tqdm
from itertools import product
import numpy as np
from hcat.lib.explore_lif import get_xml
import torchvision.ops
import skimage.io as io
import os.path
from typing import Optional, List, Dict
# DOCUMENTED
def _detect(f: str, curve_path: str = None, cell_detection_threshold: float = 0.86, dtype=None,
nms_threshold: float = 0.2, save_xml=False, save_fig=False, pixel_size=None, cell_diameter=None):
"""
2D hair cell detection algorithm.
Loads arbitrarily large 2d image and performs iterative faster rcnn detection on the entire image.
    :param *str* f: path to the image to analyze
:param *float* cell_detection_threshold: cells below threshold are rejected
:param *float* nms_threshold: iou rejection threshold for nms.
:return: *Cochlea* object containing data of analysis.
"""
print('Initializing hair cell detection algorithm...')
if f is None:
warn('ERROR: No File to Analyze... \nAborting.', color='red')
return None
if not pixel_size:
warn('WARNING: Pixel Size is not set. Defaults to 288.88 nm x/y. '
'Consider suplying value for optimal performance.', color='yellow')
with torch.no_grad():
# Load and preprocess Image
image_base = load(f, 'TileScan 1 Merged', verbose=True) # from hcat.lib.utils
image_base = image_base[[2, 3],...].max(-1) if image_base.ndim == 4 else image_base
shape = list(image_base.shape)
shape[0] = 1
dtype = image_base.dtype if dtype is None else dtype
scale: int = hcat.lib.utils.get_dtype_offset(dtype)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
temp = np.zeros(shape)
temp = np.concatenate((temp, image_base)) / scale * 255
c, x, y = image_base.shape
print(
f'DONE: shape: {image_base.shape}, min: {image_base.min()}, max: {image_base.max()}, dtype: {image_base.dtype}')
if image_base.max() < scale * 0.33:
warn(f'WARNING: Image max value less than 1/3 the scale factor for bit depth. Image Max: {image_base.max()},'
f' Scale Factor: {scale}, dtype: {dtype}. Readjusting scale to 1.5 time Image max.', color='yellow')
scale = image_base.max() * 1.5
image_base = torch.from_numpy(image_base.astype(np.uint16) / scale).to(device)
if pixel_size is not None:
image_base: Tensor = correct_pixel_size(image_base, pixel_size) #model expects pixel size of 288.88
print(f'Rescaled Image to match pixel size of 288.88nm with a new shape of: {image_base.shape}')
elif cell_diameter is not None:
image_base: Tensor = scale_to_hair_cell_diameter(image_base, cell_diameter)
print(f'Rescaled Image to match pixel size of 288.88nm with a new shape of: {image_base.shape}')
# normalize around zero
image_base.sub_(0.5).div_(0.5)
if device == 'cuda':
warn('CUDA: GPU successfully initialized!', color='green')
else:
warn('WARNING: GPU not present or CUDA is not correctly intialized for GPU accelerated computation. '
'Analysis may be slow.', color='yellow')
        # Initialize the model...
model = FasterRCNN_from_url(url='https://github.com/buswinka/hcat/blob/master/modelfiles/detection.trch?raw=true', device=device)
model.eval()
        # Initialize curvature detection
predict_curvature = hcat.lib.functional.PredictCurvature(erode=3)
        # Get the indices for evaluating cropped regions
c, x, y = image_base.shape
image_base = torch.cat((torch.zeros((1, x, y), device=device), image_base), dim=0)
x_ind: List[List[int]] = calculate_indexes(10, 235, x, x) # [[0, 255], [30, 285], ...]
y_ind: List[List[int]] = calculate_indexes(10, 235, y, y) # [[0, 255], [30, 285], ...]
total: int = len(x_ind) * len(y_ind)
        # Initialize other small things
cell_id = 1
cells = []
add_cell = cells.append # stupid but done for speed
for x, y in tqdm(product(x_ind, y_ind), total=total, desc='Detecting: '):
# Load and prepare image crop for ML model evaluation
image: Tensor = image_base[:, x[0]:x[1], y[0]:y[1]].unsqueeze(0)
# If the image has nothing in it we can skip for speed
if image.max() == -1:
continue
# Evaluate Deep Learning Model
out: Dict[str, Tensor] = model(image.float())[0]
scores: Tensor = out['scores'].cpu()
boxes: Tensor = out['boxes'].cpu()
labels: Tensor = out['labels'].cpu()
# The model output coords with respect to the crop of image_base. We have to adjust
# idk why the y and x are flipped. Breaks otherwise.
boxes[:, [0, 2]] += y[0]
boxes[:, [1, 3]] += x[0]
# center x, center y, width, height
centers: Tensor = torchvision.ops.box_convert(boxes, 'xyxy', 'cxcywh').cpu()
cx = centers[:, 0]
cy = centers[:, 1]
for i, score in enumerate(scores):
if score > cell_detection_threshold:
add_cell(Cell(id=cell_id,
loc=torch.tensor([0, cx[i], cy[i], 0]),
image=None,
mask=None,
cell_type='OHC' if labels[i] == 1 else 'IHC',
boxes=boxes[i, :],
scores=scores[i]))
cell_id += 1
# some cells may overlap. We remove cells after analysis is complete.
cells: List[Cell] = _cell_nms(cells, nms_threshold)
ohc = sum([int(c.type == 'OHC') for c in cells]) # number of ohc
ihc = sum([int(c.type == 'IHC') for c in cells]) # number of ihc
print(f'Total Cells: {len(cells)}\n OHC: {ohc}\n IHC: {ihc}' )
max_projection: Tensor = image_base[[1], ...].mul(0.5).add(0.5).unsqueeze(-1).cpu()
curvature, distance, apex = predict_curvature(max_projection, cells, curve_path)
if curvature is None:
warn('WARNING: All three methods to predict hair cell path have failed. Frequency Mapping functionality is '
'limited. Consider Manual Calculation.', color='yellow')
# curvature estimation really only works if there is a lot of tissue...
if distance is not None and distance.max() > 4000:
for c in cells: c.calculate_frequency(curvature[[0, 1], :], distance) # calculate cell's best frequency
cells = [c for c in cells if not c._distance_is_far_away] # remove a cell if its far away from curve
else:
curvature, distance, apex = None, None, None
warn('WARNING: Predicted Cochlear Distance is below 4000um. Not sufficient '
'information to determine cell frequency.', color='yellow')
xml = get_xml(f) if f.endswith('.lif') else None
filename = os.path.split(f)[-1]
# remove weird cell ID's
for i, c in enumerate(cells): c.id = i+1
# Store in compressible object for further use
c = Cochlea(mask=None,
filename=filename,
path=f,
analysis_type='detect',
leica_metadata=xml,
im_shape=image_base.shape,
cochlear_distance=distance,
curvature=curvature,
cells=cells,
apex=apex)
c.write_csv()
if save_xml: cochlea_to_xml(c)
if save_fig: c.make_detect_fig(image_base)
print('')
return c
def _cell_nms(cells: List[Cell], nms_threshold: float) -> List[Cell]:
"""
    Performs non-maximum suppression on the resulting cell predictions
:param cells: Iterable of cells
:param nms_threshold: cell iou threshold
:return: Iterable of cells
"""
    # nms to get rid of overlapping duplicate cells
boxes = torch.zeros((len(cells), 4))
scores = torch.zeros(len(cells))
for i, c in enumerate(cells):
boxes[i, :] = c.boxes
scores[i] = c.scores
ind = torchvision.ops.nms(boxes, scores, nms_threshold)
# need to pop off list elements from an int64 tensor
ind_bool = torch.zeros(len(cells))
ind_bool[ind] = 1
for i, val in enumerate(ind_bool):
if val == 0:
cells[i] = None
return [c for c in cells if c]
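# Minimal, self-contained sketch (added for illustration; toy boxes, not pipeline
# output) of the torchvision NMS call used in _cell_nms above: of two boxes whose
# IoU exceeds the threshold, only the higher-scoring one is kept.
def _example_nms():
    boxes = torch.tensor([[0., 0., 10., 10.],
                          [1., 1., 11., 11.],      # heavy overlap with the first box
                          [50., 50., 60., 60.]])   # no overlap, always kept
    scores = torch.tensor([0.9, 0.8, 0.7])
    keep = torchvision.ops.nms(boxes, scores, 0.2)
    return keep  # -> tensor([0, 2])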
|
[
"hcat.lib.utils.correct_pixel_size",
"hcat.lib.utils.warn",
"hcat.lib.utils.scale_to_hair_cell_diameter",
"numpy.zeros",
"hcat.lib.explore_lif.get_xml",
"hcat.lib.utils.calculate_indexes",
"hcat.lib.cochlea.Cochlea",
"hcat.backends.detection.FasterRCNN_from_url",
"hcat.lib.utils.load",
"torch.cuda.is_available",
"hcat.lib.utils.cochlea_to_xml",
"itertools.product",
"torch.zeros",
"torch.tensor",
"torch.no_grad",
"numpy.concatenate"
] |
[((1374, 1438), 'hcat.lib.utils.warn', 'warn', (['"""ERROR: No File to Analyze... \nAborting."""'], {'color': '"""red"""'}), '("""ERROR: No File to Analyze... \nAborting.""", color=\'red\')\n', (1378, 1438), False, 'from hcat.lib.utils import warn\n'), ((1487, 1628), 'hcat.lib.utils.warn', 'warn', (['"""WARNING: Pixel Size is not set. Defaults to 288.88 nm x/y. Consider suplying value for optimal performance."""'], {'color': '"""yellow"""'}), "(\n 'WARNING: Pixel Size is not set. Defaults to 288.88 nm x/y. Consider suplying value for optimal performance.'\n , color='yellow')\n", (1491, 1628), False, 'from hcat.lib.utils import warn\n'), ((1645, 1660), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1658, 1660), False, 'import torch\n'), ((1720, 1762), 'hcat.lib.utils.load', 'load', (['f', '"""TileScan 1 Merged"""'], {'verbose': '(True)'}), "(f, 'TileScan 1 Merged', verbose=True)\n", (1724, 1762), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((2141, 2156), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2149, 2156), True, 'import numpy as np\n'), ((3724, 3855), 'hcat.backends.detection.FasterRCNN_from_url', 'FasterRCNN_from_url', ([], {'url': '"""https://github.com/buswinka/hcat/blob/master/modelfiles/detection.trch?raw=true"""', 'device': 'device'}), "(url=\n 'https://github.com/buswinka/hcat/blob/master/modelfiles/detection.trch?raw=true'\n , device=device)\n", (3743, 3855), False, 'from hcat.backends.detection import FasterRCNN_from_url\n'), ((4201, 4233), 'hcat.lib.utils.calculate_indexes', 'calculate_indexes', (['(10)', '(235)', 'x', 'x'], {}), '(10, 235, x, x)\n', (4218, 4233), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((4297, 4329), 'hcat.lib.utils.calculate_indexes', 'calculate_indexes', (['(10)', '(235)', 'y', 'y'], {}), '(10, 235, y, y)\n', (4314, 4329), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((7743, 7941), 'hcat.lib.cochlea.Cochlea', 'Cochlea', ([], {'mask': 'None', 'filename': 'filename', 'path': 'f', 'analysis_type': '"""detect"""', 'leica_metadata': 'xml', 'im_shape': 'image_base.shape', 'cochlear_distance': 'distance', 'curvature': 'curvature', 'cells': 'cells', 'apex': 'apex'}), "(mask=None, filename=filename, path=f, analysis_type='detect',\n leica_metadata=xml, im_shape=image_base.shape, cochlear_distance=\n distance, curvature=curvature, cells=cells, apex=apex)\n", (7750, 7941), False, 'from hcat.lib.cochlea import Cochlea\n'), ((2088, 2113), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2111, 2113), False, 'import torch\n'), ((2888, 2930), 'hcat.lib.utils.correct_pixel_size', 'correct_pixel_size', (['image_base', 'pixel_size'], {}), '(image_base, pixel_size)\n', (2906, 2930), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((3428, 3486), 'hcat.lib.utils.warn', 'warn', (['"""CUDA: GPU successfully initialized!"""'], {'color': '"""green"""'}), "('CUDA: GPU successfully initialized!', color='green')\n", (3432, 3486), False, 'from hcat.lib.utils import warn\n'), ((3513, 3662), 'hcat.lib.utils.warn', 'warn', (['"""WARNING: GPU not present or CUDA is not correctly intialized for GPU accelerated computation. 
Analysis may be slow."""'], {'color': '"""yellow"""'}), "(\n 'WARNING: GPU not present or CUDA is not correctly intialized for GPU accelerated computation. Analysis may be slow.'\n , color='yellow')\n", (3517, 3662), False, 'from hcat.lib.utils import warn\n'), ((4571, 4592), 'itertools.product', 'product', (['x_ind', 'y_ind'], {}), '(x_ind, y_ind)\n', (4578, 4592), False, 'from itertools import product\n'), ((6701, 6873), 'hcat.lib.utils.warn', 'warn', (['"""WARNING: All three methods to predict hair cell path have failed. Frequency Mapping functionality is limited. Consider Manual Calculation."""'], {'color': '"""yellow"""'}), "(\n 'WARNING: All three methods to predict hair cell path have failed. Frequency Mapping functionality is limited. Consider Manual Calculation.'\n , color='yellow')\n", (6705, 6873), False, 'from hcat.lib.utils import warn\n'), ((7339, 7482), 'hcat.lib.utils.warn', 'warn', (['"""WARNING: Predicted Cochlear Distance is below 4000um. Not sufficient information to determine cell frequency."""'], {'color': '"""yellow"""'}), "(\n 'WARNING: Predicted Cochlear Distance is below 4000um. Not sufficient information to determine cell frequency.'\n , color='yellow')\n", (7343, 7482), False, 'from hcat.lib.utils import warn\n'), ((7508, 7518), 'hcat.lib.explore_lif.get_xml', 'get_xml', (['f'], {}), '(f)\n', (7515, 7518), False, 'from hcat.lib.explore_lif import get_xml\n'), ((8158, 8175), 'hcat.lib.utils.cochlea_to_xml', 'cochlea_to_xml', (['c'], {}), '(c)\n', (8172, 8175), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((2172, 2206), 'numpy.concatenate', 'np.concatenate', (['(temp, image_base)'], {}), '((temp, image_base))\n', (2186, 2206), True, 'import numpy as np\n'), ((3150, 3204), 'hcat.lib.utils.scale_to_hair_cell_diameter', 'scale_to_hair_cell_diameter', (['image_base', 'cell_diameter'], {}), '(image_base, cell_diameter)\n', (3177, 3204), False, 'from hcat.lib.utils import calculate_indexes, load, cochlea_to_xml, correct_pixel_size, scale_to_hair_cell_diameter\n'), ((4109, 4146), 'torch.zeros', 'torch.zeros', (['(1, x, y)'], {'device': 'device'}), '((1, x, y), device=device)\n', (4120, 4146), False, 'import torch\n'), ((5771, 5805), 'torch.tensor', 'torch.tensor', (['[0, cx[i], cy[i], 0]'], {}), '([0, cx[i], cy[i], 0])\n', (5783, 5805), False, 'import torch\n')]
|
import csv
import time
import numpy as np
import argparse
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import scale
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
def load_eyedata(data_folder):
datafile = '{}/eyedata.csv'.format(data_folder)
data = np.loadtxt(datafile, skiprows=1, delimiter=',')
data = scale(data)
X, y = data[:, :-1], data[:, -1]
featnames = np.array(
list(map(lambda i: '{:03}'.format(i), range(X.shape[1]))))
return X, y, featnames
def load_iwpc(data_folder):
datafile = '{}/iwpc-scaled.csv'.format(data_folder)
col_types = {'race': str,
'age': float,
'height': float,
'weight': float,
'amiodarone': int,
'decr': int,
'cyp2c9': str,
'vkorc1': str,
'dose': float}
X, y = [], []
with open(datafile) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for col_name in reader.fieldnames:
col_type = col_types[col_name]
row[col_name] = col_type(row[col_name]) # cast to correct type
if col_name == 'dose':
y.append(row[col_name])
del row[col_name]
X.append(row)
dv = DictVectorizer()
X = dv.fit_transform(X)
y = np.array(y)
featnames = np.array(dv.get_feature_names())
return X, y, featnames
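# Tiny self-contained illustration (added for clarity; the rows are invented) of
# what DictVectorizer does in load_iwpc above: string-valued fields such as 'race'
# are one-hot encoded while numeric fields pass through unchanged.
def _example_dictvectorizer():
    dv = DictVectorizer()
    X = dv.fit_transform([{'race': 'asian', 'age': 50.0},
                          {'race': 'white', 'age': 60.0}])
    # feature names: ['age', 'race=asian', 'race=white']
    # X.toarray():   [[50., 1., 0.], [60., 0., 1.]]
    return dv.get_feature_names(), X.toarray()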
if __name__ == '__main__':
data_folder = '../data'
parser = argparse.ArgumentParser()
parser.add_argument('data', choices=['eyedata', 'iwpc'], help='Specify the data to use')
args = parser.parse_args()
dataset = args.data
if dataset == 'eyedata':
X, y, featnames = load_eyedata(data_folder)
if dataset == 'iwpc':
X, y, featnames = load_iwpc(data_folder)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=9)
params = {
'activation' : ['identity', 'logistic', 'tanh', 'relu'],
'solver' : ['lbfgs', 'sgd', 'adam'],
'hidden_layer_sizes': [(100,),(150,),(200,),(300,), (50,100,),(50,150,),(100,100,),(100,150,),(50,75,100,)],
'max_iter':[200,250,300,350]
}
mlp_clf_grid = GridSearchCV(MLPRegressor(random_state=9), param_grid=params, n_jobs=-1, cv=5, verbose=1)
mlp_clf_grid.fit(train_X,train_y)
print('Train Accuracy : ',mlp_clf_grid.best_estimator_.score(train_X,train_y))
print('Test Accuracy : ',mlp_clf_grid.best_estimator_.score(test_X, test_y))
print('Grid Search Best Accuracy :',mlp_clf_grid.best_score_)
print('Best Parameters : ',mlp_clf_grid.best_params_)
print('Best Estimators: ',mlp_clf_grid.best_estimator_)
|
[
"argparse.ArgumentParser",
"sklearn.preprocessing.scale",
"warnings.filterwarnings",
"sklearn.model_selection.train_test_split",
"csv.DictReader",
"sklearn.neural_network.MLPRegressor",
"numpy.array",
"sklearn.feature_extraction.DictVectorizer",
"numpy.loadtxt"
] |
[((74, 107), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (97, 107), False, 'import warnings\n'), ((449, 496), 'numpy.loadtxt', 'np.loadtxt', (['datafile'], {'skiprows': '(1)', 'delimiter': '""","""'}), "(datafile, skiprows=1, delimiter=',')\n", (459, 496), True, 'import numpy as np\n'), ((508, 519), 'sklearn.preprocessing.scale', 'scale', (['data'], {}), '(data)\n', (513, 519), False, 'from sklearn.preprocessing import scale\n'), ((1507, 1523), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {}), '()\n', (1521, 1523), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((1560, 1571), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1568, 1571), True, 'import numpy as np\n'), ((1719, 1744), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1742, 1744), False, 'import argparse\n'), ((2091, 2129), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(9)'}), '(X, y, random_state=9)\n', (2107, 2129), False, 'from sklearn.model_selection import train_test_split\n'), ((1125, 1148), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (1139, 1148), False, 'import csv\n'), ((2482, 2510), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'random_state': '(9)'}), '(random_state=9)\n', (2494, 2510), False, 'from sklearn.neural_network import MLPRegressor\n')]
|
from nanoget import get_input
from argparse import ArgumentParser
from nanoplot import utils
from .version import __version__
from nanoplotter import check_valid_time_and_sort, Plot
from os import path
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def main():
args = get_args()
merged_df = get_input(source="summary", files=args.summary).set_index("readIDs") \
.merge(right=get_input(source="bam", files=args.bam).set_index("readIDs"),
how="left",
left_index=True,
right_index=True)
plot_retrotect(df=merged_df,
path=path.join(args.outdir, args.prefix),
figformat=args.format,
title=args.title,
hours=args.hours)
merged_df.dropna(axis="index", how="any").sort_values(by="start_time").to_csv(
path_or_buf=path.join(args.outdir, args.prefix) + "Retrotect_details.txt.gz",
sep="\t",
columns=["start_time"],
compression='gzip')
def get_args():
epilog = """"""
parser = ArgumentParser(
description="Get detection curve of nanopore experiment.",
epilog=epilog,
formatter_class=utils.custom_formatter,
add_help=False)
general = parser.add_argument_group(
title='General options')
general.add_argument("-h", "--help",
action="help",
help="show the help and exit")
general.add_argument("-v", "--version",
help="Print version and exit.",
action="version",
version='NanoComp {}'.format(__version__))
general.add_argument("-t", "--threads",
help="Set the allowed number of threads to be used by the script",
default=4,
type=int)
general.add_argument("-o", "--outdir",
help="Specify directory in which output has to be created.",
default=".")
general.add_argument("-p", "--prefix",
help="Specify an optional prefix to be used for the output files.",
default="",
type=str)
general.add_argument("--verbose",
help="Write log messages also to terminal.",
action="store_true")
visual = parser.add_argument_group(
title='Options for customizing the plots created')
visual.add_argument("-f", "--format",
help="Specify the output format of the plots.",
default="png",
type=str,
choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps',
'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff'])
visual.add_argument("--title",
help="Add a title to all plots, requires quoting if using spaces",
type=str,
default=None)
visual.add_argument("--hours",
help="How many hours to plot in the graph",
type=int,
default=8)
target = parser.add_argument_group(
title="Input data sources, requires a bam and a summary file.")
target.add_argument("--summary",
help="Data is a summary file generated by albacore.",
nargs='+',
metavar="files",
required=True)
target.add_argument("--bam",
help="Data as a sorted bam file.",
nargs='+',
metavar="files",
required=True)
return parser.parse_args()
def plot_retrotect(df, path, figformat="png", title=None, hours=8):
dfs = check_valid_time_and_sort(
df=df,
timescol="start_time",
days=hours / 24,
warning=False)
dfs["start_time"] = dfs["start_time"].astype('timedelta64[m]') # ?! dtype float64
cum_yield_reads = Plot(
path=path + "CumulativeYieldPlot_NumberOfReads." + figformat,
title="Cumulative yield")
ax = sns.regplot(
x=dfs['start_time'],
y=np.log10(dfs['index'] + 1),
x_ci=None,
fit_reg=False,
color="blue",
scatter_kws={"s": 1})
aligned_df = dfs.drop('index', axis=1) \
.dropna(axis="index", how="any") \
.reset_index(drop=True) \
.reset_index()
ax = sns.regplot(
x=aligned_df['start_time'],
y=np.log10(aligned_df["index"] + 1),
x_ci=None,
fit_reg=False,
color="red",
scatter_kws={"s": 1},
ax=ax)
yticks = [10**i for i in range(10) if not 10**i > 10 * dfs["index"].max()]
ax.set(
xlabel='Run time (minutes)',
yticks=np.log10(yticks),
yticklabels=yticks,
ylabel='Cumulative yield in log transformed number of reads',
title=title or cum_yield_reads.title)
fig = ax.get_figure()
cum_yield_reads.fig = fig
fig.savefig(cum_yield_reads.path, format=figformat, dpi=100, bbox_inches="tight")
plt.close("all")
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"nanoplotter.Plot",
"nanoplotter.check_valid_time_and_sort",
"numpy.log10",
"nanoget.get_input",
"os.path.join"
] |
[((1081, 1229), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Get detection curve of nanopore experiment."""', 'epilog': 'epilog', 'formatter_class': 'utils.custom_formatter', 'add_help': '(False)'}), "(description='Get detection curve of nanopore experiment.',\n epilog=epilog, formatter_class=utils.custom_formatter, add_help=False)\n", (1095, 1229), False, 'from argparse import ArgumentParser\n'), ((3895, 3986), 'nanoplotter.check_valid_time_and_sort', 'check_valid_time_and_sort', ([], {'df': 'df', 'timescol': '"""start_time"""', 'days': '(hours / 24)', 'warning': '(False)'}), "(df=df, timescol='start_time', days=hours / 24,\n warning=False)\n", (3920, 3986), False, 'from nanoplotter import check_valid_time_and_sort, Plot\n'), ((4126, 4223), 'nanoplotter.Plot', 'Plot', ([], {'path': "(path + 'CumulativeYieldPlot_NumberOfReads.' + figformat)", 'title': '"""Cumulative yield"""'}), "(path=path + 'CumulativeYieldPlot_NumberOfReads.' + figformat, title=\n 'Cumulative yield')\n", (4130, 4223), False, 'from nanoplotter import check_valid_time_and_sort, Plot\n'), ((5226, 5242), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5235, 5242), True, 'import matplotlib.pyplot as plt\n'), ((630, 665), 'os.path.join', 'path.join', (['args.outdir', 'args.prefix'], {}), '(args.outdir, args.prefix)\n', (639, 665), False, 'from os import path\n'), ((4297, 4323), 'numpy.log10', 'np.log10', (["(dfs['index'] + 1)"], {}), "(dfs['index'] + 1)\n", (4305, 4323), True, 'import numpy as np\n'), ((4632, 4665), 'numpy.log10', 'np.log10', (["(aligned_df['index'] + 1)"], {}), "(aligned_df['index'] + 1)\n", (4640, 4665), True, 'import numpy as np\n'), ((4918, 4934), 'numpy.log10', 'np.log10', (['yticks'], {}), '(yticks)\n', (4926, 4934), True, 'import numpy as np\n'), ((886, 921), 'os.path.join', 'path.join', (['args.outdir', 'args.prefix'], {}), '(args.outdir, args.prefix)\n', (895, 921), False, 'from os import path\n'), ((327, 374), 'nanoget.get_input', 'get_input', ([], {'source': '"""summary"""', 'files': 'args.summary'}), "(source='summary', files=args.summary)\n", (336, 374), False, 'from nanoget import get_input\n'), ((419, 458), 'nanoget.get_input', 'get_input', ([], {'source': '"""bam"""', 'files': 'args.bam'}), "(source='bam', files=args.bam)\n", (428, 458), False, 'from nanoget import get_input\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.debugger import set_trace
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import glob
import os
from skimage.io import imread
from skimage.transform import resize
from torch.utils import data
import os
from config import Config
import pandas as pd
from utils.rotate_fcns import rotate_2d,rotate_3d,flip_2d
class DataLoader2D(data.Dataset):
def __init__(self, split,path_to_data):
self.split=split
self.path=path_to_data
data = pd.read_csv("utils/rot_dict_unique.csv")
self.rots_table=data.loc[:,:].to_numpy()
xl_file = pd.ExcelFile(self.path + os.sep+'ListOfData.xlsx')
data = pd.read_excel(xl_file,header=None)
folders=data.loc[:,0].tolist()
names=data.loc[:,1].tolist()
file_names=[]
for folder,name in zip(folders,names):
file_names.append((self.path + os.sep + folder.split('\\')[-1] + os.sep + name).replace('.mhd',''))
if self.split=='training':
file_names=file_names[:int(len(file_names)*0.8)]
elif self.split=='testing':
file_names=file_names[int(len(file_names)*0.8):-20]
self.file_names=[]
self.vec=[]
self.flip=[]
self.lbls=[]
for file in file_names:
for flip in [0,1]:
for unique_rot_num in range(self.rots_table.shape[0]):
self.file_names.append(file)
self.vec.append(self.rots_table[unique_rot_num,:])
self.flip.append(flip)
self.lbls.append(unique_rot_num)
def __len__(self):
return len(self.file_names)
def __getitem__(self, index):
file_name=self.file_names[index]
r=self.vec[index][0:3]
flip=self.flip[index]
flip=np.array([flip])
img_list=[]
folders=['mean','max','std']
for folder in folders:
for k in range(3):
tmp=imread(file_name + '_' + folder + '_'+ str(k+1) +'.png' )
tmp=tmp.astype(np.float32)/255-0.5
img_list.append(tmp)
# if self.split=='training':
# max_mult_change=0.3
# for k in range(len(img_list)):
# mult_change=1+torch.rand(1).numpy()[0]*2*max_mult_change-max_mult_change
# img_list[k]=img_list[k]*mult_change
# max_add_change=0.3
# for k in range(len(img_list)):
# add_change=torch.rand(1).numpy()[0]*2*max_add_change-max_add_change
# img_list[k]=img_list[k]+add_change
imgs=np.stack(img_list,axis=2)
for k in range(0,9,3):
if flip==1:
imgs[:,:,k:k+3]=flip_2d(imgs[:,:,k:k+3])
imgs[:,:,k:k+3]=rotate_2d(imgs[:,:,k:k+3],r)
imgs=torch.from_numpy(imgs.copy())
imgs=imgs.permute(2,0,1)
lbl=self.lbls[index]
lbl2=np.zeros(self.rots_table.shape[0]).astype(np.float32)
lbl2[lbl]=1
lbl=torch.from_numpy(lbl2)
return imgs,lbl
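# Hypothetical usage sketch (added for illustration; the path is a placeholder and
# the dataset expects the Excel file list plus mean/max/std PNG projections
# described above to exist on disk):
#
#   dataset = DataLoader2D(split='training', path_to_data='/path/to/data')
#   loader = data.DataLoader(dataset, batch_size=8, shuffle=True)
#   imgs, lbl = next(iter(loader))
#   # imgs: (8, 9, H, W) -- three projections (mean/max/std) x three views
#   # lbl:  (8, n_unique_rotations) one-hot rotation labels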
|
[
"numpy.stack",
"utils.rotate_fcns.rotate_2d",
"pandas.read_csv",
"pandas.ExcelFile",
"numpy.zeros",
"pandas.read_excel",
"numpy.array",
"utils.rotate_fcns.flip_2d",
"torch.from_numpy"
] |
[((599, 639), 'pandas.read_csv', 'pd.read_csv', (['"""utils/rot_dict_unique.csv"""'], {}), "('utils/rot_dict_unique.csv')\n", (610, 639), True, 'import pandas as pd\n'), ((717, 769), 'pandas.ExcelFile', 'pd.ExcelFile', (["(self.path + os.sep + 'ListOfData.xlsx')"], {}), "(self.path + os.sep + 'ListOfData.xlsx')\n", (729, 769), True, 'import pandas as pd\n'), ((783, 818), 'pandas.read_excel', 'pd.read_excel', (['xl_file'], {'header': 'None'}), '(xl_file, header=None)\n', (796, 818), True, 'import pandas as pd\n'), ((2093, 2109), 'numpy.array', 'np.array', (['[flip]'], {}), '([flip])\n', (2101, 2109), True, 'import numpy as np\n'), ((3102, 3128), 'numpy.stack', 'np.stack', (['img_list'], {'axis': '(2)'}), '(img_list, axis=2)\n', (3110, 3128), True, 'import numpy as np\n'), ((3553, 3575), 'torch.from_numpy', 'torch.from_numpy', (['lbl2'], {}), '(lbl2)\n', (3569, 3575), False, 'import torch\n'), ((3281, 3314), 'utils.rotate_fcns.rotate_2d', 'rotate_2d', (['imgs[:, :, k:k + 3]', 'r'], {}), '(imgs[:, :, k:k + 3], r)\n', (3290, 3314), False, 'from utils.rotate_fcns import rotate_2d, rotate_3d, flip_2d\n'), ((3215, 3243), 'utils.rotate_fcns.flip_2d', 'flip_2d', (['imgs[:, :, k:k + 3]'], {}), '(imgs[:, :, k:k + 3])\n', (3222, 3243), False, 'from utils.rotate_fcns import rotate_2d, rotate_3d, flip_2d\n'), ((3458, 3492), 'numpy.zeros', 'np.zeros', (['self.rots_table.shape[0]'], {}), '(self.rots_table.shape[0])\n', (3466, 3492), True, 'import numpy as np\n')]
|
# Shared helper functions used across multiple files are collected here
from skimage.measure import label
import numpy as np
import copy
# If the largest connected component is smaller than 2000 pixels, treat the segmentation as failed and return an empty mask; otherwise keep the largest component, and if the second-largest component is nearly as large, keep both
def refine_output(output):
refine = np.zeros((1280, 2440), dtype=np.uint8)
if len(np.where(output > 0)[0]) > 0:
output = label(output)
top = output.max()
area_list = []
for i in range(1, top + 1):
area = len(np.where(output == i)[0])
area_list.append(area)
max_area = max(area_list)
max_index = area_list.index(max_area)
if max_area < 2000:
return refine
else:
refine[output == max_index + 1] = 1
if top > 1:
temp_list = copy.deepcopy(area_list)
del temp_list[max_index]
second_max_area = max(temp_list)
second_max_index = area_list.index(second_max_area)
if (max_area / second_max_area) < 1.2:
refine[output == second_max_index + 1] = 1
return refine
else:
return refine
else:
return refine
else:
return refine
# If the overlap between two teeth's segmentations exceeds 40% of either tooth's area, that tooth is assumed to have been wrongly segmented onto another tooth and its result is removed
def judge_overlap(id, output_all):
ids = [11, 12, 13, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 41, 42, 43,
44, 45, 46, 47, 48]
index = ids.index(id)
    output_id = output_all[:, :, index].reshape(1, -1)  # each channel stores one tooth's segmentation result
output_id_area = output_id.sum(1) + 0.001
refine = output_all
if index <= 29:
end = index + 3
    elif index == 30:  # the second-to-last tooth has only one tooth after it in the list
end = index + 2
else:
        end = index + 1  # the last tooth needs no overlap check
    for i in range(index + 1, end):  # compare each tooth with the next two, since a neighbouring tooth may be missing
output_other = output_all[:, :, i].reshape(1, -1)
output_other_area = output_other.sum(1) + 0.001
inter = (output_id * output_other).sum(1) + 0.001
if (inter / output_id_area) >= 0.4:
refine[:, :, index] = 0
if (inter / output_other_area) >= 0.4:
refine[:, :, i] = 0
return refine
# Given a model, print its total and trainable parameter counts
def get_model_params(net):
total_params = sum(p.numel() for p in net.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')
print()
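# Self-contained sketch (added for illustration; the toy mask is invented): a
# 2500-pixel blob passes the 2000-pixel area threshold in refine_output and is
# kept, while a 100-pixel blob is dropped because it is far smaller than the
# largest component (area ratio > 1.2).
def _example_refine_output():
    toy_mask = np.zeros((1280, 2440), dtype=np.uint8)
    toy_mask[100:150, 100:150] = 1   # 2500-pixel component -> kept
    toy_mask[600:610, 600:610] = 1   # 100-pixel component  -> removed
    refined = refine_output(toy_mask)
    return refined.sum()             # -> 2500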
|
[
"copy.deepcopy",
"skimage.measure.label",
"numpy.where",
"numpy.zeros"
] |
[((201, 239), 'numpy.zeros', 'np.zeros', (['(1280, 2440)'], {'dtype': 'np.uint8'}), '((1280, 2440), dtype=np.uint8)\n', (209, 239), True, 'import numpy as np\n'), ((298, 311), 'skimage.measure.label', 'label', (['output'], {}), '(output)\n', (303, 311), False, 'from skimage.measure import label\n'), ((251, 271), 'numpy.where', 'np.where', (['(output > 0)'], {}), '(output > 0)\n', (259, 271), True, 'import numpy as np\n'), ((730, 754), 'copy.deepcopy', 'copy.deepcopy', (['area_list'], {}), '(area_list)\n', (743, 754), False, 'import copy\n'), ((421, 442), 'numpy.where', 'np.where', (['(output == i)'], {}), '(output == i)\n', (429, 442), True, 'import numpy as np\n')]
|
import bayesian_irl
import mdp_worlds
import utils
import mdp
import numpy as np
import scipy
import random
import generate_efficient_frontier
import matplotlib.pyplot as plt
def generate_reward_sample():
#rewards for no-op are gamma distributed
r_noop = []
locs = 1/2
scales = [20, 40, 80,190]
for i in range(4):
r_noop.append(-np.random.gamma(locs, scales[i], 1)[0])
r_noop = np.array(r_noop)
#rewards for repair are -N(100,1) for all but last state where it is -N(130,20)
r_repair = -100 + -1 * np.random.randn(4)
return np.concatenate((r_noop, r_repair))
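# Note added for clarity: np.random.gamma(shape, scale) has mean shape * scale, so
# with shape 1/2 the four no-op penalties above are centred near -10, -20, -40 and
# -95, i.e. letting the machine degrade becomes increasingly costly in later states.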
def generate_posterior_samples(num_samples):
print("samples")
all_samples = []
for i in range(num_samples):
r_sample = generate_reward_sample()
all_samples.append(r_sample)
print("mean of posterior from samples")
print(np.mean(all_samples, axis=0))
posterior = np.array(all_samples)
return posterior.transpose() #each column is a reward sample
if __name__=="__main__":
seed = 1234
np.random.seed(seed)
scipy.random.seed(seed)
random.seed(seed)
num_states = 4
num_samples = 2000
gamma = 0.95
alpha = 0.99
lamda = 0.9
posterior = generate_posterior_samples(num_samples)
r_sa = np.mean(posterior, axis=1)
init_distribution = np.ones(num_states)/num_states #uniform distribution
mdp_env = mdp.MachineReplacementMDP(num_states, r_sa, gamma, init_distribution)
print("---MDP solution for expectation---")
print("mean MDP reward", r_sa)
u_sa = mdp.solve_mdp_lp(mdp_env, debug=True)
print("mean policy from posterior")
utils.print_stochastic_policy_action_probs(u_sa, mdp_env)
print("MAP/Mean policy from posterior")
utils.print_policy_from_occupancies(u_sa, mdp_env)
print("rewards")
print(mdp_env.r_sa)
print("expected value = ", np.dot(u_sa, r_sa))
stoch_pi = utils.get_optimal_policy_from_usa(u_sa, mdp_env)
print("expected return", mdp.get_policy_expected_return(stoch_pi, mdp_env))
print("values", mdp.get_state_values(u_sa, mdp_env))
print('q-values', mdp.get_q_values(u_sa, mdp_env))
#run CVaR optimization, maybe just the robust version for now
u_expert = np.zeros(mdp_env.num_actions * mdp_env.num_states)
# print("solving for CVaR optimal policy")
posterior_probs = np.ones(num_samples) / num_samples #uniform dist since samples from MCMC
#generate efficient frontier
lambda_range = [0.0, 0.3, 0.5, 0.75, 0.95,0.99, 1.0]
#generate_efficient_frontier.calc_frontier(mdp_env, u_expert, posterior, posterior_probs, lambda_range, alpha, debug=False)
alpha = 0.99
print("calculating optimal policy for alpha = {} over lambda = {}".format(alpha, lambda_range))
cvar_rets = generate_efficient_frontier.calc_frontier(mdp_env, u_expert, posterior, posterior_probs, lambda_range, alpha, debug=False)
cvar_rets_array = np.array(cvar_rets)
plt.figure()
plt.plot(cvar_rets_array[:,0], cvar_rets_array[:,1], '-o')
#go through and label the points in the figure with the corresponding lambda values
unique_pts_lambdas = []
unique_pts = []
for i,pt in enumerate(cvar_rets_array):
unique = True
for upt in unique_pts:
if np.linalg.norm(upt - pt) < 0.00001:
unique = False
break
if unique:
unique_pts_lambdas.append((pt[0], pt[1], lambda_range[i]))
unique_pts.append(np.array(pt))
#calculate offset
offsetx = (np.max(cvar_rets_array[:,0]) - np.min(cvar_rets_array[:,0]))/30
offsety = (np.max(cvar_rets_array[:,1]) - np.min(cvar_rets_array[:,1]))/17
for i,pt in enumerate(unique_pts_lambdas):
if i in [0,1,2,4]:
plt.text(pt[0] - 6.2*offsetx, pt[1] , r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
elif i in [3]:
plt.text(pt[0] - 6.2*offsetx, pt[1] - 1.2*offsety , r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
elif i in [5]:
plt.text(pt[0] - 5.5*offsetx, pt[1] - 1.5*offsety, r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
else:
plt.text(pt[0]-offsetx, pt[1] - 1.5*offsety, r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("Robustness (CVaR)", fontsize=20)
plt.ylabel("Expected Return", fontsize=20)
plt.tight_layout()
plt.savefig('./figs/machine_replacement/efficient_frontier_machine_replacement.png')
plt.show()
|
[
"numpy.random.seed",
"numpy.ones",
"numpy.random.gamma",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.linalg.norm",
"mdp.get_policy_expected_return",
"mdp.MachineReplacementMDP",
"matplotlib.pyplot.tight_layout",
"numpy.random.randn",
"matplotlib.pyplot.yticks",
"numpy.max",
"random.seed",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"mdp.get_state_values",
"scipy.random.seed",
"generate_efficient_frontier.calc_frontier",
"numpy.min",
"utils.print_policy_from_occupancies",
"matplotlib.pyplot.ylabel",
"numpy.dot",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.zeros",
"mdp.solve_mdp_lp",
"utils.print_stochastic_policy_action_probs",
"numpy.array",
"mdp.get_q_values",
"utils.get_optimal_policy_from_usa",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((418, 434), 'numpy.array', 'np.array', (['r_noop'], {}), '(r_noop)\n', (426, 434), True, 'import numpy as np\n'), ((582, 616), 'numpy.concatenate', 'np.concatenate', (['(r_noop, r_repair)'], {}), '((r_noop, r_repair))\n', (596, 616), True, 'import numpy as np\n'), ((924, 945), 'numpy.array', 'np.array', (['all_samples'], {}), '(all_samples)\n', (932, 945), True, 'import numpy as np\n'), ((1060, 1080), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1074, 1080), True, 'import numpy as np\n'), ((1085, 1108), 'scipy.random.seed', 'scipy.random.seed', (['seed'], {}), '(seed)\n', (1102, 1108), False, 'import scipy\n'), ((1113, 1130), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1124, 1130), False, 'import random\n'), ((1292, 1318), 'numpy.mean', 'np.mean', (['posterior'], {'axis': '(1)'}), '(posterior, axis=1)\n', (1299, 1318), True, 'import numpy as np\n'), ((1412, 1481), 'mdp.MachineReplacementMDP', 'mdp.MachineReplacementMDP', (['num_states', 'r_sa', 'gamma', 'init_distribution'], {}), '(num_states, r_sa, gamma, init_distribution)\n', (1437, 1481), False, 'import mdp\n'), ((1577, 1614), 'mdp.solve_mdp_lp', 'mdp.solve_mdp_lp', (['mdp_env'], {'debug': '(True)'}), '(mdp_env, debug=True)\n', (1593, 1614), False, 'import mdp\n'), ((1659, 1716), 'utils.print_stochastic_policy_action_probs', 'utils.print_stochastic_policy_action_probs', (['u_sa', 'mdp_env'], {}), '(u_sa, mdp_env)\n', (1701, 1716), False, 'import utils\n'), ((1765, 1815), 'utils.print_policy_from_occupancies', 'utils.print_policy_from_occupancies', (['u_sa', 'mdp_env'], {}), '(u_sa, mdp_env)\n', (1800, 1815), False, 'import utils\n'), ((1928, 1976), 'utils.get_optimal_policy_from_usa', 'utils.get_optimal_policy_from_usa', (['u_sa', 'mdp_env'], {}), '(u_sa, mdp_env)\n', (1961, 1976), False, 'import utils\n'), ((2259, 2309), 'numpy.zeros', 'np.zeros', (['(mdp_env.num_actions * mdp_env.num_states)'], {}), '(mdp_env.num_actions * mdp_env.num_states)\n', (2267, 2309), True, 'import numpy as np\n'), ((2820, 2946), 'generate_efficient_frontier.calc_frontier', 'generate_efficient_frontier.calc_frontier', (['mdp_env', 'u_expert', 'posterior', 'posterior_probs', 'lambda_range', 'alpha'], {'debug': '(False)'}), '(mdp_env, u_expert, posterior,\n posterior_probs, lambda_range, alpha, debug=False)\n', (2861, 2946), False, 'import generate_efficient_frontier\n'), ((2970, 2989), 'numpy.array', 'np.array', (['cvar_rets'], {}), '(cvar_rets)\n', (2978, 2989), True, 'import numpy as np\n'), ((2994, 3006), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3004, 3006), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3071), 'matplotlib.pyplot.plot', 'plt.plot', (['cvar_rets_array[:, 0]', 'cvar_rets_array[:, 1]', '"""-o"""'], {}), "(cvar_rets_array[:, 0], cvar_rets_array[:, 1], '-o')\n", (3019, 3071), True, 'import matplotlib.pyplot as plt\n'), ((4381, 4404), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (4391, 4404), True, 'import matplotlib.pyplot as plt\n'), ((4410, 4433), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (4420, 4433), True, 'import matplotlib.pyplot as plt\n'), ((4439, 4483), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Robustness (CVaR)"""'], {'fontsize': '(20)'}), "('Robustness (CVaR)', fontsize=20)\n", (4449, 4483), True, 'import matplotlib.pyplot as plt\n'), ((4488, 4530), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected Return"""'], {'fontsize': '(20)'}), "('Expected Return', 
fontsize=20)\n", (4498, 4530), True, 'import matplotlib.pyplot as plt\n'), ((4540, 4558), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4556, 4558), True, 'import matplotlib.pyplot as plt\n'), ((4563, 4652), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figs/machine_replacement/efficient_frontier_machine_replacement.png"""'], {}), "(\n './figs/machine_replacement/efficient_frontier_machine_replacement.png')\n", (4574, 4652), True, 'import matplotlib.pyplot as plt\n'), ((4653, 4663), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4661, 4663), True, 'import matplotlib.pyplot as plt\n'), ((876, 904), 'numpy.mean', 'np.mean', (['all_samples'], {'axis': '(0)'}), '(all_samples, axis=0)\n', (883, 904), True, 'import numpy as np\n'), ((1344, 1363), 'numpy.ones', 'np.ones', (['num_states'], {}), '(num_states)\n', (1351, 1363), True, 'import numpy as np\n'), ((1893, 1911), 'numpy.dot', 'np.dot', (['u_sa', 'r_sa'], {}), '(u_sa, r_sa)\n', (1899, 1911), True, 'import numpy as np\n'), ((2006, 2055), 'mdp.get_policy_expected_return', 'mdp.get_policy_expected_return', (['stoch_pi', 'mdp_env'], {}), '(stoch_pi, mdp_env)\n', (2036, 2055), False, 'import mdp\n'), ((2077, 2112), 'mdp.get_state_values', 'mdp.get_state_values', (['u_sa', 'mdp_env'], {}), '(u_sa, mdp_env)\n', (2097, 2112), False, 'import mdp\n'), ((2136, 2167), 'mdp.get_q_values', 'mdp.get_q_values', (['u_sa', 'mdp_env'], {}), '(u_sa, mdp_env)\n', (2152, 2167), False, 'import mdp\n'), ((2384, 2404), 'numpy.ones', 'np.ones', (['num_samples'], {}), '(num_samples)\n', (2391, 2404), True, 'import numpy as np\n'), ((551, 569), 'numpy.random.randn', 'np.random.randn', (['(4)'], {}), '(4)\n', (566, 569), True, 'import numpy as np\n'), ((3580, 3609), 'numpy.max', 'np.max', (['cvar_rets_array[:, 0]'], {}), '(cvar_rets_array[:, 0])\n', (3586, 3609), True, 'import numpy as np\n'), ((3611, 3640), 'numpy.min', 'np.min', (['cvar_rets_array[:, 0]'], {}), '(cvar_rets_array[:, 0])\n', (3617, 3640), True, 'import numpy as np\n'), ((3659, 3688), 'numpy.max', 'np.max', (['cvar_rets_array[:, 1]'], {}), '(cvar_rets_array[:, 1])\n', (3665, 3688), True, 'import numpy as np\n'), ((3690, 3719), 'numpy.min', 'np.min', (['cvar_rets_array[:, 1]'], {}), '(cvar_rets_array[:, 1])\n', (3696, 3719), True, 'import numpy as np\n'), ((3319, 3343), 'numpy.linalg.norm', 'np.linalg.norm', (['(upt - pt)'], {}), '(upt - pt)\n', (3333, 3343), True, 'import numpy as np\n'), ((3528, 3540), 'numpy.array', 'np.array', (['pt'], {}), '(pt)\n', (3536, 3540), True, 'import numpy as np\n'), ((365, 400), 'numpy.random.gamma', 'np.random.gamma', (['locs', 'scales[i]', '(1)'], {}), '(locs, scales[i], 1)\n', (380, 400), True, 'import numpy as np\n')]
|
from SentimentAnalysis.creat_data.config import tencent
import pandas as pd
import numpy as np
import requests
import json
import time
import random
import hashlib
from urllib import parse
from collections import OrderedDict
AppID = tencent['account']['id_1']['APP_ID']
AppKey = tencent['account']['id_1']['AppKey']
def cal_sign(params_raw,AppKey=AppKey):
    # The official docs give a PHP example; this is the Python version
# params_raw = {'app_id': '10000',
# 'time_stamp': '1493449657',
# 'nonce_str': '20e3408a79',
# 'key1': '腾讯AI开放平台',
# 'key2': '示例仅供参考',
# 'sign': ''}
# AppKey = '<KEY>'
# cal_sign(params_raw=params_raw,
# AppKey=AppKey)
    # Returns: BE918C28827E0783D1E5F8E6D7C37A61
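    # Signing procedure (as implemented below): sort the parameters by key, drop empty
    # values, URL-encode them, append '&app_key=<AppKey>', then take the uppercase
    # hex MD5 digest of the resulting string.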
params = OrderedDict()
for i in sorted(params_raw):
if params_raw[i] != '':
params[i] = params_raw[i]
newurl = parse.urlencode(params)
newurl += ('&app_key=' + AppKey)
sign = hashlib.md5(newurl.encode("latin1")).hexdigest().upper()
return sign
def creat_label(texts,
AppID=AppID,
AppKey=AppKey):
'''
    :param texts: list of documents to be labelled
    :param AppID: Tencent AI account info; defaults to id_1 from the config file
    :param AppKey: Tencent AI account info; defaults to id_1 from the config file
    :return: list of labelled results: original document, label, confidence, return code and message
'''
url = tencent['api']['nlp_textpolar']['url']
results = []
    # call the API for each document in turn
count_i=0
for one_text in texts:
params = {'app_id': AppID,
'time_stamp': int(time.time()),
'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]),
'sign': '',
'text': one_text}
params['sign'] = cal_sign(params_raw=params,
                                  AppKey=AppKey) # compute the request signature
r = requests.post(url=url,
                          params=params) # request the sentiment analysis result
result = json.loads(r.text)
# print(result)
results.append([one_text,
result['data']['polar'],
result['data']['confd'],
result['ret'],
result['msg']
])
r.close()
count_i += 1
if count_i % 50 == 0:
print('tencent finish:%d' % (count_i))
return results
if __name__ == '__main__':
results = creat_label(texts=['价格便宜啦,比原来优惠多了',
'壁挂效果差,果然一分价钱一分货',
'东西一般般,诶呀',
'讨厌你',
'一般'])
results = pd.DataFrame(results, columns=['evaluation',
'label',
'confidence',
'ret',
'msg'])
results['label'] = np.where(results['label'] == 1, '正面',
np.where(results['label'] == 0, '中性', '负面'))
print(results)
|
[
"pandas.DataFrame",
"json.loads",
"urllib.parse.urlencode",
"random.choice",
"time.time",
"numpy.where",
"collections.OrderedDict",
"requests.post"
] |
[((776, 789), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (787, 789), False, 'from collections import OrderedDict\n'), ((906, 929), 'urllib.parse.urlencode', 'parse.urlencode', (['params'], {}), '(params)\n', (921, 929), False, 'from urllib import parse\n'), ((2557, 2643), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {'columns': "['evaluation', 'label', 'confidence', 'ret', 'msg']"}), "(results, columns=['evaluation', 'label', 'confidence', 'ret',\n 'msg'])\n", (2569, 2643), True, 'import pandas as pd\n'), ((1818, 1855), 'requests.post', 'requests.post', ([], {'url': 'url', 'params': 'params'}), '(url=url, params=params)\n', (1831, 1855), False, 'import requests\n'), ((1909, 1927), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (1919, 1927), False, 'import json\n'), ((2913, 2956), 'numpy.where', 'np.where', (["(results['label'] == 0)", '"""中性"""', '"""负面"""'], {}), "(results['label'] == 0, '中性', '负面')\n", (2921, 2956), True, 'import numpy as np\n'), ((1498, 1509), 'time.time', 'time.time', ([], {}), '()\n', (1507, 1509), False, 'import time\n'), ((1552, 1605), 'random.choice', 'random.choice', (['"""1234567890abcdefghijklmnopqrstuvwxyz"""'], {}), "('1234567890abcdefghijklmnopqrstuvwxyz')\n", (1565, 1605), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
# NATIONAL UNIVERSITY OF SINGAPORE - NUS
# SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE
# Singapore
# URL: http://www.sinapseinstitute.org
#-------------------------------------------------------------------------------
# Neuromorphic Engineering Group
# Author: <NAME>, PhD
# Contact:
#-------------------------------------------------------------------------------
# Description: defines classes for processing tactile data to be used for
# braille recognition.
# The 'Braille' class stores the SVM model used to recognize braille characters.
# this class abstracts the process of data processing, meaning that it only deals
# with the data ready for training and/or classification procedures.
# For handling data, the class 'BrailleHandler' should be used instead
#-------------------------------------------------------------------------------
'''
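#-------------------------------------------------------------------------------
# Usage sketch (illustrative only; the file names and label values below are
# assumptions, not part of the original module):
#   data = BrailleHandler.createTrainingData(['BRC_A1.txt', 'BRC_B1.txt'], 4, 4)
#   clf = Braille()
#   clf.train(data, labels=[0, 1])      # one label per training file
#   clf.save('braille_svm')             # writes 'braille_svm.pkl'
#   pred = clf.classify(data[0:1])      # classify a single feature vector
#-------------------------------------------------------------------------------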
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#LIBRARIES
import os, os.path, sys
sys.path.append('../general')
import numpy as np
import scipy as sp
from sklearn.svm import SVC
from sklearn.externals import joblib
from dataprocessing import * #import the detect_peaks method
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#Feature extraction for SVM-based braille classification
class BrailleHandler():
#---------------------------------------------------------------------------
#read a file and return the data
def loadFile(filepath):
if os.path.isfile(filepath):
#return the data contained in the data
return np.loadtxt(filepath)
else:
return False #file not found
def convert2vector(data):
return np.transpose(data)
#convert the data from a file into a vector
def oldconvert2vector(data,nrows,ncols):
#first convert to 3D matrix
datamat = BrailleHandler.oldconvert2frames(data,nrows,ncols)
numsamples = np.size(datamat,2) #number of samples or frames
dataVector = np.zeros((nrows*ncols,numsamples))
taxelCounter = 0
for i in range(nrows):
for j in range(ncols):
dataVector[taxelCounter] = datamat[i,j,:]
taxelCounter+=1
return dataVector #return the dataVector
#convert data from the file that are arranged
#in a 2D array (every line contains reading from all rows for one column)
#into a 3D array (row,col,frame)
def oldconvert2frames(data,nrows,ncols):
datamat = np.zeros((nrows,ncols,np.int(np.floor(np.divide(np.size(data,0),nrows)))),dtype=int)
c = 0
for ii in range(0,(np.size(data,0)-nrows),nrows):
datamat[:,:,c] = data[ii:ii+nrows,:]
c = c+1
return datamat #return the 3D matrix
#---------------------------------------------------------------------------
#find the number of peaks in every single taxel
def countPeaks(inputMatrix,threshold):
if len(inputMatrix.shape) == 3: #3D matrix
nrows = inputMatrix.shape[0] #number of rows
ncols = inputMatrix.shape[1] #number of columns
nsamples = inputMatrix.shape[2] #number of samples
#feature vector containing the number of peaks for
#each taxel of the tactile sensor
featureVector = np.zeros(nrows*ncols)
#matrix M*NxT where each row corresponds to a taxel and the
#columns to the time series signal
tactileSignal = np.zeros((nrows*ncols,nsamples))
#counter for the index of the tactileSignal matrix
counter = 0
#loop through the rows
for k in range(nrows):
#loop through the columns
for w in range(ncols):
#get a single taxel signal
tactileSignal[counter] = inputMatrix[k,w,:]
#count the number of peaks in the signal
#and built the feature vector
#find the peaks
tmppeaks = detect_peaks(tactileSignal[counter],mph=threshold,mpd=20,show=False)
#number of peaks is the length of 'tmppeaks'
featureVector[counter] = len(tmppeaks)
#increment the counter
counter+=1
#list of list, every element of the list corresponds to
#the time series of a single taxel
else:
#find the total number of taxels in the tactile array
numberTaxels = len(inputMatrix)
#feature vector containing the number of peaks for
#each taxel of the tactile sensor
featureVector = np.zeros(numberTaxels)
#scan all the taxels
for k in range(numberTaxels):
#find the peaks
tmppeaks = detect_peaks(inputMatrix[k],mph=threshold,mpd=20,show=False)
#number of peaks is the length of 'tmppeaks'
featureVector[k] = len(tmppeaks)
#return the feature vector
return featureVector
#-------------------------------------------------------------------------------
#create the training data based on the list of the text files to be loaded
#and the labels corresponding for each text data
def createTrainingData(dataFiles,nrows,ncols,filt=False):
for k in range(len(dataFiles)):
#get the filename
filename = dataFiles[k]
#load the data
datafile = BrailleHandler.loadFile(filename)
#convert to vector
#datavector = BrailleHandler.oldconvert2vector(datafile,nrows,ncols)
datavector = BrailleHandler.convert2vector(datafile)
#if data should be filtered
if filt == True:
#for every taxel
for i in range(np.size(datavector,0)):
mva = MovingAverage() #window size = 10, sampfreq = 100 Hz
#for every sample, get the moving average response
for z in range(np.size(datavector,1)):
datavector[i,z] = mva.getSample(datavector[i,z])
#find the number of peaks
peakTh = 0.05 #threshold for peak detection
#create the feature vector
featurevector = BrailleHandler.countPeaks(datavector,peakTh)
#if it is the first iteration, create the training data
if k != 0:
trainingData = np.vstack((trainingData,featurevector))
else:
trainingData = featurevector
return trainingData
#-------------------------------------------------------------------------------
#Braille Recognition Class
class Braille():
def __init__(self):
#labels for every class
#dictionary to associate label names and values
self.classes = dict()
#SVM model
self.modelSVM = None
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#load a pre-trained SVM model from a file
def load(self,filepath):
#checks if the file exists
if os.path.isfile(filepath):
self.modelSVM = joblib.load(filepath) #loads the SVM model
return True #load ok
else:
return False #file not found
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#save a new SVM model
def save(self,filename):
#saving
joblib.dump(self.modelSVM,filename+'.pkl')
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#train a SVM model
def train(self,trainingData,labels):
#create a new SVM model
self.modelSVM = SVC()
#pass the training data and the labels for training
self.modelSVM.fit(trainingData,labels)
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#classification
#features should be a feature vector following the same pattern
#that was used for training
def classify(self,features):
#check if there is a SVM model to classify the data
if self.modelSVM is not None:
#classify based on the input features
svmResp = self.modelSVM.predict(features)
#return the output of the classifier
return svmResp
else:
return False
#---------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
if __name__=='__main__':
#---------------------------------------------------------------------------
import numpy as np #numpy
import matplotlib.pyplot as plt #matplotlib
    NROWS = 4 #number of rows in the tactile array
    NCOLS = 4 #number of columns in the tactile array
peakTh = 300 #threshold for detecting peaks
#load the braille data from file
#2D matrix
datafile = np.loadtxt('NewData_BRC/BRC_B1.txt')
#convert data to a 3D matrix
tactileData = BrailleHandler.oldconvert2frames(datafile,NROWS,NCOLS)
#feature vector containing the number of peaks for each taxel
features = BrailleHandler.countPeaks(tactileData,peakTh)
#---------------------------------------------------------------------------
#feature extraction with 2D array
#moving average of the 2D matrix
#create a moving average object
#default parameters, windowsize = 10, sampfreq = 100 Hz
mva = MovingAverage()
tactileVector = BrailleHandler.oldconvert2vector(datafile,NROWS,NCOLS)
numsamples = np.size(tactileData,2) #total number of samples
tactileMVA = np.zeros((NROWS*NCOLS,numsamples))
counter = 0 #taxel counter
for k in range(NROWS*NCOLS): #scan all the columns
for z in range(numsamples): #filtering the signal sample by sample
tactileMVA[counter,z] = mva.getSample(tactileVector[k,z])
counter+=1 #increment the taxel counter
#with the filtered data, count peaks again
filtFeatures = BrailleHandler.countPeaks(tactileMVA,peakTh)
#print the filtered feature vector
print(filtFeatures)
|
[
"sys.path.append",
"numpy.size",
"sklearn.externals.joblib.dump",
"numpy.zeros",
"numpy.transpose",
"os.path.isfile",
"sklearn.externals.joblib.load",
"numpy.loadtxt",
"sklearn.svm.SVC",
"numpy.vstack"
] |
[((1187, 1216), 'sys.path.append', 'sys.path.append', (['"""../general"""'], {}), "('../general')\n", (1202, 1216), False, 'import os, os.path, sys\n'), ((9841, 9877), 'numpy.loadtxt', 'np.loadtxt', (['"""NewData_BRC/BRC_B1.txt"""'], {}), "('NewData_BRC/BRC_B1.txt')\n", (9851, 9877), True, 'import numpy as np\n'), ((10493, 10516), 'numpy.size', 'np.size', (['tactileData', '(2)'], {}), '(tactileData, 2)\n', (10500, 10516), True, 'import numpy as np\n'), ((10559, 10596), 'numpy.zeros', 'np.zeros', (['(NROWS * NCOLS, numsamples)'], {}), '((NROWS * NCOLS, numsamples))\n', (10567, 10596), True, 'import numpy as np\n'), ((1794, 1818), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (1808, 1818), False, 'import os, os.path, sys\n'), ((2019, 2037), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (2031, 2037), True, 'import numpy as np\n'), ((2264, 2283), 'numpy.size', 'np.size', (['datamat', '(2)'], {}), '(datamat, 2)\n', (2271, 2283), True, 'import numpy as np\n'), ((2334, 2371), 'numpy.zeros', 'np.zeros', (['(nrows * ncols, numsamples)'], {}), '((nrows * ncols, numsamples))\n', (2342, 2371), True, 'import numpy as np\n'), ((7652, 7676), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (7666, 7676), False, 'import os, os.path, sys\n'), ((8088, 8133), 'sklearn.externals.joblib.dump', 'joblib.dump', (['self.modelSVM', "(filename + '.pkl')"], {}), "(self.modelSVM, filename + '.pkl')\n", (8099, 8133), False, 'from sklearn.externals import joblib\n'), ((8419, 8424), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (8422, 8424), False, 'from sklearn.svm import SVC\n'), ((1892, 1912), 'numpy.loadtxt', 'np.loadtxt', (['filepath'], {}), '(filepath)\n', (1902, 1912), True, 'import numpy as np\n'), ((3670, 3693), 'numpy.zeros', 'np.zeros', (['(nrows * ncols)'], {}), '(nrows * ncols)\n', (3678, 3693), True, 'import numpy as np\n'), ((3842, 3877), 'numpy.zeros', 'np.zeros', (['(nrows * ncols, nsamples)'], {}), '((nrows * ncols, nsamples))\n', (3850, 3877), True, 'import numpy as np\n'), ((5061, 5083), 'numpy.zeros', 'np.zeros', (['numberTaxels'], {}), '(numberTaxels)\n', (5069, 5083), True, 'import numpy as np\n'), ((7707, 7728), 'sklearn.externals.joblib.load', 'joblib.load', (['filepath'], {}), '(filepath)\n', (7718, 7728), False, 'from sklearn.externals import joblib\n'), ((2968, 2984), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (2975, 2984), True, 'import numpy as np\n'), ((6905, 6945), 'numpy.vstack', 'np.vstack', (['(trainingData, featurevector)'], {}), '((trainingData, featurevector))\n', (6914, 6945), True, 'import numpy as np\n'), ((6260, 6282), 'numpy.size', 'np.size', (['datavector', '(0)'], {}), '(datavector, 0)\n', (6267, 6282), True, 'import numpy as np\n'), ((6472, 6494), 'numpy.size', 'np.size', (['datavector', '(1)'], {}), '(datavector, 1)\n', (6479, 6494), True, 'import numpy as np\n'), ((2888, 2904), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (2895, 2904), True, 'import numpy as np\n')]
|
import pdb
import json
import numpy as np
file = 'benchmark_data.json'
with open(file, 'r') as f:
json_data = json.load(f)
print(json_data.keys()) # ['domains', 'version']
domains = json_data['domains']
print('domain length', len(domains))
corr_data = []
for domain in domains:
temp = {}
temp['long_description'] = domain['description']
temp['short_description'] = domain['name']
intents = domain['intents']
print('intent length', len(intents))
for intent in intents:
temp['intent'] = intent['name']
queries = intent['queries']
print('query length', len(queries))
        for query in queries:
            # build a fresh record per query; appending `temp` directly would leave
            # every list element pointing at the same (last-updated) dict
            record = dict(temp)
            record['query'] = query['text']
            corr_data.append(record)
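# Each element of corr_data is a flat dict with the keys
# 'long_description', 'short_description', 'intent' and 'query' (one record per query).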
print(len(corr_data))
corr_data = np.array(corr_data)
np.save('benchmark_data.npy', corr_data)
"""
(Pdb) json_data['domains'][3]['intents'][0].keys()
dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name'])
len(json_data['domains'][3]['intents'][0]['description'])
json_data['domains'][3]['intents'][0]['queries']
# length
(Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys()
dict_keys(['text', 'results_per_service'])
json_data['domains'][3]['intents'][0]['queries'][0]['text']
print(domains.keys()) # ['description', '@type', 'intents', 'name']
"Queries that are related to places (restaurants, shops, concert halls, etc), as well as to the user's location."
'Queries that are related to reservation.'
'Queries that are related to transit and navigation.'
'Queries that relate to weather.'
(Pdb) json_data['domains'][3]['name']
'weather'
(Pdb) json_data['domains'][2]['name']
'transit'
(Pdb) json_data['domains'][1]['name']
'reservation'
(Pdb) json_data['domains'][0]['name']
'places'
print(len(domains)) # 4
(Pdb) len(json_data['domains'][0]['intents'])
4
(Pdb) len(json_data['domains'][1]['intents'])
2
(Pdb) len(json_data['domains'][2]['intents'])
3
(Pdb) len(json_data['domains'][3]['intents'])
1
"""
|
[
"numpy.save",
"numpy.array",
"json.load"
] |
[((719, 738), 'numpy.array', 'np.array', (['corr_data'], {}), '(corr_data)\n', (727, 738), True, 'import numpy as np\n'), ((739, 779), 'numpy.save', 'np.save', (['"""benchmark_data.npy"""', 'corr_data'], {}), "('benchmark_data.npy', corr_data)\n", (746, 779), True, 'import numpy as np\n'), ((113, 125), 'json.load', 'json.load', (['f'], {}), '(f)\n', (122, 125), False, 'import json\n')]
|
'''
Created on Feb 24, 2015
@author: <NAME> <<EMAIL>>
This module provides functions and classes for probability distributions, which
build upon the scipy.stats package and extend it.
'''
from __future__ import division
import numpy as np
from scipy import stats, special, linalg, optimize
from ..data_structures.cache import cached_property
def lognorm_mean_var_to_mu_sigma(mean, variance, definition='scipy'):
""" determines the parameters of the log-normal distribution such that the
distribution yields a given mean and variance. The optional parameter
`definition` can be used to choose a definition of the resulting parameters
that is suitable for the given software package. """
mean2 = mean**2
mu = mean2/np.sqrt(mean2 + variance)
sigma = np.sqrt(np.log(1 + variance/mean2))
if definition == 'scipy':
return mu, sigma
elif definition == 'numpy':
return np.log(mu), sigma
else:
raise ValueError('Unknown definition `%s`' % definition)
def lognorm_mean(mean, sigma):
""" returns a lognormal distribution parameterized by its mean and a spread
parameter `sigma` """
if sigma == 0:
return DeterministicDistribution(mean)
else:
mu = mean * np.exp(-0.5 * sigma**2)
return stats.lognorm(scale=mu, s=sigma)
def lognorm_mean_var(mean, variance):
""" returns a lognormal distribution parameterized by its mean and its
variance. """
if variance == 0:
return DeterministicDistribution(mean)
else:
scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy')
return stats.lognorm(scale=scale, s=sigma)
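# Example (added sketch, not part of the original module): the moment matching above
# is exact, so the parametrisation round-trips, e.g.
#     dist = lognorm_mean_var(2.0, 0.5)
#     dist.mean()  # -> 2.0 (up to floating point)
#     dist.var()   # -> 0.5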
def lognorm_sum_leastsq(count, var_norm, sim_terms=1e5, bins=64):
""" returns the parameters of a log-normal distribution that estimates the
sum of `count` log-normally distributed random variables with mean 1 and
variance `var_norm`. These parameters are determined by fitting the
probability density function to a histogram obtained by drawing `sim_terms`
random numbers """
sum_mean = count
sum_var = count * var_norm
# get random numbers
dist = lognorm_mean_var(1, var_norm)
vals = dist.rvs((int(sim_terms), count)).sum(axis=1)
# get the histogram
val_max = sum_mean + 3 * np.sqrt(sum_var)
bins = np.linspace(0, val_max, bins + 1)
xs = 0.5*(bins[:-1] + bins[1:])
density, _ = np.histogram(vals, bins=bins, range=[0, val_max],
density=True)
def pdf_diff(params):
""" evaluate the estimated pdf """
scale, sigma = params
return stats.lognorm.pdf(xs, scale=scale, s=sigma) - density
# do the least square fitting
params_init = lognorm_mean_var_to_mu_sigma(sum_mean, sum_var, 'scipy')
params, _ = optimize.leastsq(pdf_diff, params_init)
return params
def lognorm_sum(count, mean, variance, method='fenton'):
""" returns an estimate of the distribution of the sum of `count`
log-normally distributed variables with `mean` and `variance`. The returned
distribution is again log-normal with mean and variance determined from the
given parameters. Here, several methods can be used:
`fenton` - match the first two moments of the distribution
`leastsq` - minimize the error in the interval
"""
if method == 'fenton':
# use the moments directly
return lognorm_mean_var(count * mean, count * variance)
elif method == 'leastsq':
# determine the moments from fitting
var_norm = variance / mean**2
scale, sigma = lognorm_sum_leastsq(count, var_norm)
return stats.lognorm(scale=scale * mean, s=sigma)
else:
raise ValueError('Unknown method `%s` for determining the sum of '
'lognormal distributions. Accepted methods are '
'[`fenton`, `leastsq`].')
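# Example (added sketch): approximating the sum of 10 i.i.d. log-normal terms with
# mean 1 and variance 0.25; with the default Fenton method the result simply matches
# the first two moments, i.e. lognorm_sum(10, 1.0, 0.25) has mean 10 and variance 2.5.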
def gamma_mean_var(mean, variance):
""" returns a gamma distribution with given mean and variance """
alpha = mean**2 / variance
beta = variance / mean
return stats.gamma(scale=beta, a=alpha)
def loguniform_mean(mean, width):
""" returns a loguniform distribution parameterized by its mean and a spread
parameter `width`. The ratio between the maximal value and the minimal value
is given by width**2 """
if width == 1:
# treat special case separately
return DeterministicDistribution(mean)
else:
scale = mean * (2*width*np.log(width)) / (width**2 - 1)
return LogUniformDistribution(scale=scale, s=width)
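# Example (added sketch): loguniform_mean(1.0, 2.0) keeps the mean at 1.0 while its
# support spans a factor width**2 = 4 between the lower and upper end point.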
def loguniform_mean_var(mean, var):
""" returns a loguniform distribution parameterized by its mean and
variance. Here, we need to solve a non-linear equation numerically, which
might degrade accuracy and performance of the result """
if var < 0:
raise ValueError('Variance must be positive')
elif var == 0:
# treat special case separately
return DeterministicDistribution(mean)
else:
# determine width parameter numerically
cv2 = var / mean**2 # match square coefficient of variation
def _rhs(q):
""" match the coefficient of variation """
return 0.5 * (q + 1) * np.log(q) / (q - 1) - 1 - cv2
width = optimize.newton(_rhs, 1.1)
return loguniform_mean(mean, np.sqrt(width))
def random_log_uniform(v_min, v_max, size):
""" returns random variables that a distributed uniformly in log space """
log_min, log_max = np.log(v_min), np.log(v_max)
res = np.random.uniform(log_min, log_max, size)
return np.exp(res)
def dist_skewness(dist):
""" returns the skewness of the distribution `dist` """
mean = dist.mean()
var = dist.var()
return (dist.moment(3) - 3*mean*var - mean**3) / var**(3/2)
class DeterministicDistribution_gen(stats.rv_continuous):
""" deterministic distribution that always returns a given value
Code copied from
https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#making-a-continuous-distribution-i-e-subclassing-rv-continuous
"""
def _cdf(self, x):
return np.where(x < 0, 0., 1.)
def _stats(self):
return 0., 0., 0., 0.
def _rvs(self):
return np.zeros(self._size)
DeterministicDistribution = DeterministicDistribution_gen(
name='DeterministicDistribution'
)
class LogUniformDistribution_gen(stats.rv_continuous):
"""
Log-uniform distribution.
"""
def freeze(self, *args, **kwds):
frozen = super(LogUniformDistribution_gen, self).freeze(*args, **kwds)
frozen.support = self.support(*args, **kwds)
return frozen
def support(self, *args, **kwds):
""" return the interval in which the PDF of the distribution is
non-zero """
extra_args, _, _, _ = self._parse_args_stats(*args, **kwds)
mean = self.mean(*args, **kwds)
scale = extra_args[0]
width = mean * (2*scale*np.log(scale)) / (scale**2 - 1)
return (width / scale, width * scale)
def _rvs(self, s):
""" random variates """
# choose the receptor response characteristics
return random_log_uniform(1/s, s, self._size)
def _pdf(self, x, s):
""" probability density function """
s = s[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
res[idx] = 1/(x[idx] * np.log(s*s))
return res
def _cdf(self, x, s):
""" cumulative probability function """
s = s[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
log_s = np.log(s)
res[idx] = (log_s + np.log(x[idx]))/(2 * log_s)
res[x > s] = 1
return res
def _ppf(self, q, s):
""" percent point function (inverse of cdf) """
s = s[0] # reset broadcasting
res = np.zeros_like(q)
idx = (q > 0)
res[idx] = s**(2*q[idx] - 1)
return res
def _stats(self, s):
""" calculates statistics of the distribution """
mean = (s**2 - 1)/(2*s*np.log(s))
var = ((s**4 - 1) * np.log(s) - (s**2 - 1)**2) \
/ (4 * s**2 * np.log(s)**2)
return mean, var, None, None
LogUniformDistribution = LogUniformDistribution_gen(
a=0, name='LogUniformDistribution'
)
class HypoExponentialDistribution(object):
"""
Hypoexponential distribution.
Unfortunately, the framework supplied by scipy.stats.rv_continuous does not
support a variable number of parameters and we thus only mimic its
interface here.
"""
def __init__(self, rates, method='sum'):
""" initializes the hypoexponential distribution.
`rates` are the rates of the underlying exponential processes
`method` determines what method is used for calculating the cdf and can
be either `sum` or `eigen`
"""
        if method in {'sum', 'eigen'}:
            self.method = method
        else:
            raise ValueError('Method must be either `sum` or `eigen`')
# prepare the rates of the system
self.rates = np.asarray(rates)
self.alpha = 1 / self.rates
if np.any(rates <= 0):
raise ValueError('All rates must be positive')
if len(np.unique(self.alpha)) != len(self.alpha):
raise ValueError('The current implementation only supports cases '
'where all rates are different from each other.')
# calculate terms that we need later
with np.errstate(divide='ignore'):
mat = self.alpha[:, None] \
/ (self.alpha[:, None] - self.alpha[None, :])
mat[(self.alpha[:, None] - self.alpha[None, :]) == 0] = 1
self._terms = np.prod(mat, 1)
def rvs(self, size):
""" random variates """
# choose the receptor response characteristics
return sum(np.random.exponential(scale=alpha, size=size)
for alpha in self.alpha)
def mean(self):
""" mean of the distribution """
return self.alpha.sum()
def variance(self):
""" variance of the distribution """
return (2 * np.sum(self.alpha**2 * self._terms) -
(self.alpha.sum())**2)
def pdf(self, x):
""" probability density function """
if not np.isscalar(x):
x = np.asarray(x)
res = np.zeros_like(x)
nz = (x > 0)
if np.any(nz):
if self.method == 'sum':
factor = np.exp(-x[nz, None] * self.rates[..., :]) \
/ self.rates[..., :]
res[nz] = np.sum(self._terms[..., :] * factor, axis=1)
else:
Theta = (np.diag(-self.rates, 0) +
np.diag(self.rates[:-1], 1))
for i in np.flatnonzero(nz):
res.flat[i] = \
1 - linalg.expm(x.flat[i]*Theta)[0, :].sum()
elif x == 0:
res = 0
else:
if self.method == 'sum':
                factor = np.exp(-x*self.rates) / self.rates
                res = np.sum(self._terms * factor)
else:
Theta = np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)
res = 1 - linalg.expm(x*Theta)[0, :].sum()
return res
def cdf(self, x):
""" cumulative density function """
if not np.isscalar(x):
x = np.asarray(x)
res = np.zeros_like(x)
nz = (x > 0)
if np.any(nz):
factor = np.exp(-x[nz, None]*self.rates[..., :])
res = 1 - np.sum(self._terms[..., :] * factor, axis=1)
elif x == 0:
res = 0
else:
factor = np.exp(-x*self.rates)
res = 1 - np.sum(self._terms * factor)
return res
# ==============================================================================
# OLD DISTRIBUTIONS THAT MIGHT NOT BE NEEDED ANYMORE
# ==============================================================================
class PartialLogNormDistribution_gen(stats.rv_continuous):
"""
partial log-normal distribution.
a fraction `frac` of the distribution follows a log-normal distribution,
while the remaining fraction `1 - frac` is zero
Similar to the lognorm distribution, this does not support any location
parameter
"""
def _rvs(self, s, frac):
""" random variates """
# choose the items response characteristics
res = np.exp(s * np.random.standard_normal(self._size))
if frac != 1:
# switch off items randomly
res[np.random.random(self._size) > frac] = 0
return res
def _pdf(self, x, s, frac):
""" probability density function """
s, frac = s[0], frac[0] # reset broadcasting
return frac / (s*x*np.sqrt(2*np.pi)) * np.exp(-1/2*(np.log(x)/s)**2)
def _cdf(self, x, s, frac):
""" cumulative probability function """
s, frac = s[0], frac[0] # reset broadcasting
return 1 + frac*(-0.5 + 0.5*special.erf(np.log(x)/(s*np.sqrt(2))))
def _ppf(self, q, s, frac):
""" percent point function (inverse of cdf) """
s, frac = s[0], frac[0] # reset broadcasting
q_scale = (q - (1 - frac)) / frac
res = np.zeros_like(q)
idx = (q_scale > 0)
res[idx] = np.exp(s * special.ndtri(q_scale[idx]))
return res
PartialLogNormDistribution = PartialLogNormDistribution_gen(
a=0, name='PartialLogNormDistribution'
)
class PartialLogUniformDistribution_gen(stats.rv_continuous):
"""
partial log-uniform distribution.
a fraction `frac` of the distribution follows a log-uniform distribution,
while the remaining fraction `1 - frac` is zero
"""
def _rvs(self, s, frac):
""" random variates """
# choose the receptor response characteristics
res = random_log_uniform(1/s, s, self._size)
# switch off receptors randomly
if frac != 1:
res[np.random.random(self._size) > frac] = 0
return res
def _pdf(self, x, s, frac):
""" probability density function """
s, frac = s[0], frac[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
res[idx] = frac/(x[idx] * np.log(s*s))
return res
def _cdf(self, x, s, frac):
""" cumulative probability function """
s, frac = s[0], frac[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
log_s = np.log(s)
res[idx] = (log_s + np.log(x[idx]))/(2 * log_s)
res[x > s] = 1
return (1 - frac) + frac*res
def _ppf(self, q, s, frac):
""" percent point function (inverse of cdf) """
s, frac = s[0], frac[0] # reset broadcasting
q_scale = (q - (1 - frac)) / frac
res = np.zeros_like(q)
idx = (q_scale > 0)
res[idx] = s**(2*q_scale[idx] - 1)
return res
PartialLogUniformDistribution = PartialLogUniformDistribution_gen(
a=0, name='PartialLogUniformDistribution'
)
NORMAL_DISTRIBUTION_NORMALIZATION = 1/np.sqrt(2*np.pi)
class NormalDistribution(object):
""" class representing normal distributions """
def __init__(self, mean, var, count=None):
""" normal distributions are described by their mean and variance.
Additionally, count denotes how many observations were used to
estimate the parameters. All values can also be numpy arrays to
represent many distributions efficiently """
self.mean = mean
self.var = var
self.count = count
def copy(self):
return self.__class__(self.mean, self.var, self.count)
@cached_property()
def std(self):
""" return standard deviation """
return np.sqrt(self.var)
def pdf(self, value, mask=None):
""" return probability density function at value """
if mask is None:
mean = self.mean
var = self.var
std = self.std
else:
mean = self.mean[mask]
var = self.var[mask]
std = self.std[mask]
return NORMAL_DISTRIBUTION_NORMALIZATION/std \
* np.exp(-0.5*(value - mean)**2 / var)
def add_observation(self, value):
""" add an observed value and adjust mean and variance of the
distribution. This returns a new distribution and only works if
count was set """
if self.count is None:
return self.copy()
else:
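            # Welford-style online update: rebuild the sum of squared deviations (M2),
            # fold in the new observation, and convert back to the sample variance.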
M2 = self.var*(self.count - 1)
count = self.count + 1
delta = value - self.mean
mean = self.mean + delta/count
M2 = M2 + delta*(value - mean)
return NormalDistribution(mean, M2/(count - 1), count)
def distance(self, other, kind='kullback-leibler'):
""" return the distance between two normal distributions """
if kind == 'kullback-leibler':
dist = 0.5*(np.log(other.var/self.var) +
                        (self.var + (self.mean - other.mean)**2)/other.var - 1)
elif kind == 'bhattacharyya':
var_ratio = self.var/other.var
term1 = np.log(0.25*(var_ratio + 1/var_ratio + 2))
term2 = (self.mean - other.mean)**2/(self.var + other.var)
dist = 0.25*(term1 + term2)
elif kind == 'hellinger':
dist_b = self.distance(other, kind='bhattacharyya')
dist = np.sqrt(1 - np.exp(-dist_b))
else:
raise ValueError('Unknown distance `%s`' % kind)
return dist
def welch_test(self, other):
""" performs Welch's t-test of two normal distributions """
# calculate the degrees of freedom
s1, s2 = self.var/self.count, other.var/other.count
nu1, nu2 = self.count - 1, other.count - 1
dof = (s1 + s2)**2/(s1**2/nu1 + s2**2/nu2)
# calculate the Welch t-value
t = (self.mean - other.mean)/np.sqrt(s1 + s2)
# calculate the probability using the Student's T distribution
prob = stats.t.sf(np.abs(t), dof) * 2
return prob
def overlap(self, other, common_variance=True):
""" estimates the amount of overlap between two distributions """
if common_variance:
if self.count is None:
if other.count is None: # neither is sampled
S = np.sqrt(0.5*(self.var + other.var))
else: # other is sampled
S = self.std
else:
if other.count is None: # self is sampled
S = other.std
else: # both are sampled
expr = ((self.count - 1)*self.var +
(other.count - 1)*other.var)
S = np.sqrt(expr/(self.count + other.count - 2))
delta = np.abs(self.mean - other.mean)/S
return 2*stats.norm.cdf(-0.5*delta)
else:
# here, we would have to integrate numerically
raise NotImplementedError
|
[
"numpy.sum",
"numpy.abs",
"scipy.special.ndtri",
"numpy.random.exponential",
"scipy.optimize.leastsq",
"numpy.histogram",
"scipy.optimize.newton",
"numpy.exp",
"numpy.diag",
"numpy.prod",
"numpy.unique",
"numpy.zeros_like",
"scipy.stats.lognorm.pdf",
"scipy.stats.norm.cdf",
"scipy.stats.gamma",
"numpy.linspace",
"numpy.asarray",
"numpy.random.standard_normal",
"numpy.random.uniform",
"scipy.linalg.expm",
"numpy.log",
"numpy.isscalar",
"numpy.flatnonzero",
"scipy.stats.lognorm",
"numpy.zeros",
"numpy.errstate",
"numpy.any",
"numpy.where",
"numpy.random.random",
"numpy.sqrt"
] |
[((2333, 2366), 'numpy.linspace', 'np.linspace', (['(0)', 'val_max', '(bins + 1)'], {}), '(0, val_max, bins + 1)\n', (2344, 2366), True, 'import numpy as np\n'), ((2420, 2483), 'numpy.histogram', 'np.histogram', (['vals'], {'bins': 'bins', 'range': '[0, val_max]', 'density': '(True)'}), '(vals, bins=bins, range=[0, val_max], density=True)\n', (2432, 2483), True, 'import numpy as np\n'), ((2821, 2860), 'scipy.optimize.leastsq', 'optimize.leastsq', (['pdf_diff', 'params_init'], {}), '(pdf_diff, params_init)\n', (2837, 2860), False, 'from scipy import stats, special, linalg, optimize\n'), ((4129, 4161), 'scipy.stats.gamma', 'stats.gamma', ([], {'scale': 'beta', 'a': 'alpha'}), '(scale=beta, a=alpha)\n', (4140, 4161), False, 'from scipy import stats, special, linalg, optimize\n'), ((5626, 5667), 'numpy.random.uniform', 'np.random.uniform', (['log_min', 'log_max', 'size'], {}), '(log_min, log_max, size)\n', (5643, 5667), True, 'import numpy as np\n'), ((5679, 5690), 'numpy.exp', 'np.exp', (['res'], {}), '(res)\n', (5685, 5690), True, 'import numpy as np\n'), ((15535, 15553), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (15542, 15553), True, 'import numpy as np\n'), ((748, 773), 'numpy.sqrt', 'np.sqrt', (['(mean2 + variance)'], {}), '(mean2 + variance)\n', (755, 773), True, 'import numpy as np\n'), ((794, 822), 'numpy.log', 'np.log', (['(1 + variance / mean2)'], {}), '(1 + variance / mean2)\n', (800, 822), True, 'import numpy as np\n'), ((1292, 1324), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'mu', 's': 'sigma'}), '(scale=mu, s=sigma)\n', (1305, 1324), False, 'from scipy import stats, special, linalg, optimize\n'), ((1630, 1665), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'scale', 's': 'sigma'}), '(scale=scale, s=sigma)\n', (1643, 1665), False, 'from scipy import stats, special, linalg, optimize\n'), ((5587, 5600), 'numpy.log', 'np.log', (['v_min'], {}), '(v_min)\n', (5593, 5600), True, 'import numpy as np\n'), ((5602, 5615), 'numpy.log', 'np.log', (['v_max'], {}), '(v_max)\n', (5608, 5615), True, 'import numpy as np\n'), ((6214, 6239), 'numpy.where', 'np.where', (['(x < 0)', '(0.0)', '(1.0)'], {}), '(x < 0, 0.0, 1.0)\n', (6222, 6239), True, 'import numpy as np\n'), ((6335, 6355), 'numpy.zeros', 'np.zeros', (['self._size'], {}), '(self._size)\n', (6343, 6355), True, 'import numpy as np\n'), ((7451, 7467), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (7464, 7467), True, 'import numpy as np\n'), ((7711, 7727), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (7724, 7727), True, 'import numpy as np\n'), ((7778, 7787), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (7784, 7787), True, 'import numpy as np\n'), ((8032, 8048), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (8045, 8048), True, 'import numpy as np\n'), ((9229, 9246), 'numpy.asarray', 'np.asarray', (['rates'], {}), '(rates)\n', (9239, 9246), True, 'import numpy as np\n'), ((9294, 9312), 'numpy.any', 'np.any', (['(rates <= 0)'], {}), '(rates <= 0)\n', (9300, 9312), True, 'import numpy as np\n'), ((9880, 9895), 'numpy.prod', 'np.prod', (['mat', '(1)'], {}), '(mat, 1)\n', (9887, 9895), True, 'import numpy as np\n'), ((13622, 13638), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (13635, 13638), True, 'import numpy as np\n'), ((14571, 14587), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (14584, 14587), True, 'import numpy as np\n'), ((14864, 14880), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (14877, 14880), True, 'import 
numpy as np\n'), ((14931, 14940), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (14937, 14940), True, 'import numpy as np\n'), ((15266, 15282), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (15279, 15282), True, 'import numpy as np\n'), ((16258, 16275), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (16265, 16275), True, 'import numpy as np\n'), ((1253, 1278), 'numpy.exp', 'np.exp', (['(-0.5 * sigma ** 2)'], {}), '(-0.5 * sigma ** 2)\n', (1259, 1278), True, 'import numpy as np\n'), ((2305, 2321), 'numpy.sqrt', 'np.sqrt', (['sum_var'], {}), '(sum_var)\n', (2312, 2321), True, 'import numpy as np\n'), ((2633, 2676), 'scipy.stats.lognorm.pdf', 'stats.lognorm.pdf', (['xs'], {'scale': 'scale', 's': 'sigma'}), '(xs, scale=scale, s=sigma)\n', (2650, 2676), False, 'from scipy import stats, special, linalg, optimize\n'), ((3685, 3727), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': '(scale * mean)', 's': 'sigma'}), '(scale=scale * mean, s=sigma)\n', (3698, 3727), False, 'from scipy import stats, special, linalg, optimize\n'), ((5358, 5384), 'scipy.optimize.newton', 'optimize.newton', (['_rhs', '(1.1)'], {}), '(_rhs, 1.1)\n', (5373, 5384), False, 'from scipy import stats, special, linalg, optimize\n'), ((9656, 9684), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (9667, 9684), True, 'import numpy as np\n'), ((10495, 10509), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (10506, 10509), True, 'import numpy as np\n'), ((10527, 10540), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (10537, 10540), True, 'import numpy as np\n'), ((10559, 10575), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (10572, 10575), True, 'import numpy as np\n'), ((10616, 10626), 'numpy.any', 'np.any', (['nz'], {}), '(nz)\n', (10622, 10626), True, 'import numpy as np\n'), ((11641, 11655), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (11652, 11655), True, 'import numpy as np\n'), ((11673, 11686), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (11683, 11686), True, 'import numpy as np\n'), ((11705, 11721), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (11718, 11721), True, 'import numpy as np\n'), ((11762, 11772), 'numpy.any', 'np.any', (['nz'], {}), '(nz)\n', (11768, 11772), True, 'import numpy as np\n'), ((16693, 16733), 'numpy.exp', 'np.exp', (['(-0.5 * (value - mean) ** 2 / var)'], {}), '(-0.5 * (value - mean) ** 2 / var)\n', (16699, 16733), True, 'import numpy as np\n'), ((18583, 18599), 'numpy.sqrt', 'np.sqrt', (['(s1 + s2)'], {}), '(s1 + s2)\n', (18590, 18599), True, 'import numpy as np\n'), ((924, 934), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (930, 934), True, 'import numpy as np\n'), ((5422, 5436), 'numpy.sqrt', 'np.sqrt', (['width'], {}), '(width)\n', (5429, 5436), True, 'import numpy as np\n'), ((7533, 7546), 'numpy.log', 'np.log', (['(s * s)'], {}), '(s * s)\n', (7539, 7546), True, 'import numpy as np\n'), ((7816, 7830), 'numpy.log', 'np.log', (['x[idx]'], {}), '(x[idx])\n', (7822, 7830), True, 'import numpy as np\n'), ((8251, 8260), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (8257, 8260), True, 'import numpy as np\n'), ((9388, 9409), 'numpy.unique', 'np.unique', (['self.alpha'], {}), '(self.alpha)\n', (9397, 9409), True, 'import numpy as np\n'), ((10041, 10086), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': 'alpha', 'size': 'size'}), '(scale=alpha, size=size)\n', (10062, 10086), True, 'import numpy as np\n'), ((10329, 10366), 'numpy.sum', 'np.sum', 
(['(self.alpha ** 2 * self._terms)'], {}), '(self.alpha ** 2 * self._terms)\n', (10335, 10366), True, 'import numpy as np\n'), ((11799, 11840), 'numpy.exp', 'np.exp', (['(-x[nz, None] * self.rates[..., :])'], {}), '(-x[nz, None] * self.rates[..., :])\n', (11805, 11840), True, 'import numpy as np\n'), ((11986, 12009), 'numpy.exp', 'np.exp', (['(-x * self.rates)'], {}), '(-x * self.rates)\n', (11992, 12009), True, 'import numpy as np\n'), ((12790, 12827), 'numpy.random.standard_normal', 'np.random.standard_normal', (['self._size'], {}), '(self._size)\n', (12815, 12827), True, 'import numpy as np\n'), ((13697, 13724), 'scipy.special.ndtri', 'special.ndtri', (['q_scale[idx]'], {}), '(q_scale[idx])\n', (13710, 13724), False, 'from scipy import stats, special, linalg, optimize\n'), ((14656, 14669), 'numpy.log', 'np.log', (['(s * s)'], {}), '(s * s)\n', (14662, 14669), True, 'import numpy as np\n'), ((14969, 14983), 'numpy.log', 'np.log', (['x[idx]'], {}), '(x[idx])\n', (14975, 14983), True, 'import numpy as np\n'), ((17761, 17807), 'numpy.log', 'np.log', (['(0.25 * (var_ratio + 1 / var_ratio + 2))'], {}), '(0.25 * (var_ratio + 1 / var_ratio + 2))\n', (17767, 17807), True, 'import numpy as np\n'), ((18707, 18716), 'numpy.abs', 'np.abs', (['t'], {}), '(t)\n', (18713, 18716), True, 'import numpy as np\n'), ((19500, 19530), 'numpy.abs', 'np.abs', (['(self.mean - other.mean)'], {}), '(self.mean - other.mean)\n', (19506, 19530), True, 'import numpy as np\n'), ((19554, 19582), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['(-0.5 * delta)'], {}), '(-0.5 * delta)\n', (19568, 19582), False, 'from scipy import stats, special, linalg, optimize\n'), ((4538, 4551), 'numpy.log', 'np.log', (['width'], {}), '(width)\n', (4544, 4551), True, 'import numpy as np\n'), ((7069, 7082), 'numpy.log', 'np.log', (['scale'], {}), '(scale)\n', (7075, 7082), True, 'import numpy as np\n'), ((8290, 8299), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (8296, 8299), True, 'import numpy as np\n'), ((8349, 8358), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (8355, 8358), True, 'import numpy as np\n'), ((10825, 10869), 'numpy.sum', 'np.sum', (['(self._terms[..., :] * factor)'], {'axis': '(1)'}), '(self._terms[..., :] * factor, axis=1)\n', (10831, 10869), True, 'import numpy as np\n'), ((11035, 11053), 'numpy.flatnonzero', 'np.flatnonzero', (['nz'], {}), '(nz)\n', (11049, 11053), True, 'import numpy as np\n'), ((11351, 11379), 'numpy.sum', 'np.sum', (['(self._terms * factor)'], {}), '(self._terms * factor)\n', (11357, 11379), True, 'import numpy as np\n'), ((11865, 11909), 'numpy.sum', 'np.sum', (['(self._terms[..., :] * factor)'], {'axis': '(1)'}), '(self._terms[..., :] * factor, axis=1)\n', (11871, 11909), True, 'import numpy as np\n'), ((12030, 12058), 'numpy.sum', 'np.sum', (['(self._terms * factor)'], {}), '(self._terms * factor)\n', (12036, 12058), True, 'import numpy as np\n'), ((12907, 12935), 'numpy.random.random', 'np.random.random', (['self._size'], {}), '(self._size)\n', (12923, 12935), True, 'import numpy as np\n'), ((13135, 13153), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (13142, 13153), True, 'import numpy as np\n'), ((14356, 14384), 'numpy.random.random', 'np.random.random', (['self._size'], {}), '(self._size)\n', (14372, 14384), True, 'import numpy as np\n'), ((19032, 19069), 'numpy.sqrt', 'np.sqrt', (['(0.5 * (self.var + other.var))'], {}), '(0.5 * (self.var + other.var))\n', (19039, 19069), True, 'import numpy as np\n'), ((19434, 19480), 'numpy.sqrt', 'np.sqrt', (['(expr / (self.count + 
other.count - 2))'], {}), '(expr / (self.count + other.count - 2))\n', (19441, 19480), True, 'import numpy as np\n'), ((10698, 10739), 'numpy.exp', 'np.exp', (['(-x[nz, None] * self.rates[..., :])'], {}), '(-x[nz, None] * self.rates[..., :])\n', (10704, 10739), True, 'import numpy as np\n'), ((10921, 10944), 'numpy.diag', 'np.diag', (['(-self.rates)', '(0)'], {}), '(-self.rates, 0)\n', (10928, 10944), True, 'import numpy as np\n'), ((10977, 11004), 'numpy.diag', 'np.diag', (['self.rates[:-1]', '(1)'], {}), '(self.rates[:-1], 1)\n', (10984, 11004), True, 'import numpy as np\n'), ((11291, 11314), 'numpy.exp', 'np.exp', (['(-x * self.rates)'], {}), '(-x * self.rates)\n', (11297, 11314), True, 'import numpy as np\n'), ((11422, 11445), 'numpy.diag', 'np.diag', (['(-self.rates)', '(0)'], {}), '(-self.rates, 0)\n', (11429, 11445), True, 'import numpy as np\n'), ((11448, 11475), 'numpy.diag', 'np.diag', (['self.rates[:-1]', '(1)'], {}), '(self.rates[:-1], 1)\n', (11455, 11475), True, 'import numpy as np\n'), ((17537, 17565), 'numpy.log', 'np.log', (['(other.var / self.var)'], {}), '(other.var / self.var)\n', (17543, 17565), True, 'import numpy as np\n'), ((13168, 13177), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (13174, 13177), True, 'import numpy as np\n'), ((18057, 18072), 'numpy.exp', 'np.exp', (['(-dist_b)'], {}), '(-dist_b)\n', (18063, 18072), True, 'import numpy as np\n'), ((5303, 5312), 'numpy.log', 'np.log', (['q'], {}), '(q)\n', (5309, 5312), True, 'import numpy as np\n'), ((13395, 13404), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (13401, 13404), True, 'import numpy as np\n'), ((11502, 11524), 'scipy.linalg.expm', 'linalg.expm', (['(x * Theta)'], {}), '(x * Theta)\n', (11513, 11524), False, 'from scipy import stats, special, linalg, optimize\n'), ((13408, 13418), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13415, 13418), True, 'import numpy as np\n'), ((11131, 11161), 'scipy.linalg.expm', 'linalg.expm', (['(x.flat[i] * Theta)'], {}), '(x.flat[i] * Theta)\n', (11142, 11161), False, 'from scipy import stats, special, linalg, optimize\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
# Define the class for the Meta-material dataset
class MetaMaterialDataSet(Dataset):
""" The Meta Material Dataset Class """
def __init__(self, ftr, lbl, bool_train):
"""
Instantiate the Dataset Object
:param ftr: the features which is always the Geometry !!
:param lbl: the labels, which is always the Spectra !!
:param bool_train:
"""
self.ftr = ftr
self.lbl = lbl
self.bool_train = bool_train
self.len = len(ftr)
def __len__(self):
return self.len
def __getitem__(self, ind):
return self.ftr[ind, :], self.lbl[ind, :]
## Copied from Omar's code
# Make geometry samples
def MM_Geom(n):
# Parameter bounds for metamaterial radius and height
r_min = 20
r_max = 200
h_min = 20
h_max = 100
    # Defines the discrete grid of parameter values to choose from
space = 10
r_space = np.linspace(r_min, r_max, space + 1)
h_space = np.linspace(h_min, h_max, space + 1)
    # Shuffles r_space/h_space each iteration and takes the 0th element to build a random set of n (r, h) pairs
r, h = np.zeros(n, dtype=float), np.zeros(n, dtype=float)
for i in range(n):
np.random.shuffle(r_space)
np.random.shuffle(h_space)
r[i] = r_space[0]
h[i] = h_space[0]
return r, h
# Make geometry and spectra
def Make_MM_Model(n):
r, h = MM_Geom(n)
spectra = np.zeros(300)
geom = np.concatenate((r, h), axis=0)
for i in range(n):
w0 = 100 / h[i]
wp = (1 / 100) * np.sqrt(np.pi) * r[i]
g = (1 / 1000) * np.sqrt(np.pi) * r[i]
w, e2 = Lorentzian(w0, wp, g)
spectra += e2
return geom, spectra
# Calculate Lorentzian function to get spectra
def Lorentzian(w0, wp, g):
freq_low = 0
freq_high = 5
num_freq = 300
w = np.arange(freq_low, freq_high, (freq_high - freq_low) / num_freq)
# e1 = np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2), -np.power(w, 2))),
# np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2),
# np.multiply(np.power(w, 2), np.power(g, 2))))
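    # Imaginary part of the Lorentzian response computed below:
    #   e2 = wp^2 * w * g / ((w0^2 - w^2)^2 + w^2 * g^2)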
e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)),
np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2),
np.multiply(np.power(w, 2), np.power(g, 2))))
return w, e2
# Generates randomized dataset of simulated spectra for training and testing
def Prepare_Data(osc, sets, batch_size):
features = []
labels = []
for i in range(sets):
geom, spectra = Make_MM_Model(osc)
features.append(geom)
labels.append(spectra)
features = np.array(features, dtype='float32')
labels = np.array(labels, dtype='float32')
ftrsize = features.size / sets
lblsize = labels.size / sets
print('Size of Features is %i, Size of Labels is %i' % (ftrsize, lblsize))
print('There are %i datasets:' % sets)
ftrTrain, ftrTest, lblTrain, lblTest = train_test_split(features, labels, test_size=0.2, random_state=1234)
train_data = MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True)
test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)
print('Number of Training samples is {}'.format(len(ftrTrain)))
print('Number of Test samples is {}'.format(len(ftrTest)))
return train_loader, test_loader
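# Note (added, illustrative): with osc=1 and sets=10000 each feature vector has length 2
# (the radius r and height h) and each label vector has length 300 (the simulated spectrum).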
def gen_data(name):
train_loader, test_loader = Prepare_Data(1, 10000, 1000)
with open(name, 'a') as datafile:
for j, (geometry, spectra) in enumerate(train_loader):
concate = np.concatenate([geometry, spectra], axis=1)
# print(np.shape(concate))
np.savetxt(datafile, concate, delimiter=',')
if __name__ == "__main__":
train_loader, test_loader = Prepare_Data(1, 10000, 1000)
with open('toy_data/mm1d_6.csv', 'a') as datafile:
for j, (geometry, spectra) in enumerate(train_loader):
concate = np.concatenate([geometry, spectra], axis=1)
#print(np.shape(concate))
np.savetxt(datafile, concate, delimiter=',')
|
[
"numpy.random.shuffle",
"numpy.multiply",
"torch.utils.data.DataLoader",
"sklearn.model_selection.train_test_split",
"numpy.power",
"numpy.savetxt",
"numpy.zeros",
"numpy.arange",
"numpy.array",
"numpy.linspace",
"numpy.concatenate",
"numpy.sqrt"
] |
[((1093, 1129), 'numpy.linspace', 'np.linspace', (['r_min', 'r_max', '(space + 1)'], {}), '(r_min, r_max, space + 1)\n', (1104, 1129), True, 'import numpy as np\n'), ((1144, 1180), 'numpy.linspace', 'np.linspace', (['h_min', 'h_max', '(space + 1)'], {}), '(h_min, h_max, space + 1)\n', (1155, 1180), True, 'import numpy as np\n'), ((1602, 1615), 'numpy.zeros', 'np.zeros', (['(300)'], {}), '(300)\n', (1610, 1615), True, 'import numpy as np\n'), ((1627, 1657), 'numpy.concatenate', 'np.concatenate', (['(r, h)'], {'axis': '(0)'}), '((r, h), axis=0)\n', (1641, 1657), True, 'import numpy as np\n'), ((2022, 2087), 'numpy.arange', 'np.arange', (['freq_low', 'freq_high', '((freq_high - freq_low) / num_freq)'], {}), '(freq_low, freq_high, (freq_high - freq_low) / num_freq)\n', (2031, 2087), True, 'import numpy as np\n'), ((2885, 2920), 'numpy.array', 'np.array', (['features'], {'dtype': '"""float32"""'}), "(features, dtype='float32')\n", (2893, 2920), True, 'import numpy as np\n'), ((2934, 2967), 'numpy.array', 'np.array', (['labels'], {'dtype': '"""float32"""'}), "(labels, dtype='float32')\n", (2942, 2967), True, 'import numpy as np\n'), ((3203, 3271), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'labels'], {'test_size': '(0.2)', 'random_state': '(1234)'}), '(features, labels, test_size=0.2, random_state=1234)\n', (3219, 3271), False, 'from sklearn.model_selection import train_test_split\n'), ((3437, 3499), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'batch_size'}), '(train_data, batch_size=batch_size)\n', (3464, 3499), False, 'import torch\n'), ((3518, 3579), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'batch_size'}), '(test_data, batch_size=batch_size)\n', (3545, 3579), False, 'import torch\n'), ((1302, 1326), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (1310, 1326), True, 'import numpy as np\n'), ((1328, 1352), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (1336, 1352), True, 'import numpy as np\n'), ((1384, 1410), 'numpy.random.shuffle', 'np.random.shuffle', (['r_space'], {}), '(r_space)\n', (1401, 1410), True, 'import numpy as np\n'), ((1419, 1445), 'numpy.random.shuffle', 'np.random.shuffle', (['h_space'], {}), '(h_space)\n', (1436, 1445), True, 'import numpy as np\n'), ((2377, 2392), 'numpy.power', 'np.power', (['wp', '(2)'], {}), '(wp, 2)\n', (2385, 2392), True, 'import numpy as np\n'), ((2394, 2411), 'numpy.multiply', 'np.multiply', (['w', 'g'], {}), '(w, g)\n', (2405, 2411), True, 'import numpy as np\n'), ((3954, 3997), 'numpy.concatenate', 'np.concatenate', (['[geometry, spectra]'], {'axis': '(1)'}), '([geometry, spectra], axis=1)\n', (3968, 3997), True, 'import numpy as np\n'), ((4049, 4093), 'numpy.savetxt', 'np.savetxt', (['datafile', 'concate'], {'delimiter': '""","""'}), "(datafile, concate, delimiter=',')\n", (4059, 4093), True, 'import numpy as np\n'), ((4325, 4368), 'numpy.concatenate', 'np.concatenate', (['[geometry, spectra]'], {'axis': '(1)'}), '([geometry, spectra], axis=1)\n', (4339, 4368), True, 'import numpy as np\n'), ((4419, 4463), 'numpy.savetxt', 'np.savetxt', (['datafile', 'concate'], {'delimiter': '""","""'}), "(datafile, concate, delimiter=',')\n", (4429, 4463), True, 'import numpy as np\n'), ((1730, 1744), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (1737, 1744), True, 'import numpy as np\n'), ((1777, 1791), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', 
(1784, 1791), True, 'import numpy as np\n'), ((2533, 2547), 'numpy.power', 'np.power', (['w', '(2)'], {}), '(w, 2)\n', (2541, 2547), True, 'import numpy as np\n'), ((2549, 2563), 'numpy.power', 'np.power', (['g', '(2)'], {}), '(g, 2)\n', (2557, 2563), True, 'import numpy as np\n'), ((2456, 2471), 'numpy.power', 'np.power', (['w0', '(2)'], {}), '(w0, 2)\n', (2464, 2471), True, 'import numpy as np\n'), ((2474, 2488), 'numpy.power', 'np.power', (['w', '(2)'], {}), '(w, 2)\n', (2482, 2488), True, 'import numpy as np\n')]
|
import numpy as np
from ..colors import Color
from .widget import Widget, overlapping_region
from .widget_data_structures import Point, Size, Rect
class _Root(Widget):
"""
Root widget. Meant to be instantiated by the `App` class. Renders to terminal.
"""
def __init__(self, app, env_out, default_char, default_color: Color):
self._app = app
self.env_out = env_out
self.default_char = default_char
self.default_color = default_color
self.children = [ ]
self.resize(env_out.get_size())
def resize(self, dim: Size):
"""
Resize canvas. Last render is erased.
"""
self.env_out.erase_screen()
self.env_out.flush()
self._dim = dim
self._last_canvas = np.full(dim, self.default_char, dtype=object)
self._last_colors = np.full((*dim, 6), self.default_color, dtype=np.uint8)
self.canvas = np.full_like(self._last_canvas, "><") # "><" will guarantee an entire screen redraw.
self.colors = self._last_colors.copy()
# Buffer arrays to re-use in the `render` method:
self._char_diffs = np.zeros_like(self.canvas, dtype=np.bool8)
self._color_diffs = np.zeros_like(self.colors, dtype=np.bool8)
self._reduced_color_diffs = np.zeros_like(self.canvas, dtype=np.bool8)
for child in self.children:
child.update_geometry()
@property
def top(self):
return 0
@property
def left(self):
return 0
@property
def pos(self):
return Point(0, 0)
@property
def absolute_pos(self):
return Point(0, 0)
@property
def is_transparent(self):
return False
@property
def is_visible(self):
return True
@property
def parent(self):
return None
@property
def root(self):
return self
@property
def app(self):
return self._app
def absolute_to_relative_coords(self, coord):
return coord
def render(self):
"""
Paint canvas. Render to terminal.
"""
# Swap canvas with last render:
self.canvas, self._last_canvas = self._last_canvas, self.canvas
self.colors, self._last_colors = self._last_colors, self.colors
# Bring arrays into locals:
canvas = self.canvas
colors = self.colors
char_diffs = self._char_diffs
color_diffs = self._color_diffs
reduced_color_diffs = self._reduced_color_diffs
env_out = self.env_out
write = env_out._buffer.append
# Erase canvas:
canvas[:] = self.default_char
colors[:, :] = self.default_color
overlap = overlapping_region
height, width = canvas.shape
rect = Rect(
0,
0,
height,
width,
height,
width,
)
for child in self.children:
if region := overlap(rect, child):
dest_slice, child_rect = region
child.render(canvas[dest_slice], colors[dest_slice], child_rect)
# Find differences between current render and last render:
# (This is optimized version of `(last_canvas != canvas) | np.any(last_colors != colors, axis=-1)`
# that re-uses buffers instead of creating new arrays.)
np.not_equal(self._last_canvas, canvas, out=char_diffs)
np.not_equal(self._last_colors, colors, out=color_diffs)
np.any(color_diffs, axis=-1, out=reduced_color_diffs)
np.logical_or(char_diffs, reduced_color_diffs, out=char_diffs)
write("\x1b[?25l") # Hide cursor
ys, xs = np.nonzero(char_diffs)
for y, x, color, char in zip(ys, xs, colors[ys, xs], canvas[ys, xs]):
# The escape codes for moving the cursor and setting the color concatenated:
write("\x1b[{};{}H\x1b[0;38;2;{};{};{};48;2;{};{};{}m{}".format(y + 1, x + 1, *color, char))
write("\x1b[0m") # Reset attributes
env_out.flush()
def dispatch_press(self, key_press):
"""
Dispatch key press to descendants until handled.
"""
any(widget.dispatch_press(key_press) for widget in reversed(self.children))
def dispatch_click(self, mouse_event):
"""
        Dispatch mouse event to descendants until handled.
"""
any(widget.dispatch_click(mouse_event) for widget in reversed(self.children))
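# A minimal, self-contained sketch (illustrative only, not part of this module's API)
# showing that the buffer-reusing diff in `render` matches the naive one-liner
# `(last_canvas != canvas) | np.any(last_colors != colors, axis=-1)` it replaces:
#
#     last_canvas = np.array([["a", "b"], ["c", "d"]], dtype=object)
#     canvas      = np.array([["a", "x"], ["c", "d"]], dtype=object)
#     last_colors = np.zeros((2, 2, 6), dtype=np.uint8)
#     colors      = last_colors.copy(); colors[1, 0] = 255
#     char_diffs  = np.zeros((2, 2), dtype=np.bool8)
#     color_diffs = np.zeros((2, 2, 6), dtype=np.bool8)
#     reduced     = np.zeros((2, 2), dtype=np.bool8)
#     np.not_equal(last_canvas, canvas, out=char_diffs)    # chars that changed
#     np.not_equal(last_colors, colors, out=color_diffs)   # color channels that changed
#     np.any(color_diffs, axis=-1, out=reduced)            # cells with any color change
#     np.logical_or(char_diffs, reduced, out=char_diffs)   # cells needing a repaint
#     assert (char_diffs == ((last_canvas != canvas)
#                            | np.any(last_colors != colors, axis=-1))).all()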
|
[
"numpy.full",
"numpy.full_like",
"numpy.zeros_like",
"numpy.not_equal",
"numpy.any",
"numpy.nonzero",
"numpy.logical_or"
] |
[((775, 820), 'numpy.full', 'np.full', (['dim', 'self.default_char'], {'dtype': 'object'}), '(dim, self.default_char, dtype=object)\n', (782, 820), True, 'import numpy as np\n'), ((849, 903), 'numpy.full', 'np.full', (['(*dim, 6)', 'self.default_color'], {'dtype': 'np.uint8'}), '((*dim, 6), self.default_color, dtype=np.uint8)\n', (856, 903), True, 'import numpy as np\n'), ((927, 964), 'numpy.full_like', 'np.full_like', (['self._last_canvas', '"""><"""'], {}), "(self._last_canvas, '><')\n", (939, 964), True, 'import numpy as np\n'), ((1146, 1188), 'numpy.zeros_like', 'np.zeros_like', (['self.canvas'], {'dtype': 'np.bool8'}), '(self.canvas, dtype=np.bool8)\n', (1159, 1188), True, 'import numpy as np\n'), ((1217, 1259), 'numpy.zeros_like', 'np.zeros_like', (['self.colors'], {'dtype': 'np.bool8'}), '(self.colors, dtype=np.bool8)\n', (1230, 1259), True, 'import numpy as np\n'), ((1296, 1338), 'numpy.zeros_like', 'np.zeros_like', (['self.canvas'], {'dtype': 'np.bool8'}), '(self.canvas, dtype=np.bool8)\n', (1309, 1338), True, 'import numpy as np\n'), ((3369, 3424), 'numpy.not_equal', 'np.not_equal', (['self._last_canvas', 'canvas'], {'out': 'char_diffs'}), '(self._last_canvas, canvas, out=char_diffs)\n', (3381, 3424), True, 'import numpy as np\n'), ((3433, 3489), 'numpy.not_equal', 'np.not_equal', (['self._last_colors', 'colors'], {'out': 'color_diffs'}), '(self._last_colors, colors, out=color_diffs)\n', (3445, 3489), True, 'import numpy as np\n'), ((3498, 3551), 'numpy.any', 'np.any', (['color_diffs'], {'axis': '(-1)', 'out': 'reduced_color_diffs'}), '(color_diffs, axis=-1, out=reduced_color_diffs)\n', (3504, 3551), True, 'import numpy as np\n'), ((3560, 3622), 'numpy.logical_or', 'np.logical_or', (['char_diffs', 'reduced_color_diffs'], {'out': 'char_diffs'}), '(char_diffs, reduced_color_diffs, out=char_diffs)\n', (3573, 3622), True, 'import numpy as np\n'), ((3684, 3706), 'numpy.nonzero', 'np.nonzero', (['char_diffs'], {}), '(char_diffs)\n', (3694, 3706), True, 'import numpy as np\n')]
|
import logging
import numpy as np
from openpnm.utils import SettingsAttr, Docorator
from openpnm.integrators import ScipyRK45
from openpnm.algorithms import GenericAlgorithm
from openpnm.algorithms._solution import SolutionContainer, TransientSolution
logger = logging.getLogger(__name__)
docstr = Docorator()
@docstr.dedent
class TransientMultiPhysicsSettings:
r"""
Parameters
----------
%(GenericAlgorithmSettings.parameters)s
algorithms: list
List of transient algorithm objects to be solved in a coupled manner
"""
algorithms = []
@docstr.dedent
class TransientMultiPhysics(GenericAlgorithm):
r"""
A subclass for transient multiphysics simulations.
"""
def __init__(self, algorithms, settings=None, **kwargs):
self.settings = SettingsAttr(TransientMultiPhysicsSettings, settings)
self.settings.algorithms = [alg.name for alg in algorithms]
self._algs = algorithms
super().__init__(settings=self.settings, **kwargs)
def run(self, x0, tspan, saveat=None, integrator=None):
"""
        Runs all of the transient algorithms simultaneously and returns the
        solution. (Parameter descriptions below are adapted from transient
        reactive transport.)
        Parameters
----------
x0 : ndarray or float
Array (or scalar) containing initial condition values.
tspan : array_like
Tuple (or array) containing the integration time span.
saveat : array_like or float, optional
If an array is passed, it signifies the time points at which
the solution is to be stored, and if a scalar is passed, it
refers to the interval at which the solution is to be stored.
integrator : Integrator, optional
            Integrator object which will be used to do the time stepping.
Can be instantiated using openpnm.integrators module.
Returns
-------
TransientSolution
The solution object, which is basically a numpy array with
the added functionality that it can be called to return the
solution at intermediate times (i.e., those not stored in the
solution object). In the case of multiphysics, the solution object
is a combined array of solutions for each physics. The solution
for each physics is available on each algorithm object
independently.
"""
logger.info('Running TransientMultiphysics')
if np.isscalar(saveat):
saveat = np.arange(*tspan, saveat)
if (saveat is not None) and (tspan[1] not in saveat):
saveat = np.hstack((saveat, [tspan[1]]))
integrator = ScipyRK45() if integrator is None else integrator
for i, alg in enumerate(self._algs):
# Perform pre-solve validations
alg._validate_settings()
alg._validate_data_health()
            # Write x0 to the algorithm object (needed by _update_iterative_props)
x0_i = self._get_x0(x0, i)
alg['pore.ic'] = x0_i = np.ones(alg.Np, dtype=float) * x0_i
alg._merge_inital_and_boundary_values()
# Build RHS (dx/dt = RHS), then integrate the system of ODEs
rhs = self._build_rhs()
# Integrate RHS using the given solver
soln = integrator.solve(rhs, x0, tspan, saveat)
# Return dictionary containing solution
self.soln = SolutionContainer()
for i, alg in enumerate(self._algs):
# Slice soln and attach as TransientSolution object to each alg
t = soln.t
x = soln[i*alg.Np:(i+1)*alg.Np, :]
alg.soln = TransientSolution(t, x)
# Add solution of each alg to solution dictionary
self.soln[alg.settings['quantity']] = alg.soln
return self.soln
def _run_special(self, x0): ...
def _build_rhs(self):
"""
Returns a function handle, which calculates dy/dt = rhs(y, t).
Notes
-----
``y`` is a composite array that contains ALL the variables that
the multiphysics algorithm solves for, e.g., if the constituent
algorithms are ``TransientFickianDiffusion``, and
``TransientFourierConduction``, ``y[0:Np-1]`` refers to the
concentration, and ``[Np:2*Np-1]`` refers to the temperature
values.
"""
def ode_func(t, y):
# Initialize RHS
rhs = []
for i, alg in enumerate(self._algs):
# Get x from y, assume alg.Np is same for all algs
x = self._get_x0(y, i) # again use helper function
# Store x onto algorithm,
alg.x = x
# Build A and b
alg._update_A_and_b()
A = alg.A.tocsc()
b = alg.b
# Retrieve volume
V = alg.network[alg.settings["pore_volume"]]
                # Calculate RHS
rhs_alg = np.hstack(-A.dot(x) + b)/V
rhs = np.hstack((rhs, rhs_alg))
return rhs
return ode_func
def _get_x0(self, x0, i):
tmp = [alg.Np for alg in self._algs]
idx_end = np.cumsum(tmp)
idx_start = np.hstack((0, idx_end[:-1]))
x0 = x0[idx_start[i]:idx_end[i]]
return x0
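# A minimal usage sketch (illustrative only; `pn`, `fickian` and `fourier` are assumed,
# hypothetical network/algorithm objects that are not defined in this module):
#
#     tmp = TransientMultiPhysics(algorithms=[fickian, fourier], network=pn)
#     x0 = np.zeros(2 * pn.Np)             # stacked ICs, one block of Np values per algorithm
#     soln = tmp.run(x0, tspan=(0, 10), saveat=0.5)
#     c = soln['pore.concentration']       # per-quantity TransientSolution; c(5.0) interpolates at t=5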
|
[
"openpnm.algorithms._solution.TransientSolution",
"numpy.isscalar",
"openpnm.utils.Docorator",
"numpy.ones",
"openpnm.algorithms._solution.SolutionContainer",
"numpy.hstack",
"numpy.cumsum",
"openpnm.integrators.ScipyRK45",
"numpy.arange",
"openpnm.utils.SettingsAttr",
"logging.getLogger"
] |
[((261, 288), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (278, 288), False, 'import logging\n'), ((298, 309), 'openpnm.utils.Docorator', 'Docorator', ([], {}), '()\n', (307, 309), False, 'from openpnm.utils import SettingsAttr, Docorator\n'), ((797, 850), 'openpnm.utils.SettingsAttr', 'SettingsAttr', (['TransientMultiPhysicsSettings', 'settings'], {}), '(TransientMultiPhysicsSettings, settings)\n', (809, 850), False, 'from openpnm.utils import SettingsAttr, Docorator\n'), ((2497, 2516), 'numpy.isscalar', 'np.isscalar', (['saveat'], {}), '(saveat)\n', (2508, 2516), True, 'import numpy as np\n'), ((3432, 3451), 'openpnm.algorithms._solution.SolutionContainer', 'SolutionContainer', ([], {}), '()\n', (3449, 3451), False, 'from openpnm.algorithms._solution import SolutionContainer, TransientSolution\n'), ((5210, 5224), 'numpy.cumsum', 'np.cumsum', (['tmp'], {}), '(tmp)\n', (5219, 5224), True, 'import numpy as np\n'), ((5245, 5273), 'numpy.hstack', 'np.hstack', (['(0, idx_end[:-1])'], {}), '((0, idx_end[:-1]))\n', (5254, 5273), True, 'import numpy as np\n'), ((2539, 2564), 'numpy.arange', 'np.arange', (['*tspan', 'saveat'], {}), '(*tspan, saveat)\n', (2548, 2564), True, 'import numpy as np\n'), ((2648, 2679), 'numpy.hstack', 'np.hstack', (['(saveat, [tspan[1]])'], {}), '((saveat, [tspan[1]]))\n', (2657, 2679), True, 'import numpy as np\n'), ((2701, 2712), 'openpnm.integrators.ScipyRK45', 'ScipyRK45', ([], {}), '()\n', (2710, 2712), False, 'from openpnm.integrators import ScipyRK45\n'), ((3666, 3689), 'openpnm.algorithms._solution.TransientSolution', 'TransientSolution', (['t', 'x'], {}), '(t, x)\n', (3683, 3689), False, 'from openpnm.algorithms._solution import SolutionContainer, TransientSolution\n'), ((3072, 3100), 'numpy.ones', 'np.ones', (['alg.Np'], {'dtype': 'float'}), '(alg.Np, dtype=float)\n', (3079, 3100), True, 'import numpy as np\n'), ((5042, 5067), 'numpy.hstack', 'np.hstack', (['(rhs, rhs_alg)'], {}), '((rhs, rhs_alg))\n', (5051, 5067), True, 'import numpy as np\n')]
|
__copyright__ = "Copyright (C) 2019 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pystella as ps
import pytest
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
@pytest.mark.parametrize("dtype", [np.float64])
@pytest.mark.parametrize("Stepper", [ps.RungeKutta4, ps.LowStorageRK54])
def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False):
if proc_shape != (1, 1, 1):
pytest.skip("test expansion only on one rank")
def sol(w, t):
x = (1 + 3*w)
return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x)
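    # Worked check of the closed form above (pure algebra): at t = 0 it reduces to
    #   (x * (0 + 2/x))**(2/x) / 2**(2/x) = 2**(2/x) / 2**(2/x) = 1,
    # matching the initial scale factor passed to ps.Expansion(energy(1.), ...) below.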
from pystella.step import LowStorageRKStepper
is_low_storage = LowStorageRKStepper in Stepper.__bases__
for w in [0, 1/3, 1/2, 1, -1/4]:
def energy(a):
return a**(-3-3*w)
def pressure(a):
return w * energy(a)
t = 0
dt = .005
expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi))
while t <= 10. - dt:
for s in range(expand.stepper.num_stages):
slc = (0) if is_low_storage else (0 if s == 0 else 1)
expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt)
t += dt
slc = () if is_low_storage else (0)
order = expand.stepper.expected_order
rtol = dt**order
print(order,
w,
expand.a[slc]/sol(w, t) - 1,
expand.constraint(energy(expand.a[slc])))
assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0), \
f"FLRW solution inaccurate for {w=}"
assert expand.constraint(energy(expand.a[slc])) < rtol, \
f"FLRW solution disobeying constraint for {w=}"
if __name__ == "__main__":
from common import parser
args = parser.parse_args()
from pystella.step import all_steppers
for stepper in all_steppers[-5:]:
test_expansion(
None, proc_shape=args.proc_shape, dtype=args.dtype, timing=args.timing,
Stepper=stepper,
)
|
[
"pytest.mark.parametrize",
"numpy.sqrt",
"pytest.skip",
"common.parser.parse_args"
] |
[((1284, 1330), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float64]'], {}), "('dtype', [np.float64])\n", (1307, 1330), False, 'import pytest\n'), ((1333, 1404), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Stepper"""', '[ps.RungeKutta4, ps.LowStorageRK54]'], {}), "('Stepper', [ps.RungeKutta4, ps.LowStorageRK54])\n", (1356, 1404), False, 'import pytest\n'), ((2919, 2938), 'common.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (2936, 2938), False, 'from common import parser\n'), ((1523, 1569), 'pytest.skip', 'pytest.skip', (['"""test expansion only on one rank"""'], {}), "('test expansion only on one rank')\n", (1534, 1569), False, 'import pytest\n'), ((2038, 2058), 'numpy.sqrt', 'np.sqrt', (['(8.0 * np.pi)'], {}), '(8.0 * np.pi)\n', (2045, 2058), True, 'import numpy as np\n'), ((1637, 1647), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1644, 1647), True, 'import numpy as np\n')]
|
'''
Setup file for Operator and Hamiltonian Generators.
'''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config=Configuration('hgen',parent_package,top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
[
"numpy.distutils.core.setup",
"numpy.distutils.misc_util.Configuration"
] |
[((179, 226), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""hgen"""', 'parent_package', 'top_path'], {}), "('hgen', parent_package, top_path)\n", (192, 226), False, 'from numpy.distutils.misc_util import Configuration\n'), ((318, 352), 'numpy.distutils.core.setup', 'setup', ([], {'configuration': 'configuration'}), '(configuration=configuration)\n', (323, 352), False, 'from numpy.distutils.core import setup\n')]
|
from setuptools import setup
import numpy
setup(
name='CIGAN',
version='0.2dev',
packages=['vpa'],
license='MIT License',
include_dirs=[numpy.get_include(),],
)
|
[
"numpy.get_include"
] |
[((157, 176), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (174, 176), False, 'import numpy\n')]
|
#!/usr/bin/env python3
import re, argparse, numpy as np, glob, os
#from sklearn.neighbors.kde import KernelDensity
import matplotlib.pyplot as plt
from extractTargetFilesNonDim import epsNuFromRe
from extractTargetFilesNonDim import getAllData
from computeSpectraNonDim import readAllSpectra
colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99']
colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999']
#colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928']
#colors = ['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da']
def findDirectory(runspath, re, token):
retoken = 'RE%03d' % re
alldirs = glob.glob(runspath + '/*')
for dirn in alldirs:
if retoken not in dirn: continue
if token not in dirn: continue
return dirn
    assert False, 're-token combo not found'
def main_integral(runspath, target, REs, tokens, labels):
nBins = 2 * 16//2 - 1
modes = np.arange(1, nBins+1, dtype=np.float64) # assumes box is 2 pi
plt.figure()
#REs = findAllParams(path)
nRes = len(REs)
axes, lines = [], []
for j in range(nRes):
axes += [ plt.subplot(1, nRes, j+1) ]
for j in range(nRes):
RE = REs[j]
# read target file
logSpectra, logEnStdev, _, _ = readAllSpectra(target, [RE])
for i in range(len(tokens)):
eps, nu = epsNuFromRe(RE)
dirn = findDirectory(runspath, RE, tokens[i])
runData = getAllData(dirn, eps, nu, nBins, fSkip=1)
logE = np.log(runData['spectra'])
avgLogSpec = np.mean(logE, axis=0)
assert(avgLogSpec.size == nBins)
LL = (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel()
print(LL.shape)
p = axes[j].plot(LL, modes, label=labels[i], color=colors[i])
#p = axes[j].plot(LL, modes, color=colors[i])
if j == 0: lines += [p]
#stdLogSpec = np.std(logE, axis=0)
#covLogSpec = np.cov(logE, rowvar=False)
#print(covLogSpec.shape)
axes[0].set_ylabel(r'$k$')
for j in range(nRes):
axes[j].set_title(r'$Re_\lambda$ = %d' % REs[j])
#axes[j].set_xscale("log")
axes[j].set_ylim([1, 15])
axes[j].grid()
axes[j].set_xlabel(r'$\frac{\log E(k) - \mu_{\log E(k)}}{\sigma_{\log E(k)}}$')
for j in range(1,nRes): axes[j].set_yticklabels([])
#axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0)
assert(len(lines) == len(labels))
#axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5))
axes[0].legend(bbox_to_anchor=(0.5, 0.5))
plt.tight_layout()
plt.show()
#axes[0].legend(loc='lower left')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description = "Compute a target file for RL agent from DNS data.")
parser.add_argument('--target', help="Path to target files directory")
parser.add_argument('--tokens', nargs='+', help="Text token distinguishing each series of runs")
parser.add_argument('--res', nargs='+', type=int, help="Reynolds numbers")
    parser.add_argument('--labels', nargs='+', help="Plot labels to associate with tokens")
    parser.add_argument('--runspath', help="Path to directory containing the runs")
args = parser.parse_args()
assert(len(args.tokens) == len(args.labels))
main_integral(args.runspath, args.target, args.res, args.tokens, args.labels)
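# Example invocation (script name, paths, tokens and Reynolds numbers are illustrative only):
#   python plot_llspectra.py --runspath ./runs --target ./target_data \
#       --res 65 76 88 --tokens GRU_ FFNN_ --labels 'GRU agent' 'FFNN agent'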
|
[
"matplotlib.pyplot.subplot",
"extractTargetFilesNonDim.epsNuFromRe",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.log",
"extractTargetFilesNonDim.getAllData",
"computeSpectraNonDim.readAllSpectra",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"glob.glob",
"matplotlib.pyplot.tight_layout"
] |
[((982, 1008), 'glob.glob', 'glob.glob', (["(runspath + '/*')"], {}), "(runspath + '/*')\n", (991, 1008), False, 'import re, argparse, numpy as np, glob, os\n'), ((1277, 1318), 'numpy.arange', 'np.arange', (['(1)', '(nBins + 1)'], {'dtype': 'np.float64'}), '(1, nBins + 1, dtype=np.float64)\n', (1286, 1318), True, 'import re, argparse, numpy as np, glob, os\n'), ((1343, 1355), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1353, 1355), True, 'import matplotlib.pyplot as plt\n'), ((2974, 2992), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2990, 2992), True, 'import matplotlib.pyplot as plt\n'), ((2997, 3007), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3005, 3007), True, 'import matplotlib.pyplot as plt\n'), ((3086, 3179), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute a target file for RL agent from DNS data."""'}), "(description=\n 'Compute a target file for RL agent from DNS data.')\n", (3109, 3179), False, 'import re, argparse, numpy as np, glob, os\n'), ((1617, 1645), 'computeSpectraNonDim.readAllSpectra', 'readAllSpectra', (['target', '[RE]'], {}), '(target, [RE])\n', (1631, 1645), False, 'from computeSpectraNonDim import readAllSpectra\n'), ((1476, 1503), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'nRes', '(j + 1)'], {}), '(1, nRes, j + 1)\n', (1487, 1503), True, 'import matplotlib.pyplot as plt\n'), ((1705, 1720), 'extractTargetFilesNonDim.epsNuFromRe', 'epsNuFromRe', (['RE'], {}), '(RE)\n', (1716, 1720), False, 'from extractTargetFilesNonDim import epsNuFromRe\n'), ((1801, 1842), 'extractTargetFilesNonDim.getAllData', 'getAllData', (['dirn', 'eps', 'nu', 'nBins'], {'fSkip': '(1)'}), '(dirn, eps, nu, nBins, fSkip=1)\n', (1811, 1842), False, 'from extractTargetFilesNonDim import getAllData\n'), ((1862, 1888), 'numpy.log', 'np.log', (["runData['spectra']"], {}), "(runData['spectra'])\n", (1868, 1888), True, 'import re, argparse, numpy as np, glob, os\n'), ((1914, 1935), 'numpy.mean', 'np.mean', (['logE'], {'axis': '(0)'}), '(logE, axis=0)\n', (1921, 1935), True, 'import re, argparse, numpy as np, glob, os\n')]
|
import numpy as np
import pyvista as pv
from pylie import SE3
class Viewer3D:
"""Visualises the lab in 3D"""
def __init__(self):
"""Sets up the 3D viewer"""
self._plotter = pv.Plotter()
# Add scene origin and plane
scene_plane = pv.Plane(i_size=1000, j_size=1000)
self._plotter.add_mesh(scene_plane, show_edges=True, style='wireframe')
self._add_axis(SE3(), 100)
# Set camera.
self._plotter.camera.position = (100, 1500, -500)
self._plotter.camera.up = (-0.042739, -0.226979, -0.972961)
self._plotter.camera.focal_point = (100, 300, -200)
self._plotter.show(title="3D visualization", interactive_update=True)
def add_body_axes(self, pose_local_body: SE3):
"""Add axes representing the body pose to the 3D world
:param pose_local_body: The pose of the body in the local coordinate system.
"""
self._add_axis(pose_local_body)
def add_camera_axes(self, pose_local_camera: SE3):
"""Add axes representing the camera pose to the 3D world
:param pose_local_camera: The pose of the camera in the local coordinate system.
"""
self._add_axis(pose_local_camera)
def add_camera_frustum(self, camera_model, image):
"""Add a frustum representing the camera model and image to the 3D world"""
self._add_frustum(camera_model, image)
def _add_axis(self, pose: SE3, scale=10.0):
T = pose.to_matrix()
point = pv.Sphere(radius=0.1*scale)
point.transform(T)
self._plotter.add_mesh(point)
x_arrow = pv.Arrow(direction=(1.0, 0.0, 0.0), scale=scale)
x_arrow.transform(T)
self._plotter.add_mesh(x_arrow, color='red')
y_arrow = pv.Arrow(direction=(0.0, 1.0, 0.0), scale=scale)
y_arrow.transform(T)
self._plotter.add_mesh(y_arrow, color='green')
z_arrow = pv.Arrow(direction=(0.0, 0.0, 1.0), scale=scale)
z_arrow.transform(T)
self._plotter.add_mesh(z_arrow, color='blue')
def _add_frustum(self, camera_model, image, scale=20.0):
S = camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale, scale, 1.0])
img_height, img_width = image.shape[:2]
point_bottom_left = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., img_height-1.])))
point_bottom_right = np.squeeze(camera_model.pixel_to_normalised(np.array([0., img_height-1.])))
point_top_left = np.squeeze(camera_model.pixel_to_normalised(np.array([0., 0.])))
point_top_right = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., 0.])))
point_focal = np.zeros([3])
pyramid = pv.Pyramid([point_bottom_left, point_bottom_right, point_top_left, point_top_right, point_focal])
pyramid.transform(S)
rectangle = pv.Rectangle([point_bottom_left, point_bottom_right, point_top_left, point_top_right])
rectangle.texture_map_to_plane(inplace=True)
rectangle.transform(S)
image_flipped_rgb = image[::-1, :, ::-1].copy()
tex = pv.numpy_to_texture(image_flipped_rgb)
self._plotter.add_mesh(pyramid, show_edges=True, style='wireframe')
self._plotter.add_mesh(rectangle, texture=tex, opacity=0.9)
def update(self, time=500):
self._plotter.update(time)
def show(self):
self._plotter.show()
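# A minimal usage sketch (illustrative only; `camera_model` and `image` are assumed to be
# supplied by the calling code, e.g. a calibrated camera wrapper and a BGR image array):
#
#     viewer = Viewer3D()
#     viewer.add_body_axes(SE3())
#     viewer.add_camera_axes(SE3())
#     # viewer.add_camera_frustum(camera_model, image)
#     while True:
#         viewer.update(time=50)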
|
[
"numpy.zeros",
"pyvista.Plotter",
"pyvista.Pyramid",
"pyvista.numpy_to_texture",
"pyvista.Plane",
"numpy.array",
"pyvista.Rectangle",
"pyvista.Arrow",
"numpy.diag",
"pylie.SE3",
"pyvista.Sphere"
] |
[((200, 212), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (210, 212), True, 'import pyvista as pv\n'), ((273, 307), 'pyvista.Plane', 'pv.Plane', ([], {'i_size': '(1000)', 'j_size': '(1000)'}), '(i_size=1000, j_size=1000)\n', (281, 307), True, 'import pyvista as pv\n'), ((1511, 1540), 'pyvista.Sphere', 'pv.Sphere', ([], {'radius': '(0.1 * scale)'}), '(radius=0.1 * scale)\n', (1520, 1540), True, 'import pyvista as pv\n'), ((1623, 1671), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '(1.0, 0.0, 0.0)', 'scale': 'scale'}), '(direction=(1.0, 0.0, 0.0), scale=scale)\n', (1631, 1671), True, 'import pyvista as pv\n'), ((1773, 1821), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '(0.0, 1.0, 0.0)', 'scale': 'scale'}), '(direction=(0.0, 1.0, 0.0), scale=scale)\n', (1781, 1821), True, 'import pyvista as pv\n'), ((1925, 1973), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '(0.0, 0.0, 1.0)', 'scale': 'scale'}), '(direction=(0.0, 0.0, 1.0), scale=scale)\n', (1933, 1973), True, 'import pyvista as pv\n'), ((2695, 2708), 'numpy.zeros', 'np.zeros', (['[3]'], {}), '([3])\n', (2703, 2708), True, 'import numpy as np\n'), ((2728, 2829), 'pyvista.Pyramid', 'pv.Pyramid', (['[point_bottom_left, point_bottom_right, point_top_left, point_top_right,\n point_focal]'], {}), '([point_bottom_left, point_bottom_right, point_top_left,\n point_top_right, point_focal])\n', (2738, 2829), True, 'import pyvista as pv\n'), ((2876, 2966), 'pyvista.Rectangle', 'pv.Rectangle', (['[point_bottom_left, point_bottom_right, point_top_left, point_top_right]'], {}), '([point_bottom_left, point_bottom_right, point_top_left,\n point_top_right])\n', (2888, 2966), True, 'import pyvista as pv\n'), ((3118, 3156), 'pyvista.numpy_to_texture', 'pv.numpy_to_texture', (['image_flipped_rgb'], {}), '(image_flipped_rgb)\n', (3137, 3156), True, 'import pyvista as pv\n'), ((411, 416), 'pylie.SE3', 'SE3', ([], {}), '()\n', (414, 416), False, 'from pylie import SE3\n'), ((2176, 2211), 'numpy.diag', 'np.diag', (['[scale, scale, scale, 1.0]'], {}), '([scale, scale, scale, 1.0])\n', (2183, 2211), True, 'import numpy as np\n'), ((2334, 2379), 'numpy.array', 'np.array', (['[img_width - 1.0, img_height - 1.0]'], {}), '([img_width - 1.0, img_height - 1.0])\n', (2342, 2379), True, 'import numpy as np\n'), ((2449, 2482), 'numpy.array', 'np.array', (['[0.0, img_height - 1.0]'], {}), '([0.0, img_height - 1.0])\n', (2457, 2482), True, 'import numpy as np\n'), ((2550, 2570), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (2558, 2570), True, 'import numpy as np\n'), ((2641, 2673), 'numpy.array', 'np.array', (['[img_width - 1.0, 0.0]'], {}), '([img_width - 1.0, 0.0])\n', (2649, 2673), True, 'import numpy as np\n')]
|
#Callbacks
"""Create training callbacks"""
import os
import numpy as np
import pandas as pd
from datetime import datetime
from DeepTreeAttention.utils import metrics
from DeepTreeAttention.visualization import visualize
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import Callback, TensorBoard
from tensorflow import expand_dims
class F1Callback(Callback):
def __init__(self, experiment, eval_dataset, y_true, label_names, submodel, train_shp, n=10):
"""F1 callback
Args:
n: number of epochs to run. If n=4, function will run every 4 epochs
y_true: instead of iterating through the dataset every time, just do it once and pass the true labels to the function
"""
self.experiment = experiment
self.eval_dataset = eval_dataset
self.label_names = label_names
self.submodel = submodel
self.n = n
self.train_shp = train_shp
self.y_true = y_true
def on_train_end(self, logs={}):
y_pred = []
sites = []
#gather site and species matrix
y_pred = self.model.predict(self.eval_dataset)
if self.submodel in ["spectral","spatial"]:
y_pred = y_pred[0]
#F1
macro, micro = metrics.f1_scores(self.y_true, y_pred)
self.experiment.log_metric("Final MicroF1", micro)
self.experiment.log_metric("Final MacroF1", macro)
        #Log number of predictions to make sure it's constant
self.experiment.log_metric("Prediction samples",y_pred.shape[0])
results = pd.DataFrame({"true":np.argmax(self.y_true, 1),"predicted":np.argmax(y_pred, 1)})
#assign labels
if self.label_names:
results["true_taxonID"] = results.true.apply(lambda x: self.label_names[x])
results["predicted_taxonID"] = results.predicted.apply(lambda x: self.label_names[x])
#Within site confusion
site_lists = self.train_shp.groupby("taxonID").siteID.unique()
site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists)
self.experiment.log_metric(name = "Within_site confusion[training]", value = site_confusion)
plot_lists = self.train_shp.groupby("taxonID").plotID.unique()
plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists)
self.experiment.log_metric(name = "Within_plot confusion[training]", value = plot_confusion)
domain_lists = self.train_shp.groupby("taxonID").domainID.unique()
domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists)
self.experiment.log_metric(name = "Within_domain confusion[training]", value = domain_confusion)
#Genus of all the different taxonID variants should be the same, take the first
scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict()
genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict)
self.experiment.log_metric(name = "Within Genus confusion", value = genus_confusion)
#Most confused
most_confused = results.groupby(["true_taxonID","predicted_taxonID"]).size().reset_index(name="count")
most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values("count", ascending=False)
self.experiment.log_table("most_confused.csv",most_confused.values)
def on_epoch_end(self, epoch, logs={}):
if not epoch % self.n == 0:
return None
y_pred = []
sites = []
#gather site and species matrix
y_pred = self.model.predict(self.eval_dataset)
if self.submodel in ["spectral","spatial"]:
y_pred = y_pred[0]
#F1
macro, micro = metrics.f1_scores(self.y_true, y_pred)
self.experiment.log_metric("MicroF1", micro)
self.experiment.log_metric("MacroF1", macro)
        #Log number of predictions to make sure it's constant
self.experiment.log_metric("Prediction samples",y_pred.shape[0])
class ConfusionMatrixCallback(Callback):
def __init__(self, experiment, dataset, label_names, y_true, submodel):
self.experiment = experiment
self.dataset = dataset
self.label_names = label_names
self.submodel = submodel
self.y_true = y_true
def on_train_end(self, epoch, logs={}):
y_pred = self.model.predict(self.dataset)
        if self.submodel == "metadata":
name = "Metadata Confusion Matrix"
elif self.submodel in ["ensemble"]:
name = "Ensemble Matrix"
else:
name = "Confusion Matrix"
cm = self.experiment.log_confusion_matrix(
self.y_true,
y_pred,
title=name,
file_name= name,
labels=self.label_names,
max_categories=90,
max_example_per_cell=1)
class ImageCallback(Callback):
def __init__(self, experiment, dataset, label_names, submodel=False):
self.experiment = experiment
self.dataset = dataset
self.label_names = label_names
self.submodel = submodel
def on_train_end(self, epoch, logs={}):
"""Plot sample images with labels annotated"""
        #fill until there are at least 20 images
images = []
y_pred = []
y_true = []
limit = 20
num_images = 0
for data, label in self.dataset:
if num_images < limit:
pred = self.model.predict(data)
images.append(data)
if self.submodel:
y_pred.append(pred[0])
y_true.append(label[0])
else:
y_pred.append(pred)
y_true.append(label)
num_images += label.shape[0]
else:
break
images = np.vstack(images)
y_true = np.concatenate(y_true)
y_pred = np.concatenate(y_pred)
y_true = np.argmax(y_true, axis=1)
y_pred = np.argmax(y_pred, axis=1)
true_taxonID = [self.label_names[x] for x in y_true]
pred_taxonID = [self.label_names[x] for x in y_pred]
counter = 0
for label, prediction, image in zip(true_taxonID, pred_taxonID, images):
figure = visualize.plot_prediction(image=image,
prediction=prediction,
label=label)
self.experiment.log_figure(figure_name="{}_{}".format(label, counter))
counter += 1
def create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False):
"""Create a set of callbacks
Args:
experiment: a comet experiment object
train_data: a tf data object to generate data
validation_data: a tf data object to generate data
train_shp: the original shapefile for the train data to check site error
"""
#turn off callbacks for metadata
callback_list = []
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=10,
min_delta=0.1,
min_lr=0.00001,
verbose=1)
callback_list.append(reduce_lr)
#Get the true labels since they are not shuffled
y_true = [ ]
for data, label in validation_data:
if submodel in ["spatial","spectral"]:
label = label[0]
y_true.append(label)
y_true = np.concatenate(y_true)
if not submodel in ["spatial","spectral"]:
confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel)
callback_list.append(confusion_matrix)
f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp)
callback_list.append(f1)
#if submodel is None:
#plot_images = ImageCallback(experiment, validation_data, label_names, submodel=submodel)
#callback_list.append(plot_images)
if log_dir is not None:
print("saving tensorboard logs at {}".format(log_dir))
tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0, profile_batch=30)
callback_list.append(tensorboard)
return callback_list
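# A minimal usage sketch (illustrative only; the comet `experiment`, the tf.data objects,
# `train_shp` and the compiled `model` are assumed to exist in the calling training script):
#
#     callback_list = create(experiment, train_data, validation_data, train_shp,
#                            log_dir="/tmp/logs", label_names=label_names, submodel=False)
#     model.fit(train_data, validation_data=validation_data, epochs=100,
#               callbacks=callback_list)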
|
[
"numpy.concatenate",
"numpy.argmax",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"DeepTreeAttention.utils.metrics.f1_scores",
"DeepTreeAttention.utils.metrics.site_confusion",
"DeepTreeAttention.utils.metrics.genus_confusion",
"DeepTreeAttention.visualization.visualize.plot_prediction",
"tensorflow.keras.callbacks.TensorBoard",
"numpy.vstack"
] |
[((7816, 7923), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.5)', 'patience': '(10)', 'min_delta': '(0.1)', 'min_lr': '(1e-05)', 'verbose': '(1)'}), "(monitor='val_loss', factor=0.5, patience=10, min_delta=\n 0.1, min_lr=1e-05, verbose=1)\n", (7833, 7923), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau\n'), ((8369, 8391), 'numpy.concatenate', 'np.concatenate', (['y_true'], {}), '(y_true)\n', (8383, 8391), True, 'import numpy as np\n'), ((1328, 1366), 'DeepTreeAttention.utils.metrics.f1_scores', 'metrics.f1_scores', (['self.y_true', 'y_pred'], {}), '(self.y_true, y_pred)\n', (1345, 1366), False, 'from DeepTreeAttention.utils import metrics\n'), ((4320, 4358), 'DeepTreeAttention.utils.metrics.f1_scores', 'metrics.f1_scores', (['self.y_true', 'y_pred'], {}), '(self.y_true, y_pred)\n', (4337, 4358), False, 'from DeepTreeAttention.utils import metrics\n'), ((6618, 6635), 'numpy.vstack', 'np.vstack', (['images'], {}), '(images)\n', (6627, 6635), True, 'import numpy as np\n'), ((6653, 6675), 'numpy.concatenate', 'np.concatenate', (['y_true'], {}), '(y_true)\n', (6667, 6675), True, 'import numpy as np\n'), ((6693, 6715), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {}), '(y_pred)\n', (6707, 6715), True, 'import numpy as np\n'), ((6734, 6759), 'numpy.argmax', 'np.argmax', (['y_true'], {'axis': '(1)'}), '(y_true, axis=1)\n', (6743, 6759), True, 'import numpy as np\n'), ((6777, 6802), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (6786, 6802), True, 'import numpy as np\n'), ((9126, 9190), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(0)', 'profile_batch': '(30)'}), '(log_dir=log_dir, histogram_freq=0, profile_batch=30)\n', (9137, 9190), False, 'from tensorflow.keras.callbacks import Callback, TensorBoard\n'), ((2118, 2231), 'DeepTreeAttention.utils.metrics.site_confusion', 'metrics.site_confusion', ([], {'y_true': 'results.true_taxonID', 'y_pred': 'results.predicted_taxonID', 'site_lists': 'site_lists'}), '(y_true=results.true_taxonID, y_pred=results.\n predicted_taxonID, site_lists=site_lists)\n', (2140, 2231), False, 'from DeepTreeAttention.utils import metrics\n'), ((2457, 2570), 'DeepTreeAttention.utils.metrics.site_confusion', 'metrics.site_confusion', ([], {'y_true': 'results.true_taxonID', 'y_pred': 'results.predicted_taxonID', 'site_lists': 'plot_lists'}), '(y_true=results.true_taxonID, y_pred=results.\n predicted_taxonID, site_lists=plot_lists)\n', (2479, 2570), False, 'from DeepTreeAttention.utils import metrics\n'), ((2810, 2925), 'DeepTreeAttention.utils.metrics.site_confusion', 'metrics.site_confusion', ([], {'y_true': 'results.true_taxonID', 'y_pred': 'results.predicted_taxonID', 'site_lists': 'domain_lists'}), '(y_true=results.true_taxonID, y_pred=results.\n predicted_taxonID, site_lists=domain_lists)\n', (2832, 2925), False, 'from DeepTreeAttention.utils import metrics\n'), ((3300, 3424), 'DeepTreeAttention.utils.metrics.genus_confusion', 'metrics.genus_confusion', ([], {'y_true': 'results.true_taxonID', 'y_pred': 'results.predicted_taxonID', 'scientific_dict': 'scientific_dict'}), '(y_true=results.true_taxonID, y_pred=results.\n predicted_taxonID, scientific_dict=scientific_dict)\n', (3323, 3424), False, 'from DeepTreeAttention.utils import metrics\n'), ((7049, 7123), 'DeepTreeAttention.visualization.visualize.plot_prediction', 'visualize.plot_prediction', ([], {'image': 'image', 'prediction': 
'prediction', 'label': 'label'}), '(image=image, prediction=prediction, label=label)\n', (7074, 7123), False, 'from DeepTreeAttention.visualization import visualize\n'), ((1667, 1692), 'numpy.argmax', 'np.argmax', (['self.y_true', '(1)'], {}), '(self.y_true, 1)\n', (1676, 1692), True, 'import numpy as np\n'), ((1705, 1725), 'numpy.argmax', 'np.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (1714, 1725), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""Remove embedded signalalign analyses from files"""
########################################################################
# File: remove_sa_analyses.py
# executable: remove_sa_analyses.py
#
# Author: <NAME>
# History: 02/06/19 Created
########################################################################
import os
from py3helpers.utils import list_dir
from py3helpers.multiprocess import *
from argparse import ArgumentParser
from signalalign.fast5 import Fast5
import numpy as np
def parse_args():
parser = ArgumentParser(description=__doc__)
# required arguments
parser.add_argument('--directory', '-d', required=True, action='store',
dest='dir', type=str, default=None,
help="Path to directory of fast5 files")
parser.add_argument('--analysis', required=False, action='store_true',
dest='analysis', default=False,
help="Remove all analysis files")
parser.add_argument('--basecall', required=False, action='store_true',
dest='basecall', default=False,
help="Remove all basecall files")
parser.add_argument('--signalalign', required=False, action='store_true',
dest='signalalign', default=False,
help="Remove all signalalign files")
parser.add_argument('--threads', required=False, action='store',
dest='threads', default=1, type=int,
help="number of threads to run")
args = parser.parse_args()
return args
def remove_sa_analyses(fast5):
"""Remove signalalign analyses from a fast5 file"""
    assert os.path.exists(fast5), "Fast5 path {} does not exist".format(fast5)
fh = Fast5(fast5, read='r+')
counter = 0
for analyses in [x for x in list(fh["Analyses"].keys()) if "SignalAlign" in x]:
fh.delete(os.path.join("Analyses", analyses))
counter += 1
fh = fh.repack()
fh.close()
return counter
def remove_basecall_analyses(fast5):
"""Remove basecall analyses from a fast5 file"""
    assert os.path.exists(fast5), "Fast5 path {} does not exist".format(fast5)
fh = Fast5(fast5, read='r+')
counter = 0
for analyses in [x for x in list(fh["Analyses"].keys()) if "Basecall" in x]:
fh.delete(os.path.join("Analyses", analyses))
counter += 1
fh = fh.repack()
fh.close()
return counter
def remove_analyses(fast5):
"""Remove analyses from a fast5 file"""
    assert os.path.exists(fast5), "Fast5 path {} does not exist".format(fast5)
fh = Fast5(fast5, read='r+')
counter = 0
for analyses in [x for x in list(fh["Analyses"].keys())]:
fh.delete(os.path.join("Analyses", analyses))
counter += 1
fh.delete("Analyses")
fh = fh.repack()
fh.close()
return counter
def main():
args = parse_args()
function_to_run = None
if args.analysis:
function_to_run = remove_analyses
else:
if args.signalalign or not args.basecall:
function_to_run = remove_sa_analyses
elif args.basecall:
function_to_run = remove_basecall_analyses
assert function_to_run is not None, "Must select --analysis, --signalalign or --basecall."
service = BasicService(function_to_run, service_name="forward_multiprocess_aggregate_all_variantcalls")
files = list_dir(args.dir, ext="fast5")
total, failure, messages, output = run_service(service.run, files,
{}, ["fast5"], worker_count=args.threads)
print("Deleted {} analysis datasets deleted from {} files".format(np.asarray(output).sum(), len(files)))
if __name__ == '__main__':
main()
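# Example invocations (the fast5 directory path is illustrative only):
#   python remove_sa_analyses.py --directory /path/to/fast5_dir --signalalign --threads 4
#   python remove_sa_analyses.py -d /path/to/fast5_dir --analysis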
|
[
"argparse.ArgumentParser",
"numpy.asarray",
"os.path.exists",
"signalalign.fast5.Fast5",
"os.path.join",
"py3helpers.utils.list_dir"
] |
[((547, 582), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (561, 582), False, 'from argparse import ArgumentParser\n'), ((1720, 1741), 'os.path.exists', 'os.path.exists', (['fast5'], {}), '(fast5)\n', (1734, 1741), False, 'import os\n'), ((1794, 1817), 'signalalign.fast5.Fast5', 'Fast5', (['fast5'], {'read': '"""r+"""'}), "(fast5, read='r+')\n", (1799, 1817), False, 'from signalalign.fast5 import Fast5\n'), ((2151, 2172), 'os.path.exists', 'os.path.exists', (['fast5'], {}), '(fast5)\n', (2165, 2172), False, 'import os\n'), ((2225, 2248), 'signalalign.fast5.Fast5', 'Fast5', (['fast5'], {'read': '"""r+"""'}), "(fast5, read='r+')\n", (2230, 2248), False, 'from signalalign.fast5 import Fast5\n'), ((2561, 2582), 'os.path.exists', 'os.path.exists', (['fast5'], {}), '(fast5)\n', (2575, 2582), False, 'import os\n'), ((2635, 2658), 'signalalign.fast5.Fast5', 'Fast5', (['fast5'], {'read': '"""r+"""'}), "(fast5, read='r+')\n", (2640, 2658), False, 'from signalalign.fast5 import Fast5\n'), ((3431, 3462), 'py3helpers.utils.list_dir', 'list_dir', (['args.dir'], {'ext': '"""fast5"""'}), "(args.dir, ext='fast5')\n", (3439, 3462), False, 'from py3helpers.utils import list_dir\n'), ((1936, 1970), 'os.path.join', 'os.path.join', (['"""Analyses"""', 'analyses'], {}), "('Analyses', analyses)\n", (1948, 1970), False, 'import os\n'), ((2364, 2398), 'os.path.join', 'os.path.join', (['"""Analyses"""', 'analyses'], {}), "('Analyses', analyses)\n", (2376, 2398), False, 'import os\n'), ((2755, 2789), 'os.path.join', 'os.path.join', (['"""Analyses"""', 'analyses'], {}), "('Analyses', analyses)\n", (2767, 2789), False, 'import os\n'), ((3697, 3715), 'numpy.asarray', 'np.asarray', (['output'], {}), '(output)\n', (3707, 3715), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
cnames = [
'#F0F8FF',
'#FAEBD7',
'#00FFFF',
'#7FFFD4',
'#F0FFFF',
'#F5F5DC',
'#FFE4C4',
'#000000',
'#FFEBCD',
'#0000FF',
'#8A2BE2',
'#A52A2A',
'#DEB887',
'#5F9EA0',
'#7FFF00',
'#D2691E',
'#FF7F50',
'#6495ED',
'#FFF8DC',
'#DC143C',
'#00FFFF',
'#00008B',
'#008B8B',
'#B8860B',
'#A9A9A9',
'#006400',
'#BDB76B',
'#8B008B',
'#556B2F',
'#FF8C00',
'#9932CC',
'#8B0000',
'#E9967A',
'#8FBC8F',
'#483D8B',
'#2F4F4F',
'#00CED1',
'#9400D3',
'#FF1493',
'#00BFFF',
'#696969',
'#1E90FF',
'#B22222',
'#FFFAF0',
'#228B22',
'#FF00FF',
'#DCDCDC',
'#F8F8FF',
'#FFD700',
'#DAA520',
'#808080',
'#008000',
'#ADFF2F',
'#F0FFF0',
'#FF69B4',
'#CD5C5C',
'#4B0082',
'#FFFFF0',
'#F0E68C',
'#E6E6FA',
'#FFF0F5',
'#7CFC00',
'#FFFACD',
'#ADD8E6',
'#F08080',
'#E0FFFF',
'#FAFAD2',
'#90EE90',
'#D3D3D3',
'#FFB6C1',
'#FFA07A',
'#20B2AA',
'#87CEFA',
'#778899',
'#B0C4DE',
'#FFFFE0',
'#00FF00',
'#32CD32',
'#FAF0E6',
'#FF00FF',
'#800000',
'#66CDAA',
'#0000CD',
'#BA55D3',
'#9370DB',
'#3CB371',
'#7B68EE',
'#00FA9A',
'#48D1CC',
'#C71585',
'#191970',
'#F5FFFA',
'#FFE4E1',
'#FFE4B5',
'#FFDEAD',
'#000080',
'#FDF5E6',
'#808000',
'#6B8E23',
'#FFA500',
'#FF4500',
'#DA70D6',
'#EEE8AA',
'#98FB98',
'#AFEEEE',
'#DB7093',
'#FFEFD5',
'#FFDAB9',
'#CD853F',
'#FFC0CB',
'#DDA0DD',
'#B0E0E6',
'#800080',
'#FF0000',
'#BC8F8F',
'#4169E1',
'#8B4513',
'#FA8072',
'#FAA460',
'#2E8B57',
'#FFF5EE',
'#A0522D',
'#C0C0C0',
'#87CEEB',
'#6A5ACD',
'#708090',
'#FFFAFA',
'#00FF7F',
'#4682B4',
'#D2B48C',
'#008080',
'#D8BFD8',
'#FF6347',
'#40E0D0',
'#EE82EE',
'#F5DEB3',
'#FFFFFF',
'#F5F5F5',
'#FFFF00',
'#9ACD32']
months = {'Jan': [],
'Feb': [],
'Mar': [],
'Apr': [],
'May': [],
'Jun': [],
'Jul': [],
'Aug': [],
'Sep': [],
'Oct': [],
'Nov': [],
'Dec': []
}
def getOwl(monthTable, ID):
result = []
for f in monthTable:
if f[0] == ID:
result.append(f)
return result
def fillNull(months):
months["Jan"].append(0)
months["Feb"].append(0)
months["Mar"].append(0)
months["Apr"].append(0)
months["May"].append(0)
months["Jun"].append(0)
months["Jul"].append(0)
months["Aug"].append(0)
months["Sep"].append(0)
months["Oct"].append(0)
months["Nov"].append(0)
months["Dec"].append(0)
return months
def fillMonths(monthTable, months):
curOwl = monthTable[0][0]
for feature in monthTable:
tempOwl = feature[0]
month = feature[2]
dist = feature[3]
owl = getOwl(monthTable, "1751")
# get all Data for one owl
# fill all month with distance
# missing data = 0 distance
months = fillNull(months)
if month == "01":
months["Jan"][len(months["Jan"])-1] = dist
if month == "02":
months["Feb"][len(months["Feb"])-1] = dist
if month == "03":
months["Mar"][len(months["Mar"])-1] = dist
if month == "04":
months["Apr"][len(months["Apr"])-1] = dist
if month == "05":
months["May"][len(months["May"])-1] = dist
if month == "06":
months["Jun"][len(months["Jun"])-1] = dist
if month == "07":
months["Jul"][len(months["Jul"])-1] = dist
if month == "08":
months["Aug"][len(months["Aug"])-1] = dist
if month == "09":
months["Sep"][len(months["Sep"])-1] = dist
if month == "10":
months["Oct"][len(months["Oct"])-1] = dist
if month == "11":
months["Nov"][len(months["Nov"])-1] = dist
if month == "12":
months["Dec"][len(months["Dec"])-1] = dist
return months
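# NOTE (assumption): `monthTable` is expected to already be defined at this point,
# e.g. loaded elsewhere as rows of the form [owlID, <unused>, month, distance],
# where `month` is a zero-padded string "01".."12", for example:
#   monthTable = [["1751", None, "01", 12.3], ["1751", None, "02", 8.7]]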
months = fillMonths(monthTable, months)
X = np.arange(12)
curOwl = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,]
counter = 0
tempOwl = "0"
lastOwl="none"
for feature in monthTable:
owl = feature[0]
if owl != tempOwl:
tempOwl = owl
t = getOwl(monthTable, feature[0])
for i in t:
month = i[2]
if month == "01":
curOwl[0] = i[3]
if month == "02":
curOwl[1] = i[3]
if month == "03":
curOwl[2] = i[3]
if month == "04":
curOwl[3] = i[3]
if month == "05":
curOwl[4] = i[3]
if month == "06":
curOwl[5] = i[3]
if month == "07":
curOwl[6] = i[3]
if month == "08":
curOwl[7] = i[3]
if month == "09":
curOwl[8] = i[3]
if month == "10":
curOwl[9] = i[3]
if month == "11":
curOwl[10] = i[3]
if month == "12":
curOwl[11] = i[3]
col = cnames[counter]
if lastOwl == "none":
plt.bar(X, curOwl, color = col)
else:
plt.bar(X, curOwl, color = col, bottom = lastOwl)
lastOwl = curOwl
counter = counter + 5
plt.show()
|
[
"matplotlib.pyplot.bar",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((5007, 5020), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (5016, 5020), True, 'import numpy as np\n'), ((6430, 6440), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6438, 6440), True, 'import matplotlib.pyplot as plt\n'), ((6249, 6278), 'matplotlib.pyplot.bar', 'plt.bar', (['X', 'curOwl'], {'color': 'col'}), '(X, curOwl, color=col)\n', (6256, 6278), True, 'import matplotlib.pyplot as plt\n'), ((6307, 6352), 'matplotlib.pyplot.bar', 'plt.bar', (['X', 'curOwl'], {'color': 'col', 'bottom': 'lastOwl'}), '(X, curOwl, color=col, bottom=lastOwl)\n', (6314, 6352), True, 'import matplotlib.pyplot as plt\n')]
|
# ------------------------------------------------------------------------------------------------ #
def ImportEssentialityData(fileName):
# Not yet ready for prime time
# Import a defined format essentiality data file
# Assumes that data is in the format: locus tag, gene name, essentiality
from .utils import ParseCSVLine
fileHandle = open(fileName, 'r')
data = fileHandle.readlines()
dataDict = {}
i = 0
while i < len(data):
# Ignore comment lines
if data[i][0] != '#':
dataLine = ParseCSVLine(data[i])
dataDict[dataLine[0]] = [dataLine[1], dataLine[2]]
i += 1
return dataDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def BuildEssentialityDictThatIsKeyedByLocusTag(dataArray):
# Not yet ready for prime time
# Build essentiality data dict that is keyed by locus tag
essentialityDict = {}
locusTags = []
headersWithoutSysName = []
i = 0
while i < len(headers):
if headers[i] != 'sysName':
headersWithoutSysName.append(headers[i])
i += 1
dataDict = {}
for line in dataArray:
dataDict[line['sysName']] = {}
for header in headersWithoutSysName:
dataDict[line['sysName']][header] = line[header]
return dataDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def BuildCDSDictThatIsKeyedByLocusTag(cdsFeatures):
# Not yet ready for prime time
i = 0
cdsDict = {}
while i < len(cdsFeatures):
locusTag = cdsFeatures[i].tagDict['locus_tag'][0]
cdsDict[locusTag] = cdsFeatures[i]
i += 1
return cdsDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \
transposonCoordToFeatureDict, maxMutants):
from numpy.random import choice
import pdb
nonEssentialGeneCount = len(hittableFeatures)
featureHitCountDict = {}
for feature in hittableFeatures:
featureHitCountDict[feature] = 0
featuresHitAtLeastOnce = 0
featuresHitAtLeastOnceVersusMutant = []
i = 1
while i <= maxMutants:
randomCoord = int(choice(hittableTransposonCoords))
featuresToBeHit = transposonCoordToFeatureDict[randomCoord]
isAnyFeatureIncludingThisCoordNotHittable = False
for featureToBeHit in featuresToBeHit:
if featureToBeHit in notHittableFeatures:
isAnyFeatureIncludingThisCoordNotHittable = True
if isAnyFeatureIncludingThisCoordNotHittable == False:
for featureToBeHit in featuresToBeHit:
try:
featureHitCountDict[featureToBeHit] += 1
except:
pdb.set_trace()
if featureHitCountDict[featureToBeHit] == 1:
featuresHitAtLeastOnce += 1
featuresHitAtLeastOnceVersusMutant.append(featuresHitAtLeastOnce)
i += 1
return featuresHitAtLeastOnceVersusMutant
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def SimulateMultiplePickings(transposonCoordToFeatureDictFile, numberOfTrials, maxMutants):
from scipy import unique, intersect1d
from numpy import mean, std, arange
import xml.etree.ElementTree as ET
import pdb
transposonCoordToFeatureDictFileHandle = open(transposonCoordToFeatureDictFile, 'r')
transposonCoordToFeatureDict = {}
hittableFeatures = []
hittableTransposonCoords = []
notHittableTransposonCoords = []
notHittableFeatures = []
otherFeatures = []
tree = ET.parse(transposonCoordToFeatureDictFile)
root = tree.getroot()
importedCoordsList = root.findall('coord')
for coord in importedCoordsList:
coordinate = int(coord.attrib['coord'])
loci = coord.findall('locus')
importedCoordsKeys = transposonCoordToFeatureDict.keys()
if coordinate not in importedCoordsKeys:
transposonCoordToFeatureDict[coordinate] = []
for locus in loci:
locusName = locus.attrib['locus']
essentiality = locus.attrib['essentiality']
transposonCoordToFeatureDict[coordinate].append(locusName)
if essentiality == 'Dispensable':
hittableTransposonCoords.append(coordinate)
hittableFeatures.append(locusName)
elif essentiality == 'Essential':
notHittableFeatures.append(locusName)
notHittableTransposonCoords.append(coordinate)
else:
otherFeatures.append(locusName)
print(locusName)
hittableFeatures = unique(hittableFeatures)
hittableTransposonCoords = unique(hittableTransposonCoords)
notHittableFeatures = unique(notHittableFeatures)
otherFeatures = unique(otherFeatures)
intersection = intersect1d(hittableFeatures, notHittableFeatures)
# Simulate a number of picking runs
featuresHitAtLeastOnceTrialsArray = []
i = 0
while i < numberOfTrials:
featuresHitAtLeastOnceVersusMutant = \
SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \
transposonCoordToFeatureDict, maxMutants)
featuresHitAtLeastOnceTrialsArray.append(featuresHitAtLeastOnceVersusMutant)
i += 1
	# Collect together the data from the picking runs for calculation of mean and standard
# deviation of number of hits picked
i = 0
collectedFeatureHitCountArray = []
while i < len(featuresHitAtLeastOnceTrialsArray[0]):
collectedFeatureHitCountArray.append([])
i += 1
i = 0
while i < len(collectedFeatureHitCountArray):
j = 0
while j < len(featuresHitAtLeastOnceTrialsArray):
collectedFeatureHitCountArray[i].append(featuresHitAtLeastOnceTrialsArray[j][i])
j += 1
i += 1
averageFeatureHitCount = []
sdFeatureHitCount = []
featureHitCountUpperBound = []
featureHitCountLowerBound = []
# Calculate the mean and standard deviation of the number of unique features hit at each pick
# from the trials
i = 0
while i < len(collectedFeatureHitCountArray):
averageFeatureHitCount.append(mean(collectedFeatureHitCountArray[i]))
sdFeatureHitCount.append(std(collectedFeatureHitCountArray[i]))
featureHitCountUpperBound.append(averageFeatureHitCount[i] + sdFeatureHitCount[i])
featureHitCountLowerBound.append(averageFeatureHitCount[i] - sdFeatureHitCount[i])
i += 1
# Prepare an x axis (the number of mutants picked) for the output
iAxis = arange(1, maxMutants+1, 1)
noUniqHittableFeatures = len(hittableFeatures)
return [iAxis, averageFeatureHitCount, sdFeatureHitCount, featureHitCountUpperBound, \
featureHitCountLowerBound, noUniqHittableFeatures ]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def PoissonEstimateOfGenesHit(iAxis, noUniqHittableFeatures):
from numpy import exp, array, float
uniqueGenesHit = []
i = 0
while i < len(iAxis):
ans = noUniqHittableFeatures*(1-exp(-iAxis[i]/noUniqHittableFeatures))
uniqueGenesHit.append(ans)
i += 1
uniqueGenesHit = array(uniqueGenesHit, float)
return uniqueGenesHit
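# Worked example of the estimate above (numbers are illustrative): with
# noUniqHittableFeatures = 3000 dispensable genes and 3000 mutants picked,
#   3000 * (1 - exp(-3000/3000)) = 3000 * (1 - 1/e) ~= 1896,
# i.e. roughly 63% of the hittable genes are expected to be hit at least once.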
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def FindATandTAPositions2(genomeFile, format='genbank'):
# Does the same thing as FindATandTAPositions but can work with a GenBank or a Fasta file, \
# so you only need one file format
import re
from pdb import set_trace
if format == 'genbank':
sequence = ImportGenBankSequence(genomeFile)
elif format == 'fasta':
sequence = ImportFastaSequence(genomeFile)
ATandTAPositions = []
atRegex = re.compile('(at|ta)', re.IGNORECASE)
# set_trace()
i = 0
while i < len(sequence) - 1:
atMatch = atRegex.match(sequence[i:i+2])
if atMatch != None:
ATandTAPositions.append(i+1)
i += 1
return [ATandTAPositions, sequence]
# ------------------------------------------------------------------------------------------------ #
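# Worked example for FindATandTAPositions2 (sequence is illustrative): for "ATTAGC" the
# regex matches the 2-mers starting at indices 0 ("AT") and 2 ("TA"), so ATandTAPositions
# would be [1, 3], i.e. the 1-based positions of the first base of each AT/TA site.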
|
[
"xml.etree.ElementTree.parse",
"numpy.std",
"scipy.intersect1d",
"numpy.mean",
"numpy.array",
"numpy.arange",
"numpy.exp",
"numpy.random.choice",
"pdb.set_trace",
"scipy.unique",
"re.compile"
] |
[((3844, 3886), 'xml.etree.ElementTree.parse', 'ET.parse', (['transposonCoordToFeatureDictFile'], {}), '(transposonCoordToFeatureDictFile)\n', (3852, 3886), True, 'import xml.etree.ElementTree as ET\n'), ((4733, 4757), 'scipy.unique', 'unique', (['hittableFeatures'], {}), '(hittableFeatures)\n', (4739, 4757), False, 'from scipy import unique, intersect1d\n'), ((4786, 4818), 'scipy.unique', 'unique', (['hittableTransposonCoords'], {}), '(hittableTransposonCoords)\n', (4792, 4818), False, 'from scipy import unique, intersect1d\n'), ((4842, 4869), 'scipy.unique', 'unique', (['notHittableFeatures'], {}), '(notHittableFeatures)\n', (4848, 4869), False, 'from scipy import unique, intersect1d\n'), ((4887, 4908), 'scipy.unique', 'unique', (['otherFeatures'], {}), '(otherFeatures)\n', (4893, 4908), False, 'from scipy import unique, intersect1d\n'), ((4927, 4977), 'scipy.intersect1d', 'intersect1d', (['hittableFeatures', 'notHittableFeatures'], {}), '(hittableFeatures, notHittableFeatures)\n', (4938, 4977), False, 'from scipy import unique, intersect1d\n'), ((6541, 6569), 'numpy.arange', 'arange', (['(1)', '(maxMutants + 1)', '(1)'], {}), '(1, maxMutants + 1, 1)\n', (6547, 6569), False, 'from numpy import mean, std, arange\n'), ((7252, 7280), 'numpy.array', 'array', (['uniqueGenesHit', 'float'], {}), '(uniqueGenesHit, float)\n', (7257, 7280), False, 'from numpy import exp, array, float\n'), ((7924, 7960), 're.compile', 're.compile', (['"""(at|ta)"""', 're.IGNORECASE'], {}), "('(at|ta)', re.IGNORECASE)\n", (7934, 7960), False, 'import re\n'), ((2451, 2483), 'numpy.random.choice', 'choice', (['hittableTransposonCoords'], {}), '(hittableTransposonCoords)\n', (2457, 2483), False, 'from numpy.random import choice\n'), ((6174, 6212), 'numpy.mean', 'mean', (['collectedFeatureHitCountArray[i]'], {}), '(collectedFeatureHitCountArray[i])\n', (6178, 6212), False, 'from numpy import mean, std, arange\n'), ((6241, 6278), 'numpy.std', 'std', (['collectedFeatureHitCountArray[i]'], {}), '(collectedFeatureHitCountArray[i])\n', (6244, 6278), False, 'from numpy import mean, std, arange\n'), ((7154, 7193), 'numpy.exp', 'exp', (['(-iAxis[i] / noUniqHittableFeatures)'], {}), '(-iAxis[i] / noUniqHittableFeatures)\n', (7157, 7193), False, 'from numpy import exp, array, float\n'), ((2923, 2938), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (2936, 2938), False, 'import pdb\n')]
|
from __future__ import print_function
import minpy.numpy as mp
import numpy as np
import minpy.dispatch.policy as policy
from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm
import time
# mp.set_policy(policy.OnlyNumPyPolicy())
def test_autograd():
@convert_args
def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b):
next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b)
return next_h
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def rnn_step_forward(x, prev_h, Wx, Wh, b):
next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b)
cache = next_h, prev_h, x, Wx, Wh
return next_h, cache
def rnn_step_backward(dnext_h, cache):
dx, dprev_h, dWx, dWh, db = None, None, None, None, None
# Load values from rnn_step_forward
next_h, prev_h, x, Wx, Wh = cache
# Gradients of loss wrt tanh
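        # d/dz tanh(z) = 1 - tanh(z)**2, so the upstream gradient dnext_h is scaled by (1 - next_h**2)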
dtanh = dnext_h * (1 - next_h * next_h) # (N, H)
# Gradients of loss wrt x
dx = dtanh.dot(Wx.T)
# Gradients of loss wrt prev_h
dprev_h = dtanh.dot(Wh.T)
# Gradients of loss wrt Wx
dWx = x.T.dot(dtanh) # (D, H)
# Gradients of loss wrt Wh
dWh = prev_h.T.dot(dtanh)
        # Gradients of loss wrt b. b is broadcast over the batch in the forward pass, so
        # its gradient is the column-wise sum of dtanh over the batch
db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :]
return dx, dprev_h, dWx, dWh, db
# preparation
N, D, H = 4, 5, 6
x = np.random.randn(N, D)
h = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dnext_h = np.random.randn(*out.shape)
# test MinPy
start = time.time()
rnn_step_forward_loss = lambda x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h)
grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5)))
grad_arrays = grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0]
end = time.time()
print("MinPy total time elapsed:", end - start)
# test NumPy
start = time.time()
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache)
out *= dnext_h # to agree with MinPy calculation
end = time.time()
print("NumPy total time elapsed:", end - start)
print()
print("Result Check:")
print('dx error: ', rel_error(dx, grad_arrays[0]))
print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1]))
print('dWx error: ', rel_error(dWx, grad_arrays[2]))
print('dWh error: ', rel_error(dWh, grad_arrays[3]))
print('db error: ', rel_error(db, grad_arrays[4]))
def test_zero_input_grad():
def foo1(x):
return 1
bar1 = grad(foo1)
assert bar1(0) == 0.0
def test_reduction():
def test_sum():
x_np = np.array([[1, 2], [3, 4], [5, 6]])
x_grad = np.array([[1, 1], [1, 1], [1, 1]])
def red1(x):
return mp.sum(x)
def red2(x):
return mp.sum(x, axis=0)
def red3(x):
return mp.sum(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad)
def test_max():
x_np = np.array([[1, 2], [2, 1], [0, 0]])
x_grad1 = np.array([[0, 1], [1, 0], [0, 0]])
x_grad2 = np.array([[0, 1], [1, 0], [1, 1]])
x_grad3 = np.array([[0, 1], [1, 0], [0, 0]])
def red1(x):
return mp.max(x)
def red2(x):
return mp.max(x, axis=1)
def red3(x):
return mp.max(x, axis=1, keepdims=True)
def red4(x):
return mp.max(x, axis=0)
def red5(x):
return mp.max(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad1)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad2)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad2)
grad4 = grad(red4)
assert np.all(grad4(x_np).asnumpy() == x_grad3)
grad5 = grad(red5)
assert np.all(grad5(x_np).asnumpy() == x_grad3)
def test_min():
x_np = np.array([[1, 2], [2, 1], [0, 0]])
x_grad1 = np.array([[0, 0], [0, 0], [1, 1]])
x_grad2 = np.array([[1, 0], [0, 1], [1, 1]])
x_grad3 = np.array([[0, 0], [0, 0], [1, 1]])
def red1(x):
return mp.min(x)
def red2(x):
return mp.min(x, axis=1)
def red3(x):
return mp.min(x, axis=1, keepdims=True)
def red4(x):
return mp.min(x, axis=0)
def red5(x):
return mp.min(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad1)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad2)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad2)
grad4 = grad(red4)
assert np.all(grad4(x_np).asnumpy() == x_grad3)
grad5 = grad(red5)
assert np.all(grad5(x_np).asnumpy() == x_grad3)
test_sum()
test_max()
test_min()
if __name__ == "__main__":
test_autograd()
test_zero_input_grad()
test_reduction()
|
[
"numpy.abs",
"minpy.numpy.sum",
"numpy.random.randn",
"minpy.numpy.max",
"time.time",
"minpy.core.numpy_to_minpy",
"numpy.array",
"minpy.core.grad",
"minpy.numpy.min"
] |
[((1681, 1702), 'numpy.random.randn', 'np.random.randn', (['N', 'D'], {}), '(N, D)\n', (1696, 1702), True, 'import numpy as np\n'), ((1711, 1732), 'numpy.random.randn', 'np.random.randn', (['N', 'H'], {}), '(N, H)\n', (1726, 1732), True, 'import numpy as np\n'), ((1742, 1763), 'numpy.random.randn', 'np.random.randn', (['D', 'H'], {}), '(D, H)\n', (1757, 1763), True, 'import numpy as np\n'), ((1773, 1794), 'numpy.random.randn', 'np.random.randn', (['H', 'H'], {}), '(H, H)\n', (1788, 1794), True, 'import numpy as np\n'), ((1803, 1821), 'numpy.random.randn', 'np.random.randn', (['H'], {}), '(H)\n', (1818, 1821), True, 'import numpy as np\n'), ((1887, 1914), 'numpy.random.randn', 'np.random.randn', (['*out.shape'], {}), '(*out.shape)\n', (1902, 1914), True, 'import numpy as np\n'), ((1949, 1960), 'time.time', 'time.time', ([], {}), '()\n', (1958, 1960), False, 'import time\n'), ((2238, 2249), 'time.time', 'time.time', ([], {}), '()\n', (2247, 2249), False, 'import time\n'), ((2336, 2347), 'time.time', 'time.time', ([], {}), '()\n', (2345, 2347), False, 'import time\n'), ((2528, 2539), 'time.time', 'time.time', ([], {}), '()\n', (2537, 2539), False, 'import time\n'), ((2999, 3009), 'minpy.core.grad', 'grad', (['foo1'], {}), '(foo1)\n', (3003, 3009), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((3094, 3128), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (3102, 3128), True, 'import numpy as np\n'), ((3146, 3180), 'numpy.array', 'np.array', (['[[1, 1], [1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1], [1, 1]])\n', (3154, 3180), True, 'import numpy as np\n'), ((3378, 3388), 'minpy.core.grad', 'grad', (['red1'], {}), '(red1)\n', (3382, 3388), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((3460, 3470), 'minpy.core.grad', 'grad', (['red2'], {}), '(red2)\n', (3464, 3470), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((3542, 3552), 'minpy.core.grad', 'grad', (['red3'], {}), '(red3)\n', (3546, 3552), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((3644, 3678), 'numpy.array', 'np.array', (['[[1, 2], [2, 1], [0, 0]]'], {}), '([[1, 2], [2, 1], [0, 0]])\n', (3652, 3678), True, 'import numpy as np\n'), ((3697, 3731), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [0, 0]]'], {}), '([[0, 1], [1, 0], [0, 0]])\n', (3705, 3731), True, 'import numpy as np\n'), ((3750, 3784), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [1, 1]]'], {}), '([[0, 1], [1, 0], [1, 1]])\n', (3758, 3784), True, 'import numpy as np\n'), ((3803, 3837), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [0, 0]]'], {}), '([[0, 1], [1, 0], [0, 0]])\n', (3811, 3837), True, 'import numpy as np\n'), ((4166, 4176), 'minpy.core.grad', 'grad', (['red1'], {}), '(red1)\n', (4170, 4176), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((4249, 4259), 'minpy.core.grad', 'grad', (['red2'], {}), '(red2)\n', (4253, 4259), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((4332, 4342), 'minpy.core.grad', 'grad', (['red3'], {}), '(red3)\n', (4336, 4342), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, 
minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((4415, 4425), 'minpy.core.grad', 'grad', (['red4'], {}), '(red4)\n', (4419, 4425), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((4498, 4508), 'minpy.core.grad', 'grad', (['red5'], {}), '(red5)\n', (4502, 4508), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((4601, 4635), 'numpy.array', 'np.array', (['[[1, 2], [2, 1], [0, 0]]'], {}), '([[1, 2], [2, 1], [0, 0]])\n', (4609, 4635), True, 'import numpy as np\n'), ((4654, 4688), 'numpy.array', 'np.array', (['[[0, 0], [0, 0], [1, 1]]'], {}), '([[0, 0], [0, 0], [1, 1]])\n', (4662, 4688), True, 'import numpy as np\n'), ((4707, 4741), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [1, 1]]'], {}), '([[1, 0], [0, 1], [1, 1]])\n', (4715, 4741), True, 'import numpy as np\n'), ((4760, 4794), 'numpy.array', 'np.array', (['[[0, 0], [0, 0], [1, 1]]'], {}), '([[0, 0], [0, 0], [1, 1]])\n', (4768, 4794), True, 'import numpy as np\n'), ((5123, 5133), 'minpy.core.grad', 'grad', (['red1'], {}), '(red1)\n', (5127, 5133), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((5206, 5216), 'minpy.core.grad', 'grad', (['red2'], {}), '(red2)\n', (5210, 5216), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((5289, 5299), 'minpy.core.grad', 'grad', (['red3'], {}), '(red3)\n', (5293, 5299), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((5372, 5382), 'minpy.core.grad', 'grad', (['red4'], {}), '(red4)\n', (5376, 5382), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((5455, 5465), 'minpy.core.grad', 'grad', (['red5'], {}), '(red5)\n', (5459, 5465), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((2064, 2075), 'minpy.core.numpy_to_minpy', 'nm', (['dnext_h'], {}), '(dnext_h)\n', (2066, 2075), True, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((3221, 3230), 'minpy.numpy.sum', 'mp.sum', (['x'], {}), '(x)\n', (3227, 3230), True, 'import minpy.numpy as mp\n'), ((3271, 3288), 'minpy.numpy.sum', 'mp.sum', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (3277, 3288), True, 'import minpy.numpy as mp\n'), ((3329, 3361), 'minpy.numpy.sum', 'mp.sum', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (3335, 3361), True, 'import minpy.numpy as mp\n'), ((3878, 3887), 'minpy.numpy.max', 'mp.max', (['x'], {}), '(x)\n', (3884, 3887), True, 'import minpy.numpy as mp\n'), ((3928, 3945), 'minpy.numpy.max', 'mp.max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (3934, 3945), True, 'import minpy.numpy as mp\n'), ((3986, 4018), 'minpy.numpy.max', 'mp.max', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (3992, 4018), True, 'import minpy.numpy as mp\n'), ((4059, 4076), 'minpy.numpy.max', 'mp.max', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (4065, 4076), True, 'import minpy.numpy as mp\n'), ((4117, 4149), 'minpy.numpy.max', 'mp.max', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (4123, 4149), True, 'import minpy.numpy as mp\n'), 
((4835, 4844), 'minpy.numpy.min', 'mp.min', (['x'], {}), '(x)\n', (4841, 4844), True, 'import minpy.numpy as mp\n'), ((4885, 4902), 'minpy.numpy.min', 'mp.min', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (4891, 4902), True, 'import minpy.numpy as mp\n'), ((4943, 4975), 'minpy.numpy.min', 'mp.min', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (4949, 4975), True, 'import minpy.numpy as mp\n'), ((5016, 5033), 'minpy.numpy.min', 'mp.min', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (5022, 5033), True, 'import minpy.numpy as mp\n'), ((5074, 5106), 'minpy.numpy.min', 'mp.min', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (5080, 5106), True, 'import minpy.numpy as mp\n'), ((557, 570), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (563, 570), True, 'import numpy as np\n'), ((591, 600), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (597, 600), True, 'import numpy as np\n'), ((603, 612), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (609, 612), True, 'import numpy as np\n')]
|
import numpy as np
from krikos.nn.layer import BatchNorm, BatchNorm2d, Dropout
class Network(object):
def __init__(self):
super(Network, self).__init__()
self.diff = (BatchNorm, BatchNorm2d, Dropout)
def train(self, input, target):
raise NotImplementedError
def eval(self, input):
raise NotImplementedError
class Sequential(Network):
def __init__(self, layers, loss, lr, regularization=None):
super(Sequential, self).__init__()
self.layers = layers
self.loss = loss
self.lr = lr
self.regularization = regularization
def train(self, input, target):
layers = self.layers
loss = self.loss
regularization = self.regularization
l = 0
for layer in layers:
if isinstance(layer, self.diff):
layer.mode = "train"
input = layer.forward(input)
if regularization is not None:
for _, param in layer.params.items():
l += regularization.forward(param)
l += loss.forward(input, target)
dout = loss.backward()
for layer in reversed(layers):
dout = layer.backward(dout)
for param, grad in layer.grads.items():
if regularization is not None:
grad += regularization.backward(layer.params[param])
layer.params[param] -= self.lr * grad
return np.argmax(input, axis=1), l
def eval(self, input):
layers = self.layers
for layer in layers:
if isinstance(layer, self.diff):
layer.mode = "test"
input = layer.forward(input)
return np.argmax(input, axis=1)
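# Usage sketch (illustrative only; concrete layer/loss classes live in krikos.nn and are
# not shown in this file):
#   net = Sequential(layers=[...], loss=some_loss, lr=1e-2)
#   train_preds, batch_loss = net.train(x_batch, y_batch)
#   test_preds = net.eval(x_test)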
|
[
"numpy.argmax"
] |
[((1717, 1741), 'numpy.argmax', 'np.argmax', (['input'], {'axis': '(1)'}), '(input, axis=1)\n', (1726, 1741), True, 'import numpy as np\n'), ((1463, 1487), 'numpy.argmax', 'np.argmax', (['input'], {'axis': '(1)'}), '(input, axis=1)\n', (1472, 1487), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
l: list = []
img = None
img_cp = None
def draw_circle(event, x, y, flags, param):
global l
global img
global img_cp
if event == cv2.EVENT_LBUTTONDOWN:
cv2.circle(img_cp, (x, y), 5, (255, 0, 0), -1)
l.append([x, y])
cv2.imshow('image', img_cp)
if len(l) == 4:
print(l)
pts1 = np.float32(l)
pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
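            # Assumed click order: top-left, top-right, bottom-left, bottom-right,
            # so the clicked points line up with the pts2 destination corners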
M = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(img, M, (300, 300))
cv2.imshow('Original image', img_cp)
cv2.imshow('Final', dst)
img_cp = img.copy()
l.clear()
def road_straight():
global img
global img_cp
img = cv2.imread('road.jpg')
img = cv2.resize(img, dsize=(1000, 1000))
img = cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST)
img_cp = img.copy()
cv2.namedWindow('image')
cv2.imshow('image', img)
cv2.setMouseCallback('image', draw_circle)
cv2.waitKey()
cv2.destroyAllWindows()
return
road_straight()
|
[
"cv2.resize",
"cv2.warpPerspective",
"cv2.circle",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.float32",
"cv2.getPerspectiveTransform",
"cv2.imread",
"cv2.setMouseCallback",
"cv2.imshow",
"cv2.namedWindow"
] |
[((787, 809), 'cv2.imread', 'cv2.imread', (['"""road.jpg"""'], {}), "('road.jpg')\n", (797, 809), False, 'import cv2\n'), ((820, 855), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(1000, 1000)'}), '(img, dsize=(1000, 1000))\n', (830, 855), False, 'import cv2\n'), ((866, 940), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)'], {'fx': '(0.75)', 'fy': '(0.75)', 'interpolation': 'cv2.INTER_NEAREST'}), '(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST)\n', (876, 940), False, 'import cv2\n'), ((969, 993), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (984, 993), False, 'import cv2\n'), ((998, 1022), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1008, 1022), False, 'import cv2\n'), ((1027, 1069), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'draw_circle'], {}), "('image', draw_circle)\n", (1047, 1069), False, 'import cv2\n'), ((1075, 1088), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1086, 1088), False, 'import cv2\n'), ((1093, 1116), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1114, 1116), False, 'import cv2\n'), ((245, 291), 'cv2.circle', 'cv2.circle', (['img_cp', '(x, y)', '(5)', '(255, 0, 0)', '(-1)'], {}), '(img_cp, (x, y), 5, (255, 0, 0), -1)\n', (255, 291), False, 'import cv2\n'), ((325, 352), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img_cp'], {}), "('image', img_cp)\n", (335, 352), False, 'import cv2\n'), ((407, 420), 'numpy.float32', 'np.float32', (['l'], {}), '(l)\n', (417, 420), True, 'import numpy as np\n'), ((436, 488), 'numpy.float32', 'np.float32', (['[[0, 0], [300, 0], [0, 300], [300, 300]]'], {}), '([[0, 0], [300, 0], [0, 300], [300, 300]])\n', (446, 488), True, 'import numpy as np\n'), ((502, 541), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (529, 541), False, 'import cv2\n'), ((556, 595), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(300, 300)'], {}), '(img, M, (300, 300))\n', (575, 595), False, 'import cv2\n'), ((605, 641), 'cv2.imshow', 'cv2.imshow', (['"""Original image"""', 'img_cp'], {}), "('Original image', img_cp)\n", (615, 641), False, 'import cv2\n'), ((650, 674), 'cv2.imshow', 'cv2.imshow', (['"""Final"""', 'dst'], {}), "('Final', dst)\n", (660, 674), False, 'import cv2\n')]
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import FancyBboxPatch
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.basemap import Basemap
import numpy as np
# Suppress matplotlib warnings
np.warnings.filterwarnings('ignore')
import xarray as xr
import cmocean
from pathlib import Path
import _pickle as pickle
import os
import ship_mapper as sm
import urllib.request
import netCDF4
def map_density(info, file_in=None, cmap='Default', sidebar=False,
to_screen=True, save=True,
filename_out='auto',filedir_out='auto'):
'''
Plots a map using a gridded (or merged) file
Arguments:
info (info): ``info`` object containing metadata
Keyword Arguments:
file_in (str): Gridded or merged file to map. If ``None`` it looks for
``merged_grid.nc`` in the `\merged` directory
cmap (str): Colormap to use
sidebar (bool): If ``True``, includes side panel with metadata
to_screen (bool): If ``True``, a plot is printed to screen
        save (bool): If ``True`` a ``.png`` figure is saved to the hard drive
filename_out (str): Name of produced figure.
If ``auto`` then name is ``info.run_name + '__' + file_in + '.png'``
filedir_out (str): Directory where figure is saved.
If ``auto`` then output directory is ``info.dirs.pngs``
Returns:
Basemap object
'''
print('map_density ------------------------------------------------------')
# Load data
if file_in == None:
file_in = os.path.join(str(info.dirs.merged_grid),'merged_grid.nc')
print(file_in)
d = xr.open_dataset(file_in)
# Define boundaries
if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon == None:
minlat = d['lat'].values.min()
maxlat = d['lat'].values.max()
minlon = d['lon'].values.min()
maxlon = d['lon'].values.max()
else:
minlat = d.attrs['minlat']
maxlat = d.attrs['maxlat']
minlon = d.attrs['minlon']
maxlon = d.attrs['maxlon']
basemap_file = info.dirs.basemap
print('Basemap file: ' + basemap_file)
    # Check for basemap.p and, if it doesn't exist, make it
if not os.path.exists(basemap_file):
m = sm.make_basemap(info,info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
else:
print('Found basemap...')
m = pickle.load(open(basemap_file,'rb'))
# Create grid for mapping
lons_grid, lats_grid = np.meshgrid(d['lon'].values,d['lat'].values)
xx,yy = m(lons_grid, lats_grid)
H = d['ship_density'].values
# Rotate and flip H... ----------------------------------------------------------------------------
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
d.attrs['mask_below'] = info.maps.mask_below
Hmasked = np.ma.masked_where(H<=d.attrs['mask_below'],H)
    # Set vmax and vmin
print('Min: ' + str(np.min(Hmasked)))
print('Max: ' + str(np.max(Hmasked)))
print('Mean: ' + str(np.nanmean(Hmasked)))
print('Std: ' + str(Hmasked.std()))
if info.maps.cbarmax == 'auto':
# vmax = (np.median(Hmasked)) + (4*Hmasked.std())
vmax = (np.max(Hmasked)) - (2*Hmasked.std())
elif info.maps.cbarmax != None:
vmax = info.maps.cbarmax
else:
vmax = None
if info.maps.cbarmin == 'auto':
# vmin = (np.median(Hmasked)) - (4*Hmasked.std())
alat = (d.attrs['maxlat'] - d.attrs['minlat'])/2
cellsize = sm.degrees_to_meters(d.attrs['bin_size'], alat)
# max_speed = 616.66 # m/min ...roughly 20 knots
        max_speed = 316.66 # m/min ...roughly 10 knots
vmin = cellsize / max_speed
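        # Presumably this sets the colorbar floor to the time (in minutes) a vessel moving at
        # max_speed needs to cross one grid cell, i.e. the smallest meaningful density value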
elif info.maps.cbarmin != None:
vmin = info.maps.cbarmin
else:
vmin = None
# Log H for better display
Hmasked = np.log10(Hmasked)
if vmin != None:
vmin = np.log10(vmin)
if vmax != None:
vmax = np.log10(vmax)
# Make colormap
fig = plt.gcf()
ax = plt.gca()
if cmap == 'Default':
cmapcolor = load_my_cmap('my_cmap_amber2red')
elif cmap == 'red2black':
cmapcolor = load_my_cmap('my_cmap_red2black')
else:
cmapcolor =plt.get_cmap(cmap)
cs = m.pcolor(xx,yy,Hmasked, cmap=cmapcolor, zorder=10, vmin=vmin, vmax=vmax)
#scalebar
sblon = minlon + ((maxlon-minlon)/10)
sblat = minlat + ((maxlat-minlat)/20)
m.drawmapscale(sblon, sblat,
minlon, minlat,
info.maps.scalebar_km, barstyle='fancy',
units='km', fontsize=8,
fontcolor='#808080',
fillcolor1 = '#cccccc',
fillcolor2 = '#a6a6a6',
yoffset = (0.01*(m.ymax-m.ymin)),
labelstyle='simple',zorder=60)
if not sidebar:
cbaxes2 = fig.add_axes([0.70, 0.18, 0.2, 0.03],zorder=60)
cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal')
# Change colorbar labels for easier interpreting
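        # (densities were log10-transformed above, so tick values are mapped back with 10**x for display)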
label_values = cbar._tick_data_values
log_label_values = np.round(10 ** label_values,decimals=0)
labels = []
for log_label_value in log_label_values:
labels.append(str(int(log_label_value)))
cbar.ax.set_yticklabels(labels)
cbar.ax.set_xlabel(d.attrs['units'])
if sidebar:
text1, text2, text3, text4 = make_legend_text(info,d.attrs)
ax2 = plt.subplot2grid((1,24),(0,0),colspan=4)
# Turn off tick labels
ax2.get_xaxis().set_visible(False)
ax2.get_yaxis().set_visible(False)
ax2.add_patch(FancyBboxPatch((0,0),
width=1, height=1, clip_on=False,
boxstyle="square,pad=0", zorder=3,
facecolor='#e6e6e6', alpha=1.0,
edgecolor='#a6a6a6',
transform=plt.gca().transAxes))
plt.text(0.15, 0.99, text1,
verticalalignment='top',
horizontalalignment='left',
weight='bold',
size=10,
color= '#737373',
transform=plt.gca().transAxes)
plt.text(0.02, 0.83, text2,
horizontalalignment='left',
verticalalignment='top',
size=9,
color= '#808080',
transform=plt.gca().transAxes)
plt.text(0.02, 0.145, text3,
horizontalalignment='left',
verticalalignment='top',
size=7,
color= '#808080',
transform=plt.gca().transAxes)
plt.text(0.02, 0.25, text4,
style='italic',
horizontalalignment='left',
verticalalignment='top',
size=8,
color= '#808080',
transform=plt.gca().transAxes)
cbaxes2 = fig.add_axes([0.019, 0.9, 0.15, 0.02],zorder=60)
cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal')
cbar.ax.tick_params(labelsize=8, labelcolor='#808080')
# Change colorbar labels for easier interpreting
label_values = cbar._tick_data_values
# print("values")
# print(label_values)
log_label_values = np.round(10 ** label_values,decimals=0)
# print(log_label_values)
labels = []
for log_label_value in log_label_values:
labels.append(str(int(log_label_value)))
cbar.ax.set_xticklabels(labels)
cbar.ax.set_xlabel(d.attrs['units'], size=9, color='#808080')
# TODO: maybe delete this?
# mng = plt.get_current_fig_manager()
# mng.frame.Maximize(True)
#
# fig.tight_layout()
plt.show()
# Save map as png
if save:
if filedir_out == 'auto':
filedir = str(info.dirs.pngs)
else:
filedir = filedir_out
if filename_out == 'auto':
filename = info.run_name + '__' + sm.get_filename_from_fullpath(file_in) + '.png'
else:
filename = filename_out
sm.checkDir(filedir)
plt.savefig(os.path.join(filedir,filename), dpi=300)
# Close netCDF file
d.close()
if to_screen == False:
plt.close()
return
def make_legend_text(info,md):
'''
Makes text for legend in left block of map
:param info info: ``info`` object containing metadata
:return: text for legend
'''
import datetime
alat = (md['maxlat'] - md['minlat'])/2
text1 = 'VESSEL DENSITY HEATMAP'
# print(info)
# --------------------------------------------------------
text2 = ('Unit description: ' + md['unit_description'] + '\n\n' +
'Data source: ' + md['data_source'] + '\n\n' +
'Data source description:\n' + md['data_description'] + '\n\n' +
'Time range: \n' + md['startdate'][0:-3] + ' to ' + md['enddate'][0:-3] + '\n\n' +
'Included speeds: ' + info.sidebar.included_speeds + '\n' +
'Included vessels: ' + info.sidebar.included_vessel_types + '\n\n' +
'Grid size: ' + str(md['bin_size']) + ' degrees (~' + str(int(round(sm.degrees_to_meters(md['bin_size'], alat))))+ ' m)\n' +
             'EPSG code: ' + md['epsg_code'] + '\n' +
'Interpolation: ' + md['interpolation'] + '\n' +
'Interpolation threshold: ' + str(md['interp_threshold']) + ' knots\n' +
'Time bin: ' + str(round(md['time_bin']*1440,1)) + ' minutes\n' +
'Mask below: ' + str(md['mask_below']) + ' vessels per grid'
)
text3 = ('Creation date: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n' +
'Creation script: ' + info.run_name + '.py\n' +
'Software: ship mapper v0.1\n\n' +
'Created by:\n' +
'Oceans and Coastal Management Division\n' +
'Ecosystem Management Branch\n' +
'Fisheries and Oceans Canada – Maritimes Region\n' +
'Bedford Institute of Oceanography\n' +
'PO Box 1006, Dartmouth, NS, Canada, B2Y 4A2'
)
text4 = ('---------------------------------------------------------------\n' +
'WARNING: This is a preliminary data product.\n' +
'We cannot guarantee the validity, accuracy, \n' +
'or quality of this product. Data is provided\n' +
'on an "AS IS" basis. USE AT YOUR OWN RISK.\n' +
'---------------------------------------------------------------\n'
)
return text1, text2, text3, text4
def map_dots(info, file_in, sidebar=False, save=True):
'''
Creates a map of "pings" rather than gridded density
Arguments:
info (info): ``info`` object containing metadata
Keyword Arguments:
file_in (str): Gridded or merged file to map. If ``None`` it looks for
``merged_grid.nc`` in the `\merged` directory
sidebar (bool): If ``True``, includes side panel with metadata
        save (bool): If ``True`` a ``.png`` figure is saved to the hard drive
'''
print('Mapping...')
# -----------------------------------------------------------------------------
d = xr.open_dataset(file_in)
# Define boundaries
if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon == None:
minlat = d['lat'].values.min()
maxlat = d['lat'].values.max()
minlon = d['lon'].values.min()
maxlon = d['lon'].values.max()
else:
minlat = info.grid.minlat
maxlat = info.grid.maxlat
minlon = info.grid.minlon
maxlon = info.grid.maxlon
path_to_basemap = info.dirs.project_path / 'ancillary'
print('-----------------------------------------------------')
print('-----------------------------------------------------')
if sidebar:
basemap_file = str(path_to_basemap / 'basemap_sidebar.p')
else:
basemap_file = str(path_to_basemap / 'basemap.p')
if not os.path.exists(basemap_file):
m = sm.make_basemap(info,[minlat,maxlat,minlon,maxlon])
else:
print('Found basemap...')
m = pickle.load(open(basemap_file,'rb'))
x, y = m(d['longitude'].values,d['latitude'].values)
cs = m.scatter(x,y,s=0.1,marker='o',color='r', zorder=10)
#
plt.show()
# # Save map as png
# if save:
# filedir = str(info.dirs.pngs)
# sm.checkDir(filedir)
# filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
# plt.savefig(os.path.join(filedir,filename), dpi=300)
return
def map_dots_one_ship(info, file_in, Ship_No, save=True):
'''
Creates a map of "pings" (i.e. not gridded density) of only one ship
Arguments:
info (info): ``info`` object containing metadata
Keyword Arguments:
file_in (str): Gridded or merged file to map. If ``None`` it looks for
``merged_grid.nc`` in the `\merged` directory
Ship_No (str): Unique identifier of the ship to plot
        save (bool): If ``True`` a ``.png`` figure is saved to the hard drive
'''
import pandas as pd
print('Mapping...')
# -----------------------------------------------------------------------------
d = xr.open_dataset(file_in)
# Define boundaries
if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon == None:
minlat = d['lat'].values.min()
maxlat = d['lat'].values.max()
minlon = d['lon'].values.min()
maxlon = d['lon'].values.max()
else:
minlat = info.grid.minlat
maxlat = info.grid.maxlat
minlon = info.grid.minlon
maxlon = info.grid.maxlon
path_to_basemap = info.dirs.project_path / 'ancillary'
print('-----------------------------------------------------')
print('-----------------------------------------------------')
# basemap_file = str(path_to_basemap / 'basemap_spots.p')
m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
# if not os.path.exists(str(path_to_basemap / 'basemap.p')):
# m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
# else:
# print('Found basemap...')
# m = pickle.load(open(basemap_file,'rb'))
indx = ((d['longitude']> minlon) &
(d['longitude']<= maxlon) &
(d['latitude']> minlat) &
(d['latitude']<= maxlat))
filtered_data = d.sel(Dindex=indx)
ship_id = info.ship_id
unis = pd.unique(filtered_data[ship_id].values)
ship = unis[Ship_No]
indxship = (filtered_data[ship_id] == ship)
singleship = filtered_data.sel(Dindex=indxship)
print('Ship id:'+ str(ship))
# print(singleship['longitude'].values)
# print(singleship['latitude'].values)
x, y = m(singleship['longitude'].values,singleship['latitude'].values)
# x, y = m(d['longitude'].values,d['latitude'].values)
cs = m.scatter(x,y,2,marker='o',color='r', zorder=30)
# fig = plt.figure()
# plt.plot(filtered_data['longitude'].values,filtered_data['latitude'].values,'.')
#
plt.show()
# # Save map as png
# if save:
# filedir = str(info.dirs.pngs)
# sm.checkDir(filedir)
# filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
# plt.savefig(os.path.join(filedir,filename), dpi=300)
return
def define_path_to_map(info, path_to_basemap='auto'):
'''
    Figures out where the .basemap and .grid files are
Arguments:
info (info): ``info`` object containing metadata
'''
if path_to_basemap == 'auto':
if info.grid.type == 'one-off':
path_to_map = os.path.join(info.dirs.project_path,info.grid.region,'ancillary')
elif info.grid.type == 'generic':
path_to_map = os.path.abspath(os.path.join(info.dirs.project_path,'ancillary'))
else:
path_to_map = path_to_basemap
return path_to_map
def make_basemap(info,spatial,path_to_basemap='auto', sidebar=False):
'''
Makes a basemap
Arguments:
info (info): ``info`` object containing metadata
spatial (list): List with corners... this will be deprecated soon
Keyword arguments:
path_to_basemap (str): Directory where to save the produced basemap. If ``'auto'``
then path is setup by :func:`~ship_mapper.mapper.define_path_to_map`
sidebar (bool): If ``True`` space for a side panel is added to the basemap
Returns:
A ``.basemap`` and a ``.grid`` files
'''
print('Making basemap...')
# -----------------------------------------------------------------------------
path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)
sm.checkDir(str(path_to_map))
minlat = spatial[0]
maxlat = spatial[1]
minlon = spatial[2]
maxlon = spatial[3]
# Create map
m = Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat,
llcrnrlon=minlon, urcrnrlon=maxlon,resolution=info.maps.resolution)
# TOPO
# Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html
# using the netCDF output option
# bathymetry_file = str(path_to_map / 'usgsCeSrtm30v6.nc')
bathymetry_file = os.path.join(path_to_map, 'usgsCeSrtm30v6.nc')
if not os.path.isfile(bathymetry_file):
isub = 1
base_url='http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'
query='topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat,isub,minlat,minlon,isub,maxlon)
url = base_url+query
# store data in NetCDF file
urllib.request.urlretrieve(url, bathymetry_file)
# open NetCDF data in
nc = netCDF4.Dataset(bathymetry_file)
ncv = nc.variables
lon = ncv['longitude'][:]
lat = ncv['latitude'][:]
lons, lats = np.meshgrid(lon,lat)
topo = ncv['topo'][:,:]
#
fig = plt.figure(figsize=(19,9))
# ax = fig.add_axes([0.05,0.05,0.80,1])
# ax = fig.add_axes([0,0,0.80,1])
# ax = fig.add_axes([0.23,0.035,0.85,0.9])
if sidebar:
ax = plt.subplot2grid((1,24),(0,5),colspan=19)
else:
ax = fig.add_axes([0.05,0.05,0.94,0.94])
TOPOmasked = np.ma.masked_where(topo>0,topo)
cs = m.pcolormesh(lons,lats,TOPOmasked,cmap=load_my_cmap('my_cmap_lightblue'),latlon=True,zorder=5)
# m.drawcoastlines(color='#A27D0C',linewidth=0.5,zorder=25)
# m.fillcontinents(color='#E1E1A0',zorder=23)
m.drawcoastlines(color='#a6a6a6',linewidth=0.5,zorder=25)
m.fillcontinents(color='#e6e6e6',zorder=23)
m.drawmapboundary()
def setcolor(x, color):
for m in x:
for t in x[m][1]:
t.set_color(color)
parallels = np.arange(minlat,maxlat,info.maps.parallels)
# labels = [left,right,top,bottom]
par = m.drawparallels(parallels,labels=[True,False,False,False],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25)
setcolor(par,'#00a3cc')
meridians = np.arange(minlon,maxlon,info.maps.meridians)
mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25)
setcolor(mers,'#00a3cc')
ax = plt.gca()
# ax.axhline(linewidth=4, color="#00a3cc")
# ax.axvline(linewidth=4, color="#00a3cc")
#
ax.spines['top'].set_color('#00a3cc')
ax.spines['right'].set_color('#00a3cc')
ax.spines['bottom'].set_color('#00a3cc')
ax.spines['left'].set_color('#00a3cc')
for k, spine in ax.spines.items(): #ax.spines is a dictionary
spine.set_zorder(35)
# ax.spines['top'].set_visible(False)
# ax.spines['right'].set_visible(False)
# ax.spines['bottom'].set_visible(False)
# ax.spines['left'].set_visible(False)
# fig.tight_layout(pad=0.25)
fig.tight_layout(rect=[0.01,0.01,.99,.99])
plt.show()
if sidebar:
basemap_name = 'basemap_sidebar.p'
else:
basemap_name = 'basemap.p'
info = sm.calculate_gridcell_areas(info)
# Save basemap
save_basemap(m,info,path_to_basemap=path_to_map)
# picklename = str(path_to_map / basemap_name)
# pickle.dump(m,open(picklename,'wb'),-1)
# print('!!! Pickle just made: ' + picklename)
#
## pngDir = 'C:\\Users\\IbarraD\\Documents\\VMS\\png\\'
## plt.savefig(datadir[0:-5] + 'png\\' + filename + '- Grid' + str(BinNo) + ' - Filter' +str(downLim) + '-' + str(upLim) + '.png')
# plt.savefig('test.png')
return m
def load_my_cmap(name):
'''
Creates and loads custom colormap
'''
# cdict = {'red': ((0.0, 0.0, 0.0),
# (1.0, 0.7, 0.7)),
# 'green': ((0.0, 0.25, 0.25),
# (1.0, 0.85, 0.85)),
# 'blue': ((0.0, 0.5, 0.5),
# (1.0, 1.0, 1.0))}
# my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
if name == 'my_cmap_lightblue':
cdict = {'red': ((0.0, 0.0, 0.0), # Dark
(1.0, 0.9, 0.9)), # Light
'green': ((0.0, 0.9, 0.9),
(1.0, 1.0,1.0)),
'blue': ((0.0, 0.9, 0.9),
(1.0, 1.0, 1.0))}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
elif name == 'my_cmap_amber2red':
# cdict = {'red': ((0.0, 1.0, 1.0),
# (1.0, 0.5, 0.5)),
# 'green': ((0.0, 1.0, 1.0),
# (1.0, 0.0, 0.0)),
# 'blue': ((0.0, 0.0, 0.0),
# (1.0, 0.0, 0.0))}
# my_cmap_yellow2red = LinearSegmentedColormap('my_colormap',cdict,256)
cdict = {'red': ((0.0, 1.0, 1.0),
(1.0, 0.5, 0.5)),
'green': ((0.0, 0.85, 0.85),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.3, 0.3),
(1.0, 0.0, 0.0))}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
elif name == 'my_cmap_red2black':
# c1 = np.array([252,142,110])/256 #RGB/256
c1 = np.array([250,59,59])/256 #RGB/256
c2 = np.array([103,0,13])/256 #RGB/256
cdict = {'red': ((0.0, c1[0], c1[0]),
(1.0, c2[0], c2[0])),
'green': ((0.0, c1[1], c1[1]),
(1.0, c2[1], c2[1])),
'blue': ((0.0, c1[2], c1[2]),
(1.0, c2[2], c2[2]))}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
    else:
        raise ValueError('cmap name does not match any of the available cmaps')
return my_cmap
def save_basemap(m,info,path_to_basemap='auto'):
'''
    Saves basemap (and corresponding info.grid) to a pickle file
Arguments:
m (mpl_toolkits.basemap.Basemap): Basemap object
info (info): ``info`` object containing metadata
Keyword Arguments:
path_to_basemap (str): If ``'auto'`` it looks in ``grids`` directory
Returns:
Pickle file
See also:
:mod:`pickle`
'''
#
# basemap = [grid, m]
# f = open(str(path_to_map / (info.grid.basemap + '.p')),'w')
# pickle.dump(grid, f)
# pickle.dump(m, f)
# f.close()
# picklename = str(path_to_map / (info.grid.basemap + '.p'))
# pickle.dump(basemap, open(picklename, 'wb'), -1)
# print('!!! Pickle just made: ' + picklename)
path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)
# basemap_picklename = str(path_to_map / (info.grid.basemap + '.basemap'))
basemap_picklename = os.path.join(path_to_map,info.grid.basemap + '.basemap')
pickle.dump(m, open(basemap_picklename, 'wb'), -1)
# info_picklename = str(path_to_map / (info.grid.basemap + '.grid'))
info_picklename = os.path.join(path_to_map, info.grid.basemap + '.grid')
pickle.dump(info, open(info_picklename, 'wb'), -1)
print('!!! Pickles were just made: ' + basemap_picklename)
return
|
[
"matplotlib.colors.LinearSegmentedColormap",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure",
"numpy.rot90",
"numpy.arange",
"os.path.isfile",
"matplotlib.pyplot.gca",
"os.path.join",
"numpy.round",
"numpy.nanmean",
"netCDF4.Dataset",
"numpy.meshgrid",
"ship_mapper.degrees_to_meters",
"matplotlib.pyplot.close",
"os.path.exists",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.log10",
"datetime.datetime.now",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"numpy.ma.masked_where",
"ship_mapper.checkDir",
"numpy.flipud",
"numpy.min",
"matplotlib.use",
"ship_mapper.make_basemap",
"matplotlib.pyplot.gcf",
"ship_mapper.get_filename_from_fullpath",
"xarray.open_dataset",
"pandas.unique",
"numpy.array",
"ship_mapper.calculate_gridcell_areas",
"numpy.warnings.filterwarnings",
"mpl_toolkits.basemap.Basemap"
] |
[((19, 40), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (33, 40), False, 'import matplotlib\n'), ((273, 309), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (299, 309), True, 'import numpy as np\n'), ((1869, 1893), 'xarray.open_dataset', 'xr.open_dataset', (['file_in'], {}), '(file_in)\n', (1884, 1893), True, 'import xarray as xr\n'), ((2811, 2856), 'numpy.meshgrid', 'np.meshgrid', (["d['lon'].values", "d['lat'].values"], {}), "(d['lon'].values, d['lat'].values)\n", (2822, 2856), True, 'import numpy as np\n'), ((3053, 3064), 'numpy.rot90', 'np.rot90', (['H'], {}), '(H)\n', (3061, 3064), True, 'import numpy as np\n'), ((3074, 3086), 'numpy.flipud', 'np.flipud', (['H'], {}), '(H)\n', (3083, 3086), True, 'import numpy as np\n'), ((3177, 3226), 'numpy.ma.masked_where', 'np.ma.masked_where', (["(H <= d.attrs['mask_below'])", 'H'], {}), "(H <= d.attrs['mask_below'], H)\n", (3195, 3226), True, 'import numpy as np\n'), ((4238, 4255), 'numpy.log10', 'np.log10', (['Hmasked'], {}), '(Hmasked)\n', (4246, 4255), True, 'import numpy as np\n'), ((4402, 4411), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4409, 4411), True, 'import matplotlib.pyplot as plt\n'), ((4422, 4431), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4429, 4431), True, 'import matplotlib.pyplot as plt\n'), ((8435, 8445), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8443, 8445), True, 'import matplotlib.pyplot as plt\n'), ((12143, 12167), 'xarray.open_dataset', 'xr.open_dataset', (['file_in'], {}), '(file_in)\n', (12158, 12167), True, 'import xarray as xr\n'), ((13355, 13365), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13363, 13365), True, 'import matplotlib.pyplot as plt\n'), ((14338, 14362), 'xarray.open_dataset', 'xr.open_dataset', (['file_in'], {}), '(file_in)\n', (14353, 14362), True, 'import xarray as xr\n'), ((15122, 15195), 'ship_mapper.make_basemap', 'sm.make_basemap', (['info.dirs.project_path', '[minlat, maxlat, minlon, maxlon]'], {}), '(info.dirs.project_path, [minlat, maxlat, minlon, maxlon])\n', (15137, 15195), True, 'import ship_mapper as sm\n'), ((15709, 15749), 'pandas.unique', 'pd.unique', (['filtered_data[ship_id].values'], {}), '(filtered_data[ship_id].values)\n', (15718, 15749), True, 'import pandas as pd\n'), ((16351, 16361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16359, 16361), True, 'import matplotlib.pyplot as plt\n'), ((18245, 18381), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""mill"""', 'llcrnrlat': 'minlat', 'urcrnrlat': 'maxlat', 'llcrnrlon': 'minlon', 'urcrnrlon': 'maxlon', 'resolution': 'info.maps.resolution'}), "(projection='mill', llcrnrlat=minlat, urcrnrlat=maxlat, llcrnrlon=\n minlon, urcrnrlon=maxlon, resolution=info.maps.resolution)\n", (18252, 18381), False, 'from mpl_toolkits.basemap import Basemap\n'), ((18628, 18674), 'os.path.join', 'os.path.join', (['path_to_map', '"""usgsCeSrtm30v6.nc"""'], {}), "(path_to_map, 'usgsCeSrtm30v6.nc')\n", (18640, 18674), False, 'import os\n'), ((19081, 19113), 'netCDF4.Dataset', 'netCDF4.Dataset', (['bathymetry_file'], {}), '(bathymetry_file)\n', (19096, 19113), False, 'import netCDF4\n'), ((19217, 19238), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat'], {}), '(lon, lat)\n', (19228, 19238), True, 'import numpy as np\n'), ((19285, 19312), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(19, 9)'}), '(figsize=(19, 9))\n', (19295, 19312), True, 'import matplotlib.pyplot as plt\n'), ((19621, 
19655), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(topo > 0)', 'topo'], {}), '(topo > 0, topo)\n', (19639, 19655), True, 'import numpy as np\n'), ((20160, 20206), 'numpy.arange', 'np.arange', (['minlat', 'maxlat', 'info.maps.parallels'], {}), '(minlat, maxlat, info.maps.parallels)\n', (20169, 20206), True, 'import numpy as np\n'), ((20440, 20486), 'numpy.arange', 'np.arange', (['minlon', 'maxlon', 'info.maps.meridians'], {}), '(minlon, maxlon, info.maps.meridians)\n', (20449, 20486), True, 'import numpy as np\n'), ((20657, 20666), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20664, 20666), True, 'import matplotlib.pyplot as plt\n'), ((21338, 21348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21346, 21348), True, 'import matplotlib.pyplot as plt\n'), ((21485, 21518), 'ship_mapper.calculate_gridcell_areas', 'sm.calculate_gridcell_areas', (['info'], {}), '(info)\n', (21512, 21518), True, 'import ship_mapper as sm\n'), ((25188, 25245), 'os.path.join', 'os.path.join', (['path_to_map', "(info.grid.basemap + '.basemap')"], {}), "(path_to_map, info.grid.basemap + '.basemap')\n", (25200, 25245), False, 'import os\n'), ((25403, 25457), 'os.path.join', 'os.path.join', (['path_to_map', "(info.grid.basemap + '.grid')"], {}), "(path_to_map, info.grid.basemap + '.grid')\n", (25415, 25457), False, 'import os\n'), ((2530, 2558), 'os.path.exists', 'os.path.exists', (['basemap_file'], {}), '(basemap_file)\n', (2544, 2558), False, 'import os\n'), ((2573, 2652), 'ship_mapper.make_basemap', 'sm.make_basemap', (['info', 'info.dirs.project_path', '[minlat, maxlat, minlon, maxlon]'], {}), '(info, info.dirs.project_path, [minlat, maxlat, minlon, maxlon])\n', (2588, 2652), True, 'import ship_mapper as sm\n'), ((3872, 3919), 'ship_mapper.degrees_to_meters', 'sm.degrees_to_meters', (["d.attrs['bin_size']", 'alat'], {}), "(d.attrs['bin_size'], alat)\n", (3892, 3919), True, 'import ship_mapper as sm\n'), ((4294, 4308), 'numpy.log10', 'np.log10', (['vmin'], {}), '(vmin)\n', (4302, 4308), True, 'import numpy as np\n'), ((4347, 4361), 'numpy.log10', 'np.log10', (['vmax'], {}), '(vmax)\n', (4355, 4361), True, 'import numpy as np\n'), ((5323, 5389), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'extend': '"""both"""', 'cax': 'cbaxes2', 'orientation': '"""horizontal"""'}), "(extend='both', cax=cbaxes2, orientation='horizontal')\n", (5335, 5389), True, 'import matplotlib.pyplot as plt\n'), ((5535, 5575), 'numpy.round', 'np.round', (['(10 ** label_values)'], {'decimals': '(0)'}), '(10 ** label_values, decimals=0)\n', (5543, 5575), True, 'import numpy as np\n'), ((5925, 5969), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 24)', '(0, 0)'], {'colspan': '(4)'}), '((1, 24), (0, 0), colspan=4)\n', (5941, 5969), True, 'import matplotlib.pyplot as plt\n'), ((7597, 7663), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'extend': '"""both"""', 'cax': 'cbaxes2', 'orientation': '"""horizontal"""'}), "(extend='both', cax=cbaxes2, orientation='horizontal')\n", (7609, 7663), True, 'import matplotlib.pyplot as plt\n'), ((7930, 7970), 'numpy.round', 'np.round', (['(10 ** label_values)'], {'decimals': '(0)'}), '(10 ** label_values, decimals=0)\n', (7938, 7970), True, 'import numpy as np\n'), ((8839, 8859), 'ship_mapper.checkDir', 'sm.checkDir', (['filedir'], {}), '(filedir)\n', (8850, 8859), True, 'import ship_mapper as sm\n'), ((9025, 9036), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9034, 9036), True, 'import matplotlib.pyplot as plt\n'), ((13022, 13050), 
'os.path.exists', 'os.path.exists', (['basemap_file'], {}), '(basemap_file)\n', (13036, 13050), False, 'import os\n'), ((13065, 13120), 'ship_mapper.make_basemap', 'sm.make_basemap', (['info', '[minlat, maxlat, minlon, maxlon]'], {}), '(info, [minlat, maxlat, minlon, maxlon])\n', (13080, 13120), True, 'import ship_mapper as sm\n'), ((18689, 18720), 'os.path.isfile', 'os.path.isfile', (['bathymetry_file'], {}), '(bathymetry_file)\n', (18703, 18720), False, 'import os\n'), ((19494, 19539), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 24)', '(0, 5)'], {'colspan': '(19)'}), '((1, 24), (0, 5), colspan=19)\n', (19510, 19539), True, 'import matplotlib.pyplot as plt\n'), ((22736, 22786), 'matplotlib.colors.LinearSegmentedColormap', 'LinearSegmentedColormap', (['"""my_colormap"""', 'cdict', '(256)'], {}), "('my_colormap', cdict, 256)\n", (22759, 22786), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((3549, 3564), 'numpy.max', 'np.max', (['Hmasked'], {}), '(Hmasked)\n', (3555, 3564), True, 'import numpy as np\n'), ((4637, 4655), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (4649, 4655), True, 'import matplotlib.pyplot as plt\n'), ((8881, 8912), 'os.path.join', 'os.path.join', (['filedir', 'filename'], {}), '(filedir, filename)\n', (8893, 8912), False, 'import os\n'), ((16957, 17024), 'os.path.join', 'os.path.join', (['info.dirs.project_path', 'info.grid.region', '"""ancillary"""'], {}), "(info.dirs.project_path, info.grid.region, 'ancillary')\n", (16969, 17024), False, 'import os\n'), ((23453, 23503), 'matplotlib.colors.LinearSegmentedColormap', 'LinearSegmentedColormap', (['"""my_colormap"""', 'cdict', '(256)'], {}), "('my_colormap', cdict, 256)\n", (23476, 23503), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((3281, 3296), 'numpy.min', 'np.min', (['Hmasked'], {}), '(Hmasked)\n', (3287, 3296), True, 'import numpy as np\n'), ((3324, 3339), 'numpy.max', 'np.max', (['Hmasked'], {}), '(Hmasked)\n', (3330, 3339), True, 'import numpy as np\n'), ((3368, 3387), 'numpy.nanmean', 'np.nanmean', (['Hmasked'], {}), '(Hmasked)\n', (3378, 3387), True, 'import numpy as np\n'), ((24014, 24064), 'matplotlib.colors.LinearSegmentedColormap', 'LinearSegmentedColormap', (['"""my_colormap"""', 'cdict', '(256)'], {}), "('my_colormap', cdict, 256)\n", (24037, 24064), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((6712, 6721), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6719, 6721), True, 'import matplotlib.pyplot as plt\n'), ((6954, 6963), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6961, 6963), True, 'import matplotlib.pyplot as plt\n'), ((7197, 7206), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7204, 7206), True, 'import matplotlib.pyplot as plt\n'), ((7472, 7481), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7479, 7481), True, 'import matplotlib.pyplot as plt\n'), ((8716, 8754), 'ship_mapper.get_filename_from_fullpath', 'sm.get_filename_from_fullpath', (['file_in'], {}), '(file_in)\n', (8745, 8754), True, 'import ship_mapper as sm\n'), ((17109, 17158), 'os.path.join', 'os.path.join', (['info.dirs.project_path', '"""ancillary"""'], {}), "(info.dirs.project_path, 'ancillary')\n", (17121, 17158), False, 'import os\n'), ((23617, 23640), 'numpy.array', 'np.array', (['[250, 59, 59]'], {}), '([250, 59, 59])\n', (23625, 23640), True, 'import numpy as np\n'), ((23666, 23688), 'numpy.array', 'np.array', (['[103, 0, 13]'], {}), '([103, 0, 13])\n', (23674, 23688), True, 'import 
numpy as np\n'), ((6446, 6455), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6453, 6455), True, 'import matplotlib.pyplot as plt\n'), ((10476, 10499), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10497, 10499), False, 'import datetime\n'), ((10002, 10044), 'ship_mapper.degrees_to_meters', 'sm.degrees_to_meters', (["md['bin_size']", 'alat'], {}), "(md['bin_size'], alat)\n", (10022, 10044), True, 'import ship_mapper as sm\n')]
|
import itertools
from collections import OrderedDict
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.nn import Sequential as Seq, Linear, ReLU
import torch.nn.functional as F
from torch_geometric.data import Data, Batch
from . import base_networks
from . import graph_construction as gc
from . import constants
from .util import utilities as util_
class DeleteNet(nn.Module):
def __init__(self, config):
super(DeleteNet, self).__init__()
self.node_encoder = base_networks.NodeEncoder(config['node_encoder_config'])
self.bg_fusion_module = base_networks.LinearEncoder(config['bg_fusion_module_config'])
def forward(self, graph):
"""DeleteNet forward pass.
Note: Assume that the graph contains the background node as the first node.
Args:
graph: a torch_geometric.Data instance with attributes:
                - rgb: a [N, 256, h, w] torch.FloatTensor of ResNet50+FPN rgb image features
- depth: a [N, 3, h, w] torch.FloatTensor. XYZ image
- mask: a [N, h, w] torch.FloatTensor of values in {0, 1}
- orig_masks: a [N, H, W] torch.FloatTensor of values in {0, 1}. Original image size.
- crop_indices: a [N, 4] torch.LongTensor. xmin, ymin, xmax, ymax.
Returns:
a [N] torch.FloatTensor of delete score logits. The first logit (background) is always low,
so BG is never deleted.
"""
encodings = self.node_encoder(graph) # dictionary
concat_features = torch.cat([encodings[key] for key in encodings], dim=1) # [N, \sum_i d_i]
bg_feature = concat_features[0:1] # [1, \sum_i d_i]
node_features = concat_features[1:] # [N-1, \sum_i d_i]
node_minus_bg_features = node_features - bg_feature # [N-1, \sum_i d_i]
node_delete_logits = self.bg_fusion_module(node_minus_bg_features) # [N-1, 1]
delete_logits = torch.cat([torch.ones((1, 1), device=constants.DEVICE) * -100,
node_delete_logits], dim=0)
return delete_logits[:,0]
class DeleteNetWrapper(base_networks.NetworkWrapper):
def setup(self):
if 'deletenet_model' in self.config:
self.model = self.config['deletenet_model']
else:
self.model = DeleteNet(self.config)
self.model.to(self.device)
def get_new_potential_masks(self, masks, fg_mask):
"""Compute new potential masks.
        Checks whether any connected component of fg_mask, excluding pixels already covered
        by masks, is large enough to count as a new mask, and concatenates such components to masks.
Args:
masks: a [N, H, W] torch.Tensor with values in {0, 1}.
fg_mask: a [H, W] torch.Tensor with values in {0, 1}.
Returns:
a [N + delta, H, W] np.ndarray of new masks. delta = #new_masks.
"""
occupied_mask = masks.sum(dim=0) > 0.5
fg_mask = fg_mask.cpu().numpy().astype(np.uint8)
fg_mask[occupied_mask.cpu().numpy()] = 0
fg_mask = cv2.erode(fg_mask, np.ones((3,3)), iterations=1)
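        # Erode with a 3x3 kernel to strip thin slivers of leftover foreground before
        # running connected components (pixels already covered by masks were zeroed above)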
nc, components = cv2.connectedComponents(fg_mask, connectivity=8)
components = torch.from_numpy(components).float().to(constants.DEVICE)
for j in range(1, nc):
mask = components == j
component_size = mask.sum().float()
if component_size > self.config['min_pixels_thresh']:
masks = torch.cat([masks, mask[None].float()], dim=0)
return masks
def delete_scores(self, graph):
"""Compute delete scores for each node in the graph.
Args:
graph: a torch_geometric.Data instance
Returns:
a [N] torch.Tensor with values in [0, 1]
"""
return torch.sigmoid(self.model(graph))
|
[
"torch.ones",
"numpy.ones",
"torch.cat",
"cv2.connectedComponents",
"torch.from_numpy"
] |
[((1587, 1642), 'torch.cat', 'torch.cat', (['[encodings[key] for key in encodings]'], {'dim': '(1)'}), '([encodings[key] for key in encodings], dim=1)\n', (1596, 1642), False, 'import torch\n'), ((3169, 3217), 'cv2.connectedComponents', 'cv2.connectedComponents', (['fg_mask'], {'connectivity': '(8)'}), '(fg_mask, connectivity=8)\n', (3192, 3217), False, 'import cv2\n'), ((3105, 3120), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (3112, 3120), True, 'import numpy as np\n'), ((1993, 2036), 'torch.ones', 'torch.ones', (['(1, 1)'], {'device': 'constants.DEVICE'}), '((1, 1), device=constants.DEVICE)\n', (2003, 2036), False, 'import torch\n'), ((3239, 3267), 'torch.from_numpy', 'torch.from_numpy', (['components'], {}), '(components)\n', (3255, 3267), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""Console script to generate goals for real_robots"""
import click
import numpy as np
from real_robots.envs import Goal
import gym
import math
basePosition = None
slow = False
render = False
def pairwise_distances(a):
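    # Returns an [N, N] matrix of Euclidean distances between the rows of a,
    # computed via broadcasting and an einsum over the squared differences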
b = a.reshape(a.shape[0], 1, a.shape[1])
return np.sqrt(np.einsum('ijk, ijk->ij', a-b, a-b))
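# Hedged illustration, not part of the original script: a minimal check that
# pairwise_distances() matches distances computed point-by-point with np.linalg.norm.
# The three points below are made up for the example; only numpy is assumed.
def _demo_pairwise_distances():
    pts = np.array([[0.0, 0.0, 0.4], [0.1, 0.0, 0.4], [0.0, 0.3, 0.5]])
    dists = pairwise_distances(pts)           # [3, 3] symmetric distance matrix
    manual = np.linalg.norm(pts[0] - pts[1])  # distance between the first two points
    assert np.isclose(dists[0, 1], manual)
    return dists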
def runEnv(env, max_t=1000):
reward = 0
done = False
render = slow
action = {'joint_command': np.zeros(9), 'render': render}
objects = env.robot.used_objects[1:]
positions = np.vstack([env.get_obj_pose(obj) for obj in objects])
still = False
stable = 0
for t in range(max_t):
old_positions = positions
observation, reward, done, _ = env.step(action)
positions = np.vstack([env.get_obj_pose(obj) for obj in objects])
maxPosDiff = 0
maxOrientDiff = 0
for i, obj in enumerate(objects):
posDiff = np.linalg.norm(old_positions[i][:3] - positions[i][:3])
q1 = old_positions[i][3:]
q2 = positions[i][3:]
orientDiff = min(np.linalg.norm(q1 - q2), np.linalg.norm(q1+q2))
maxPosDiff = max(maxPosDiff, posDiff)
maxOrientDiff = max(maxOrientDiff, orientDiff)
if maxPosDiff < 0.0001 and maxOrientDiff < 0.001 and t > 10:
stable += 1
else:
stable = 0
action['render'] = slow
if stable > 19:
action['render'] = True
if stable > 20:
still = True
break
pos_dict = {}
for obj in objects:
pos_dict[obj] = env.get_obj_pose(obj)
print("Exiting environment after {} timesteps..".format(t))
if not still:
print("Failed because maxPosDiff:{:.6f},"
"maxOrientDiff:{:.6f}".format(maxPosDiff, maxOrientDiff))
return observation['retina'], pos_dict, not still, t, observation['mask']
class Position:
def __init__(self, start_state=None, fixed_state=None, retina=None, mask=None):
self.start_state = start_state
self.fixed_state = fixed_state
self.retina = retina
self.mask = mask
def generatePosition(env, obj, fixed=False, tablePlane=None):
if tablePlane is None:
min_x = -.25
max_x = .25
elif tablePlane:
min_x = -.25
max_x = .05
else:
min_x = .10
max_x = .25
min_y = -.45
max_y = .45
x = np.random.rand()*(max_x-min_x)+min_x
y = np.random.rand()*(max_y-min_y)+min_y
if x <= 0.05:
z = 0.40
else:
z = 0.50
if fixed:
orientation = basePosition[obj][3:]
else:
orientation = (np.random.rand(3)*math.pi*2).tolist()
orientation = env._p.getQuaternionFromEuler(orientation)
pose = [x, y, z] + np.array(orientation).tolist()
return pose
def generateRealPosition(env, startPositions):
env.reset()
runEnv(env)
# Generate Images
for obj in startPositions:
pos = startPositions[obj]
env.robot.object_bodies[obj].reset_pose(pos[:3], pos[3:])
actual_image, actual_position, failed, it, mask = runEnv(env)
return actual_image, actual_position, failed, it, mask
def checkMinSeparation(state):
positions = np.vstack([state[obj][:3] for obj in state])
if len(positions) > 1:
distances = pairwise_distances(positions)
clearance = distances[distances > 0].min()
else:
clearance = np.inf
return clearance
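# Hedged illustration, not part of the original script: checkMinSeparation() applied to a
# hand-made state dictionary mimicking the {object_name: 7-dof pose} layout used by
# runEnv(). The poses are invented; only numpy and the functions above are assumed.
def _demo_min_separation():
    state = {
        'cube':    [0.00, 0.00, 0.40, 0.0, 0.0, 0.0, 1.0],
        'tomato':  [0.10, 0.05, 0.40, 0.0, 0.0, 0.0, 1.0],
        'mustard': [0.20, -0.30, 0.50, 0.0, 0.0, 0.0, 1.0],
    }
    clearance = checkMinSeparation(state)  # smallest pairwise distance between the xyz positions
    return clearance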
def drawPosition(env, fixedOrientation=False, fixedObjects=[],
fixedPositions=None, minSeparation=0, objOnTable=None):
failed = True
while failed:
        # skip 1st object, i.e. the table
objects = env.robot.used_objects[1:]
position = Position()
startPositions = {}
for obj in fixedObjects:
startPositions[obj] = fixedPositions[obj]
for obj in np.random.permutation(objects):
if obj in fixedObjects:
continue
while True:
table = None
if objOnTable is not None:
if obj in objOnTable:
table = objOnTable[obj]
startPose = generatePosition(env, obj,
fixedOrientation,
tablePlane=table)
startPositions[obj] = startPose
if len(startPositions) == 1:
break
clearance = checkMinSeparation(startPositions)
if clearance >= minSeparation:
break
print("Failed minimum separation ({}), draw again {}.."
.format(clearance, obj))
(a, p, f, it, m) = generateRealPosition(env, startPositions)
actual_image = a
actual_mask = m
actual_position = p
failed = f
if failed:
print("Failed image generation...")
continue
clearance = checkMinSeparation(actual_position)
if clearance < minSeparation:
failed = True
print("Failed minimum separation ({}) after real generation, "
"draw again everything..".format(clearance))
continue
if fixedOrientation:
for obj in objects:
q1 = startPositions[obj][3:]
q2 = actual_position[obj][3:]
orientDiff = min(np.linalg.norm(q1 - q2),
np.linalg.norm(q1+q2))
                # TODO: check this - we had to raise it many times
failed = failed or orientDiff > 0.041
if failed:
print("{} changed orientation by {}"
.format(obj, orientDiff))
break
else:
print("{} kept orientation.".format(obj))
if failed:
print("Failed to keep orientation...")
continue
for obj in fixedObjects:
posDiff = np.linalg.norm(startPositions[obj][:3] -
actual_position[obj][:3])
q1 = startPositions[obj][3:]
q2 = actual_position[obj][3:]
orientDiff = min(np.linalg.norm(q1 - q2), np.linalg.norm(q1+q2))
failed = failed or posDiff > 0.002 or orientDiff > 0.041
if failed:
print("{} changed pos by {} and orientation by {}"
.format(obj, posDiff, orientDiff))
print(startPositions[obj])
print(actual_position[obj])
break
if failed:
print("Failed to keep objects fixed...")
continue
position.start_state = startPositions
position.fixed_state = actual_position
position.retina = actual_image
position.mask = actual_mask
return position
def checkRepeatability(env, goals):
maxDiffPos = 0
maxDiffOr = 0
for goal in goals:
_, pos, failed, _, _ = generateRealPosition(env, goal.initial_state)
objects = [o for o in goal.initial_state]
p0 = np.vstack([goal.initial_state[o] for o in objects])
p1 = np.vstack([pos[o] for o in objects])
diffPos = np.linalg.norm(p1[:, :3]-p0[:, :3])
diffOr = min(np.linalg.norm(p1[:, 3:]-p0[:, 3:]),
np.linalg.norm(p1[:, 3:]+p0[:, 3:]))
maxDiffPos = max(maxDiffPos, diffPos)
        maxDiffOr = max(maxDiffOr, diffOr)
print("Replicated diffPos:{} diffOr:{}".format(diffPos, diffOr))
if failed:
print("*****************FAILED************!!!!")
return 1000000
return maxDiffPos, maxDiffOr
def isOnShelf(obj, state):
z = state[obj][2]
if obj == 'cube' and z > 0.55 - 0.15:
return True
if obj == 'orange' and z > 0.55 - 0.15:
return True
if obj == 'tomato' and z > 0.55 - 0.15:
return True
if obj == 'mustard' and z > 0.545 - 0.15:
return True
return False
def isOnTable(obj, state):
z = state[obj][2]
if obj == 'cube' and z < 0.48 - 0.15:
return True
if obj == 'orange' and z < 0.48 - 0.15:
return True
if obj == 'tomato' and z < 0.49 - 0.15:
return True
if obj == 'mustard' and z < 0.48 - 0.15:
return True
return False
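# Hedged illustration, not part of the original script: the shelf/table heuristics above
# only look at the z coordinate of each object pose. The poses below are invented; they
# simply show one object classified as on the shelf and one as on the table.
def _demo_shelf_table_heuristics():
    state = {
        'cube':   [0.0, 0.0, 0.45, 0.0, 0.0, 0.0, 1.0],  # z=0.45 > 0.40 -> shelf
        'tomato': [0.1, 0.2, 0.30, 0.0, 0.0, 0.0, 1.0],  # z=0.30 < 0.34 -> table
    }
    return isOnShelf('cube', state), isOnTable('tomato', state)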
def generateGoalREAL2020(env, n_obj, goal_type, on_shelf=False, min_start_goal_dist=0.1, min_objects_dist=0.05, max_objects_dist=2):
print("Generating GOAL..")
objOnTable = None
if not on_shelf:
objects = env.robot.used_objects[1:]
objOnTable = {}
for obj in objects:
objOnTable[obj] = True
if goal_type == '3D':
fixedOrientation = False
else:
fixedOrientation = True
found = False
while not(found):
initial = drawPosition(env, fixedOrientation=fixedOrientation, objOnTable=objOnTable, minSeparation=min_objects_dist)
found = True
# checks whether at least two objects are close together as specified in max_objects_dist
if n_obj == 1:
at_least_two_near_objects = True
else:
at_least_two_near_objects = False
for obj1 in initial.fixed_state.keys():
for obj2 in initial.fixed_state.keys():
if obj1 == obj2:
continue
if np.linalg.norm(initial.fixed_state[obj1][:3]-initial.fixed_state[obj2][:3]) <= max_objects_dist or goal_type != '3D' or len(initial.fixed_state.keys()) == 1:
at_least_two_near_objects = True
break
if at_least_two_near_objects:
break
        # checks if at least one object is on the shelf
at_least_one_on_shelf = False
for obj in initial.fixed_state.keys():
if isOnShelf(obj, initial.fixed_state) or goal_type == '2D':
at_least_one_on_shelf = True
break
found = False
while not(found):
found = True
final = drawPosition(env, fixedOrientation=fixedOrientation, objOnTable=objOnTable, minSeparation=min_objects_dist)
        # checks whether at least two objects are close together as specified in max_objects_dist. This is checked only if it was not already true in the initial positions
if not at_least_two_near_objects:
found = False
for obj1 in final.fixed_state.keys():
for obj2 in final.fixed_state.keys():
if obj1 == obj2:
continue
if np.linalg.norm(final.fixed_state[obj1][:3]-final.fixed_state[obj2][:3]) <= max_objects_dist:
found = True
break
if found:
break
        # checks if at least one object is on the shelf. This is checked only if it was not already true in the initial positions
if found and not at_least_one_on_shelf:
found = False
for obj in final.fixed_state.keys():
if isOnShelf(obj, final.fixed_state):
found = True
break
# checks if the distance between initial and final positions of the objects is at least how much specified in min_start_goal_dist
for obj in final.fixed_state.keys():
if min_start_goal_dist > np.linalg.norm(final.fixed_state[obj][:2]-initial.fixed_state[obj][:2]):
found = False
break
goal = Goal()
goal.challenge = goal_type
goal.subtype = str(n_obj)
goal.initial_state = initial.fixed_state
goal.final_state = final.fixed_state
goal.retina_before = initial.retina
goal.retina = final.retina
goal.mask = final.mask
    print("SUCCESSFUL generation of GOAL {}!".format(goal_type))
return goal
def visualizeGoalDistribution(all_goals, images=True):
import matplotlib.pyplot as plt
challenges = np.unique([goal.challenge for goal in all_goals])
fig, axes = plt.subplots(max(2, len(challenges)), 3)
for c, challenge in enumerate(challenges):
goals = [goal for goal in all_goals if goal.challenge == challenge]
if len(goals) > 0:
if images:
# Superimposed images view
tomatos = sum([goal.mask == 2 for goal in goals])
mustards = sum([goal.mask == 3 for goal in goals])
cubes = sum([goal.mask == 4 for goal in goals])
axes[c, 0].imshow(tomatos, cmap='gray')
axes[c, 1].imshow(mustards, cmap='gray')
axes[c, 2].imshow(cubes, cmap='gray')
else:
# Positions scatter view
for i, o in enumerate(goals[0].final_state.keys()):
positions = np.vstack([goal.final_state[o] for goal in goals])
axes[c, i].set_title("{} {}".format(o, challenge))
axes[c, i].hist2d(positions[:, 0], positions[:, 1])
axes[c, i].set_xlim([-0.3, 0.3])
axes[c, i].set_ylim([-0.6, 0.6])
plt.show()
@click.command()
@click.option('--seed', type=int,
help='Generate goals using this SEED for numpy.random')
@click.option('--n_2d_goals', type=int, default=25,
help='# of 2D goals (default 25)')
@click.option('--n_25d_goals', type=int, default=15,
help='# of 2.5D goals (default 15)')
@click.option('--n_3d_goals', type=int, default=10,
help='# of 3D goals (default 10)')
@click.option('--n_obj', type=int, default=3,
help='# of objects (default 3)')
def main(seed=None, n_2d_goals=25, n_25d_goals=15, n_3d_goals=10, n_obj=3):
"""
Generates the specified number of goals
and saves them in a file.\n
The file is called goals-REAL2020-s{}-{}-{}-{}-{}.npy.npz
where enclosed brackets are replaced with the
supplied options (seed, n_2d_goals, n_25d_goals, n_3d_goals, n_obj)
or the default value.
"""
np.random.seed(seed)
allgoals = []
env = gym.make('REALRobot2020-R1J{}-v0'.format(n_obj))
if render:
env.render('human')
env.reset()
global basePosition
_, basePosition, _, _, _ = runEnv(env)
# In these for loops, we could add some progress bar...
for _ in range(n_2d_goals):
allgoals += [generateGoalREAL2020(env, n_obj, "2D", on_shelf=False, min_start_goal_dist=0.2, min_objects_dist=0.25)]
for _ in range(n_25d_goals):
allgoals += [generateGoalREAL2020(env, n_obj, "2.5D", on_shelf=True, min_start_goal_dist=0.2, min_objects_dist=0.25)]
for _ in range(n_3d_goals):
allgoals += [generateGoalREAL2020(env, n_obj, "3D", on_shelf=True, min_start_goal_dist=0.2, min_objects_dist=0)]
np.savez_compressed('goals-REAL2020-s{}-{}-{}-{}-{}.npy'
.format(seed, n_2d_goals, n_25d_goals, n_3d_goals, n_obj), allgoals)
checkRepeatability(env, allgoals)
visualizeGoalDistribution(allgoals)
if __name__ == "__main__":
main()
|
[
"matplotlib.pyplot.show",
"numpy.random.seed",
"numpy.random.rand",
"click.option",
"numpy.einsum",
"numpy.zeros",
"click.command",
"numpy.linalg.norm",
"numpy.array",
"numpy.random.permutation",
"numpy.vstack",
"real_robots.envs.Goal",
"numpy.unique"
] |
[((13119, 13134), 'click.command', 'click.command', ([], {}), '()\n', (13132, 13134), False, 'import click\n'), ((13136, 13229), 'click.option', 'click.option', (['"""--seed"""'], {'type': 'int', 'help': '"""Generate goals using this SEED for numpy.random"""'}), "('--seed', type=int, help=\n 'Generate goals using this SEED for numpy.random')\n", (13148, 13229), False, 'import click\n'), ((13240, 13330), 'click.option', 'click.option', (['"""--n_2d_goals"""'], {'type': 'int', 'default': '(25)', 'help': '"""# of 2D goals (default 25)"""'}), "('--n_2d_goals', type=int, default=25, help=\n '# of 2D goals (default 25)')\n", (13252, 13330), False, 'import click\n'), ((13341, 13434), 'click.option', 'click.option', (['"""--n_25d_goals"""'], {'type': 'int', 'default': '(15)', 'help': '"""# of 2.5D goals (default 15)"""'}), "('--n_25d_goals', type=int, default=15, help=\n '# of 2.5D goals (default 15)')\n", (13353, 13434), False, 'import click\n'), ((13445, 13535), 'click.option', 'click.option', (['"""--n_3d_goals"""'], {'type': 'int', 'default': '(10)', 'help': '"""# of 3D goals (default 10)"""'}), "('--n_3d_goals', type=int, default=10, help=\n '# of 3D goals (default 10)')\n", (13457, 13535), False, 'import click\n'), ((13546, 13623), 'click.option', 'click.option', (['"""--n_obj"""'], {'type': 'int', 'default': '(3)', 'help': '"""# of objects (default 3)"""'}), "('--n_obj', type=int, default=3, help='# of objects (default 3)')\n", (13558, 13623), False, 'import click\n'), ((3266, 3310), 'numpy.vstack', 'np.vstack', (['[state[obj][:3] for obj in state]'], {}), '([state[obj][:3] for obj in state])\n', (3275, 3310), True, 'import numpy as np\n'), ((11508, 11514), 'real_robots.envs.Goal', 'Goal', ([], {}), '()\n', (11512, 11514), False, 'from real_robots.envs import Goal\n'), ((11954, 12003), 'numpy.unique', 'np.unique', (['[goal.challenge for goal in all_goals]'], {}), '([goal.challenge for goal in all_goals])\n', (11963, 12003), True, 'import numpy as np\n'), ((13105, 13115), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13113, 13115), True, 'import matplotlib.pyplot as plt\n'), ((14044, 14064), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (14058, 14064), True, 'import numpy as np\n'), ((312, 351), 'numpy.einsum', 'np.einsum', (['"""ijk, ijk->ij"""', '(a - b)', '(a - b)'], {}), "('ijk, ijk->ij', a - b, a - b)\n", (321, 351), True, 'import numpy as np\n'), ((461, 472), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (469, 472), True, 'import numpy as np\n'), ((3925, 3955), 'numpy.random.permutation', 'np.random.permutation', (['objects'], {}), '(objects)\n', (3946, 3955), True, 'import numpy as np\n'), ((7181, 7232), 'numpy.vstack', 'np.vstack', (['[goal.initial_state[o] for o in objects]'], {}), '([goal.initial_state[o] for o in objects])\n', (7190, 7232), True, 'import numpy as np\n'), ((7246, 7282), 'numpy.vstack', 'np.vstack', (['[pos[o] for o in objects]'], {}), '([pos[o] for o in objects])\n', (7255, 7282), True, 'import numpy as np\n'), ((7301, 7338), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1[:, :3] - p0[:, :3])'], {}), '(p1[:, :3] - p0[:, :3])\n', (7315, 7338), True, 'import numpy as np\n'), ((942, 997), 'numpy.linalg.norm', 'np.linalg.norm', (['(old_positions[i][:3] - positions[i][:3])'], {}), '(old_positions[i][:3] - positions[i][:3])\n', (956, 997), True, 'import numpy as np\n'), ((2446, 2462), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2460, 2462), True, 'import numpy as np\n'), ((2491, 2507), 'numpy.random.rand', 
'np.random.rand', ([], {}), '()\n', (2505, 2507), True, 'import numpy as np\n'), ((6086, 6152), 'numpy.linalg.norm', 'np.linalg.norm', (['(startPositions[obj][:3] - actual_position[obj][:3])'], {}), '(startPositions[obj][:3] - actual_position[obj][:3])\n', (6100, 6152), True, 'import numpy as np\n'), ((7358, 7395), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1[:, 3:] - p0[:, 3:])'], {}), '(p1[:, 3:] - p0[:, 3:])\n', (7372, 7395), True, 'import numpy as np\n'), ((7416, 7453), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1[:, 3:] + p0[:, 3:])'], {}), '(p1[:, 3:] + p0[:, 3:])\n', (7430, 7453), True, 'import numpy as np\n'), ((1099, 1122), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 - q2)'], {}), '(q1 - q2)\n', (1113, 1122), True, 'import numpy as np\n'), ((1124, 1147), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 + q2)'], {}), '(q1 + q2)\n', (1138, 1147), True, 'import numpy as np\n'), ((2810, 2831), 'numpy.array', 'np.array', (['orientation'], {}), '(orientation)\n', (2818, 2831), True, 'import numpy as np\n'), ((6302, 6325), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 - q2)'], {}), '(q1 - q2)\n', (6316, 6325), True, 'import numpy as np\n'), ((6327, 6350), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 + q2)'], {}), '(q1 + q2)\n', (6341, 6350), True, 'import numpy as np\n'), ((11371, 11444), 'numpy.linalg.norm', 'np.linalg.norm', (['(final.fixed_state[obj][:2] - initial.fixed_state[obj][:2])'], {}), '(final.fixed_state[obj][:2] - initial.fixed_state[obj][:2])\n', (11385, 11444), True, 'import numpy as np\n'), ((5480, 5503), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 - q2)'], {}), '(q1 - q2)\n', (5494, 5503), True, 'import numpy as np\n'), ((5538, 5561), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 + q2)'], {}), '(q1 + q2)\n', (5552, 5561), True, 'import numpy as np\n'), ((12800, 12850), 'numpy.vstack', 'np.vstack', (['[goal.final_state[o] for goal in goals]'], {}), '([goal.final_state[o] for goal in goals])\n', (12809, 12850), True, 'import numpy as np\n'), ((2683, 2700), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (2697, 2700), True, 'import numpy as np\n'), ((9425, 9502), 'numpy.linalg.norm', 'np.linalg.norm', (['(initial.fixed_state[obj1][:3] - initial.fixed_state[obj2][:3])'], {}), '(initial.fixed_state[obj1][:3] - initial.fixed_state[obj2][:3])\n', (9439, 9502), True, 'import numpy as np\n'), ((10590, 10663), 'numpy.linalg.norm', 'np.linalg.norm', (['(final.fixed_state[obj1][:3] - final.fixed_state[obj2][:3])'], {}), '(final.fixed_state[obj1][:3] - final.fixed_state[obj2][:3])\n', (10604, 10663), True, 'import numpy as np\n')]
|
import numpy as np
class Sersic:
def b(self,n):
return 1.9992*n - 0.3271 + 4*(405*n)**-1
def kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0):
bn = self.b(n_sersic)
r = (x**2+y**2*q**-2)**0.5
return k_eff*np.exp(-bn*((r*r_eff**-1)**(n_sersic**-1)-1))
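# Hedged illustration, not part of the original class: evaluating the Sersic convergence
# profile on a small grid. The parameter values (n_sersic=4, r_eff=1.0, k_eff=0.5, q=0.8)
# are made up for the example; only numpy is assumed.
def _demo_sersic_profile():
    profile = Sersic()
    x, y = np.meshgrid(np.linspace(-2.0, 2.0, 5), np.linspace(-2.0, 2.0, 5))
    kappa = profile.kappa(x, y, n_sersic=4.0, r_eff=1.0, k_eff=0.5, q=0.8)
    return kappa  # [5, 5] array, peaked at the origin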
|
[
"numpy.exp"
] |
[((270, 325), 'numpy.exp', 'np.exp', (['(-bn * ((r * r_eff ** -1) ** n_sersic ** -1 - 1))'], {}), '(-bn * ((r * r_eff ** -1) ** n_sersic ** -1 - 1))\n', (276, 325), True, 'import numpy as np\n')]
|
import pandas as pd
import pdb
import requests
import numpy as np
import os, sys
import xarray as xr
from datetime import datetime, timedelta
import logging
from scipy.interpolate import PchipInterpolator
import argparse
from collections import OrderedDict, defaultdict
class PchipOceanSlices(object):
def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False):
self.appLocal = appLocal
self.datesSet = self.get_dates_set()
self.exceptBasin = exceptBasin
self.starttdx = starttdx
self.reduceMeas = False #removes excess points from db query
self.qcKeep = set([1,2]) # used to filter bad positions and dates
self.basin = basin # indian ocean only Set to None otherwise
self.presLevels = [ 2.5, 10. , 20. , 30. , 40. , 50. , 60. , 70. , 80. ,
90. , 100. , 110. , 120. , 130. , 140. , 150. , 160. , 170. ,
182.5, 200. , 220. , 240. , 260. , 280. , 300. , 320. , 340. ,
360. , 380. , 400. , 420. , 440. , 462.5, 500. , 550. , 600. ,
650. , 700. , 750. , 800. , 850. , 900. , 950. , 1000. , 1050. ,
1100. , 1150. , 1200. , 1250. , 1300. , 1350. , 1412.5, 1500. , 1600. ,
1700. , 1800. , 1900. , 1975., 2000.]
self.pLevelRange = pLevelRange
self.presRanges = self.make_rg_pres_ranges()
self.reduce_presLevels_and_presRanges()
@staticmethod
def get_dates_set(period=30):
"""
create a set of dates split into n periods.
period is in days.
"""
n_rows = int(np.floor(365/period))
datesSet = []
for year in range(2007, 2019):
yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows)
datesSet = datesSet + yearSet
keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')]
datesSet = list(map(keepEnds, datesSet))
return datesSet
@staticmethod
def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False):
'''
        Query a horizontal slice of the ocean for a specified time range.
        startDate and endDate should be strings formatted as 'YYYY-MM-DD'.
        presRange should be a string formatted as '[lowPres,highPres]'.
        Try to keep the query small enough that it does not exceed the 15 MB limit set by the database.
'''
if appLocal:
baseURL = 'http://localhost:3000'
else:
baseURL = 'https://argovis.colorado.edu'
baseURL += '/gridding/presSliceForInterpolation/'
startDateQuery = '?startDate=' + startDate
endDateQuery = '&endDate=' + endDate
presRangeQuery = '&presRange=' + presRange
intPresQuery = '&intPres=' + str(intPres)
url = baseURL + startDateQuery + endDateQuery + presRangeQuery + intPresQuery
if basin:
basinQuery = '&basin=' + basin
url += basinQuery
url += '&reduceMeas=' + str(reduceMeas).lower()
resp = requests.get(url)
# Consider any status other than 2xx an error
if not resp.status_code // 100 == 2:
raise ValueError("Error: Unexpected response {}".format(resp))
profiles = resp.json()
return profiles
def reject_profile(self, profile):
if not profile['position_qc'] in self.qcKeep:
reject = True
elif not profile['date_qc'] in self.qcKeep:
reject = True
elif len(profile['measurements']) < 2: # cannot be interpolated
reject = True
elif profile['BASIN'] in self.exceptBasin: # ignores basins
reject=True
else:
reject = False
return reject
@staticmethod
def make_profile_interpolation_function(x,y):
'''
        Creates a PCHIP interpolation function from the x and y arrays
        (monotonic x, e.g. pressure, and the variable to interpolate).
'''
try:
f = PchipInterpolator(x, y, axis=1, extrapolate=False)
except Exception as err:
pdb.set_trace()
logging.warning(err)
raise Exception
return f
@staticmethod
def make_pres_ranges(presLevels):
"""
        Pressure ranges are based on the depth category:
surface: at 2.5 dbar +- 2.5
shallow: 10 to 182.5 dbar +- 5
medium: 200 to 462.5 dbar +- 15
deep: 500 to 1050 dbar +- 30
        abyssal: 1100 to 1975 dbar +- 60
"""
stringifyArray = lambda x: str(x).replace(' ', '')
surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+ 2.5]]
shallowRanges = [ [x - 5, x + 5] for x in presLevels[1:19] ]
mediumRanges = [ [x - 15, x + 15] for x in presLevels[19:33] ]
deepRanges = [ [x - 30, x + 30] for x in presLevels[33:45] ]
abbysalRanges = [ [x - 60, x + 60] for x in presLevels[45:] ]
presRanges = surfaceRange + shallowRanges + mediumRanges + deepRanges + abbysalRanges
presRanges = [stringifyArray(x) for x in presRanges]
return presRanges
@staticmethod
def make_rg_pres_ranges():
'''
uses pressure ranges defined in RG climatology
'''
rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc'
rg = xr.open_dataset(rgFilename, decode_times=False)
bnds = rg['PRESSURE_bnds']
presRanges = bnds.values.tolist()
stringifyArray = lambda x: str(x).replace(' ', '')
presRanges = [stringifyArray(x) for x in presRanges]
return presRanges
@staticmethod
def save_iDF(iDf, filename, tdx):
iDf.date = pd.to_datetime(iDf.date)
iDf.date = iDf.date.apply(lambda d: d.strftime("%d-%b-%Y %H:%M:%S"))
if not iDf.empty:
with open(filename, 'a') as f:
if tdx==0:
iDf.to_csv(f, header=True)
else:
iDf.to_csv(f, header=False)
@staticmethod
def record_to_array(measurements, xLab, yLab):
x = []
y = []
for meas in measurements:
x.append(meas[xLab])
y.append(meas[yLab])
return x, y
@staticmethod
def sort_list(x, y):
'''sort x based off of y'''
xy = zip(x, y)
ys = [y for _, y in sorted(xy)]
xs = sorted(x)
return xs, ys
@staticmethod
def unique_idxs(seq):
'''gets unique, non nan and non -999 indexes'''
tally = defaultdict(list)
for idx,item in enumerate(seq):
tally[item].append(idx)
dups = [ (key,locs) for key,locs in tally.items() ]
        dups = [ (key, locs) for key, locs in dups if not np.isnan(key) and key not in {-999, None} ]
idxs = []
for dup in sorted(dups):
idxs.append(dup[1][0])
return idxs
def format_xy(self, x, y):
'''prep for interpolation'''
x2, y2 = self.sort_list(x, y)
try:
x_dup_idx = self.unique_idxs(x2)
xu = [x2[idx] for idx in x_dup_idx]
yu = [y2[idx] for idx in x_dup_idx]
            # drop -999, None and NaN placeholder values
            y_nan_idx = [idx for idx, key in enumerate(yu) if key not in {-999, None, np.NaN}]
except Exception as err:
pdb.set_trace()
print(err)
xu = [xu[idx] for idx in y_nan_idx]
yu = [yu[idx] for idx in y_nan_idx]
return xu, yu
def make_interpolated_profile(self, profile, xintp, xLab, yLab):
meas = profile['measurements']
if len(meas) == 0:
return None
if not yLab in meas[0].keys():
return None
x, y = self.record_to_array(meas, xLab, yLab)
x, y = self.format_xy(x, y)
if len(x) < 2: # pchip needs at least two points
return None
f = self.make_profile_interpolation_function(x, y)
rowDict = profile.copy()
del rowDict['measurements']
rowDict[xLab] = xintp
if len(meas) == 1 and meas[xLab][0] == xintp:
yintp = meas[yLab][0]
else:
yintp = f(xintp)
rowDict[yLab] = yintp
return rowDict
def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'):
'''
make a dataframe of interpolated values set at xintp for each profile
xLab: the column name for the interpolation input x
yLab: the column to be interpolated
xintp: the values to be interpolated
'''
outArray = []
for profile in profiles:
rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab)
if rowDict:
outArray.append(rowDict)
outDf = pd.DataFrame(outArray)
outDf = outDf.rename({'_id': 'profile_id'}, axis=1)
outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0)
logging.debug('number of rows in df: {}'.format(outDf.shape[0]))
logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique())))
return outDf
def intp_pres(self, xintp, presRange):
if self.basin:
iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin)
iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin)
else:
iTempFileName = 'iTempData_pres_{}.csv'.format(xintp)
iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp)
start = datetime.now()
logging.debug('number of dates:{}'.format(len(self.datesSet)))
for tdx, dates in enumerate(self.datesSet):
if tdx < self.starttdx:
continue
logging.debug('starting interpolation at time index: {}'.format(tdx))
startDate, endDate = dates
try:
sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas)
except Exception as err:
                logging.warning('profiles not received: {}'.format(err))
continue
logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx))
logging.debug('number of profiles found in interval: {}'.format(len(sliceProfiles)))
try:
iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp')
except Exception as err:
logging.warning('error when interpolating temp')
logging.warning(err)
continue
try:
iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal')
except Exception as err:
pdb.set_trace()
logging.warning('error when interpolating psal')
logging.warning(err)
continue
self.save_iDF(iTempDf, iTempFileName, tdx)
self.save_iDF(iPsalDf, iPsalFileName, tdx)
logging.debug('interpolation complete at time index: {}'.format(tdx))
timeTick = datetime.now()
logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M'))
dt = timeTick-start
logging.debug('completed run for psal {0} running for: {1}'.format(xintp, dt))
def reduce_presLevels_and_presRanges(self):
'''
reduces presLevels and pres ranges to those specified in pLevelRange
'''
self.startIdx = self.presLevels.index(self.pLevelRange[0])
self.endIdx = self.presLevels.index(self.pLevelRange[1])
self.presLevels = self.presLevels[ self.startIdx:self.endIdx ]
self.presRanges = self.presRanges[ self.startIdx:self.endIdx ]
def main(self):
logging.debug('inside main loop')
logging.debug('running pressure level ranges: {}'.format(self.pLevelRange))
for idx, presLevel in enumerate(self.presLevels):
xintp = presLevel
presRange = self.presRanges[idx]
self.intp_pres(xintp, presRange)
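# Hedged illustration, not part of the original script: the two purely static helpers
# above can be exercised without touching the Argovis API or the RG climatology file.
# The 30-day period and the three pressure levels below are made up for the example.
def _demo_static_helpers():
    date_windows = PchipOceanSlices.get_dates_set(period=30)
    # each entry is a ['YYYY-MM-DD', 'YYYY-MM-DD'] pair covering roughly one month
    first_window = date_windows[0]
    pres_ranges = PchipOceanSlices.make_pres_ranges([2.5, 10.0, 200.0])
    # stringified ranges, e.g. '[0.0,5.0]' for the surface level
    return first_window, pres_ranges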
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--maxl", help="start on pressure level", type=float, nargs='?', default=2000)
parser.add_argument("--minl", help="end on pressure level", type=float, nargs='?', default=1975)
parser.add_argument("--basin", help="filter this basin", type=str, nargs='?', default=None)
parser.add_argument("--starttdx", help="start time index", type=int, nargs='?', default=0)
parser.add_argument("--logFileName", help="name of log file", type=str, nargs='?', default='pchipOceanSlices.log')
myArgs = parser.parse_args()
pLevelRange = [myArgs.minl, myArgs.maxl]
basin = myArgs.basin
starttdx = myArgs.starttdx
#idxStr = str(myArgs.minl) + ':' + str(myArgs.maxl)
#logFileName = 'pchipOceanSlices{}.log'.format(idxStr)
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT,
filename=myArgs.logFileName,
level=logging.DEBUG)
logging.debug('Start of log file')
startTime = datetime.now()
pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True)
pos.main()
endTime = datetime.now()
dt = endTime - startTime
logging.debug('end of log file for pressure level ranges: {}'.format(pLevelRange))
dtStr = 'time to complete: {} seconds'.format(dt.seconds)
print(dtStr)
logging.debug(dtStr)
|
[
"pandas.DataFrame",
"scipy.interpolate.PchipInterpolator",
"logging.debug",
"argparse.ArgumentParser",
"logging.basicConfig",
"logging.warning",
"numpy.floor",
"xarray.open_dataset",
"numpy.isnan",
"collections.defaultdict",
"pandas.to_datetime",
"pdb.set_trace",
"requests.get",
"datetime.datetime.now"
] |
[((12241, 12337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawTextHelpFormatter)\n', (12264, 12337), False, 'import argparse\n'), ((13171, 13260), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT', 'filename': 'myArgs.logFileName', 'level': 'logging.DEBUG'}), '(format=FORMAT, filename=myArgs.logFileName, level=\n logging.DEBUG)\n', (13190, 13260), False, 'import logging\n'), ((13309, 13343), 'logging.debug', 'logging.debug', (['"""Start of log file"""'], {}), "('Start of log file')\n", (13322, 13343), False, 'import logging\n'), ((13360, 13374), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13372, 13374), False, 'from datetime import datetime, timedelta\n'), ((13507, 13521), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13519, 13521), False, 'from datetime import datetime, timedelta\n'), ((13721, 13741), 'logging.debug', 'logging.debug', (['dtStr'], {}), '(dtStr)\n', (13734, 13741), False, 'import logging\n'), ((3238, 3255), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3250, 3255), False, 'import requests\n'), ((5476, 5523), 'xarray.open_dataset', 'xr.open_dataset', (['rgFilename'], {'decode_times': '(False)'}), '(rgFilename, decode_times=False)\n', (5491, 5523), True, 'import xarray as xr\n'), ((5822, 5846), 'pandas.to_datetime', 'pd.to_datetime', (['iDf.date'], {}), '(iDf.date)\n', (5836, 5846), True, 'import pandas as pd\n'), ((6664, 6681), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6675, 6681), False, 'from collections import OrderedDict, defaultdict\n'), ((8910, 8932), 'pandas.DataFrame', 'pd.DataFrame', (['outArray'], {}), '(outArray)\n', (8922, 8932), True, 'import pandas as pd\n'), ((9666, 9680), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9678, 9680), False, 'from datetime import datetime, timedelta\n'), ((11251, 11265), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11263, 11265), False, 'from datetime import datetime, timedelta\n'), ((11904, 11937), 'logging.debug', 'logging.debug', (['"""inside main loop"""'], {}), "('inside main loop')\n", (11917, 11937), False, 'import logging\n'), ((1721, 1743), 'numpy.floor', 'np.floor', (['(365 / period)'], {}), '(365 / period)\n', (1729, 1743), True, 'import numpy as np\n'), ((4158, 4208), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['x', 'y'], {'axis': '(1)', 'extrapolate': '(False)'}), '(x, y, axis=1, extrapolate=False)\n', (4175, 4208), False, 'from scipy.interpolate import PchipInterpolator\n'), ((4254, 4269), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (4267, 4269), False, 'import pdb\n'), ((4282, 4302), 'logging.warning', 'logging.warning', (['err'], {}), '(err)\n', (4297, 4302), False, 'import logging\n'), ((7473, 7488), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7486, 7488), False, 'import pdb\n'), ((10612, 10660), 'logging.warning', 'logging.warning', (['"""error when interpolating temp"""'], {}), "('error when interpolating temp')\n", (10627, 10660), False, 'import logging\n'), ((10677, 10697), 'logging.warning', 'logging.warning', (['err'], {}), '(err)\n', (10692, 10697), False, 'import logging\n'), ((10884, 10899), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (10897, 10899), False, 'import pdb\n'), ((10916, 10964), 'logging.warning', 'logging.warning', (['"""error when interpolating psal"""'], {}), "('error when 
interpolating psal')\n", (10931, 10964), False, 'import logging\n'), ((10981, 11001), 'logging.warning', 'logging.warning', (['err'], {}), '(err)\n', (10996, 11001), False, 'import logging\n'), ((6876, 6889), 'numpy.isnan', 'np.isnan', (['key'], {}), '(key)\n', (6884, 6889), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import Slider
import cv2 as cv
FILE_NAME = 'res/mountain-and-lake.jpg'
# https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html
# https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting
# img:
# image in rgb
#
# satadj:
# 1.0 means no change. Values below 1.0 move the image toward greyscale,
# and around 1.5 is already a very strong saturation boost
def saturate(img, satadj):
imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype("float32")
(h, s, v) = cv.split(imghsv)
s = s*satadj
s = np.clip(s,0,255)
imghsv = cv.merge([h,s,v])
imgrgb = cv.cvtColor(imghsv.astype("uint8"), cv.COLOR_HSV2RGB)
# assume: return rgb
return imgrgb
def brightness(img, exp_adj):
imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype("float32")
(h, s, v) = cv.split(imghsv)
v = v*exp_adj
v = np.clip(v,0,255)
imghsv = cv.merge([h,s,v])
imgrgb = cv.cvtColor(imghsv.astype("uint8"), cv.COLOR_HSV2RGB)
# assume: return rgb
return imgrgb
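# Hedged illustration, not part of the original tool: saturate() and brightness()
# round-trip an RGB uint8 image through HSV, scale one channel and clip it back to
# [0, 255]. The flat grey test image below is made up; only numpy and cv2 are assumed.
def _demo_tone_adjust():
    test_img = np.full((4, 4, 3), 128, dtype=np.uint8)   # flat mid-grey RGB image
    more_saturated = saturate(test_img, 1.5)           # no visible change on pure grey (S is 0)
    brighter = brightness(test_img, 1.5)               # V channel scaled: 128 -> 192
    return more_saturated, brighter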
def plt_hist(ax, img, color):
colors = ['b', 'g', 'r']
k = colors.index(color)
histogram = cv.calcHist([img],[k],None,[256],[0,256])
plt_handle, = ax.plot(histogram, color=color)
return plt_handle
def main():
fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0))
ax1 = ax[0] # The histogram
ax2 = ax[1] # The image
ax2.set_xlim(0.0,1280.0)
fig.suptitle('Image toner', fontsize=16)
# Calculate the initial value for the image
img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR
img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes RGB
# Draw the image
# Take the handle for later
imobj = ax2.imshow(img)
# Axes for the saturation and brightness
ax_sat = plt.axes([0.25, .03, 0.50, 0.02])
ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02])
# Slider
sat_slider = Slider(ax_sat, 'Saturation', 0, 20, valinit=1)
exp_slider = Slider(ax_exp, 'Brightness', -10, 10, valinit=1)
# Histogram
colors = ('r', 'g', 'b')
lines = []
for k,color in enumerate(colors):
histogram = cv.calcHist([img],[k],None,[256],[0,256])
line, = ax1.plot(histogram,color=color)
lines.append(line)
def update_sat(val):
newimg = img
# update image
newimg = saturate(newimg, val)
newimg = brightness(newimg, exp_slider.val)
imobj.set_data(newimg)
# update also the histogram
colors = ('r', 'g', 'b')
for k,color in enumerate(colors):
histogram = cv.calcHist([newimg],[k],None,[256],[0,256])
lines[k].set_ydata(histogram)
# redraw canvas while idle
fig.canvas.draw_idle()
def update_exp(val):
newimg = img
newimg = saturate(newimg, sat_slider.val)
newimg = brightness(newimg, val)
imobj.set_data(newimg)
# update also the histogram
        colors = ('r', 'g', 'b')  # same channel order as when the histogram lines were created
for k,color in enumerate(colors):
histogram = cv.calcHist([newimg],[k],None,[256],[0,256])
lines[k].set_ydata(histogram)
# redraw canvas while idle
fig.canvas.draw_idle()
# call update function on slider value change
sat_slider.on_changed(update_sat)
exp_slider.on_changed(update_exp)
plt.show()
main()
|
[
"matplotlib.pyplot.show",
"cv2.cvtColor",
"cv2.calcHist",
"matplotlib.pyplot.axes",
"matplotlib.widgets.Slider",
"numpy.clip",
"cv2.split",
"cv2.samples.findFile",
"cv2.merge",
"matplotlib.pyplot.subplots"
] |
[((569, 585), 'cv2.split', 'cv.split', (['imghsv'], {}), '(imghsv)\n', (577, 585), True, 'import cv2 as cv\n'), ((607, 625), 'numpy.clip', 'np.clip', (['s', '(0)', '(255)'], {}), '(s, 0, 255)\n', (614, 625), True, 'import numpy as np\n'), ((634, 653), 'cv2.merge', 'cv.merge', (['[h, s, v]'], {}), '([h, s, v])\n', (642, 653), True, 'import cv2 as cv\n'), ((863, 879), 'cv2.split', 'cv.split', (['imghsv'], {}), '(imghsv)\n', (871, 879), True, 'import cv2 as cv\n'), ((902, 920), 'numpy.clip', 'np.clip', (['v', '(0)', '(255)'], {}), '(v, 0, 255)\n', (909, 920), True, 'import numpy as np\n'), ((929, 948), 'cv2.merge', 'cv.merge', (['[h, s, v]'], {}), '([h, s, v])\n', (937, 948), True, 'import cv2 as cv\n'), ((1145, 1191), 'cv2.calcHist', 'cv.calcHist', (['[img]', '[k]', 'None', '[256]', '[0, 256]'], {}), '([img], [k], None, [256], [0, 256])\n', (1156, 1191), True, 'import cv2 as cv\n'), ((1278, 1318), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(27.0, 27.0)'}), '(1, 2, figsize=(27.0, 27.0))\n', (1290, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1594), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2RGB'], {}), '(img, cv.COLOR_BGR2RGB)\n', (1571, 1594), True, 'import cv2 as cv\n'), ((1739, 1772), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.03, 0.5, 0.02]'], {}), '([0.25, 0.03, 0.5, 0.02])\n', (1747, 1772), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1816), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.01, 0.5, 0.02]'], {}), '([0.25, 0.01, 0.5, 0.02])\n', (1791, 1816), True, 'import matplotlib.pyplot as plt\n'), ((1843, 1889), 'matplotlib.widgets.Slider', 'Slider', (['ax_sat', '"""Saturation"""', '(0)', '(20)'], {'valinit': '(1)'}), "(ax_sat, 'Saturation', 0, 20, valinit=1)\n", (1849, 1889), False, 'from matplotlib.widgets import Slider\n'), ((1904, 1952), 'matplotlib.widgets.Slider', 'Slider', (['ax_exp', '"""Brightness"""', '(-10)', '(10)'], {'valinit': '(1)'}), "(ax_exp, 'Brightness', -10, 10, valinit=1)\n", (1910, 1952), False, 'from matplotlib.widgets import Slider\n'), ((3071, 3081), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3079, 3081), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1537), 'cv2.samples.findFile', 'cv.samples.findFile', (['FILE_NAME'], {}), '(FILE_NAME)\n', (1526, 1537), True, 'import cv2 as cv\n'), ((2054, 2100), 'cv2.calcHist', 'cv.calcHist', (['[img]', '[k]', 'None', '[256]', '[0, 256]'], {}), '([img], [k], None, [256], [0, 256])\n', (2065, 2100), True, 'import cv2 as cv\n'), ((503, 537), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_RGB2HSV'], {}), '(img, cv.COLOR_RGB2HSV)\n', (514, 537), True, 'import cv2 as cv\n'), ((797, 831), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_RGB2HSV'], {}), '(img, cv.COLOR_RGB2HSV)\n', (808, 831), True, 'import cv2 as cv\n'), ((2430, 2479), 'cv2.calcHist', 'cv.calcHist', (['[newimg]', '[k]', 'None', '[256]', '[0, 256]'], {}), '([newimg], [k], None, [256], [0, 256])\n', (2441, 2479), True, 'import cv2 as cv\n'), ((2817, 2866), 'cv2.calcHist', 'cv.calcHist', (['[newimg]', '[k]', 'None', '[256]', '[0, 256]'], {}), '([newimg], [k], None, [256], [0, 256])\n', (2828, 2866), True, 'import cv2 as cv\n')]
|
import numpy as np
from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, \
anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \
visualization_utils as vis_util
from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields
from platformx.plat_tensorflow.tools.processor import model_config
import config
from PIL import Image
import matplotlib
matplotlib.use('Agg')
from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util
from scipy import misc
import os
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
BASE_BoxEncodingPredictor = "_BoxEncodingPredictor"
BASE_ClassPredictor = "_ClassPredictor"
PPN_BoxPredictor_0 = "WeightSharedConvolutionalBoxPredictor_BoxPredictor"
PPN_ClassPredictor_0 = "WeightSharedConvolutionalBoxPredictor_ClassPredictor"
BASE_PPN_BoxPredictor = "_BoxPredictor"
BASE_PPN_ClassPredictor = "WeightSharedConvolutionalBoxPredictor"
PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS
def run_ssd_tf_post(preprocessed_inputs, result_middle=None):
boxes_encodings_np = []
classes_predictions_with_background_np = []
feature_maps_np = []
for i in range(6):
for key, value in result_middle.items():
if str(i) + BASE_BoxEncodingPredictor in key:
print(str(i) + BASE_BoxEncodingPredictor + ": ", value.shape)
boxes_encodings_np.append(value)
break
if i == 0:
if PPN_BoxPredictor_0 in key:
print("PPN_BoxPredictor_0:", value.shape)
boxes_encodings_np.append(value)
break
else:
if str(i) + BASE_PPN_BoxPredictor in key:
print(str(i) + BASE_PPN_BoxPredictor, value.shape)
boxes_encodings_np.append(value)
break
for key, value in result_middle.items():
if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not in key:
print(str(i) + BASE_ClassPredictor+ ": ", value.shape)
classes_predictions_with_background_np.append(value)
break
if i == 0:
if PPN_ClassPredictor_0 in key:
print(PPN_ClassPredictor_0 + ":", value.shape)
classes_predictions_with_background_np.append(value)
break
else:
if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in key:
print(str(i) + BASE_ClassPredictor + ":", value.shape)
classes_predictions_with_background_np.append(value)
break
for key, value in result_middle.items():
if "FeatureExtractor" in key and "fpn" not in key:
print("key {} value {}".format(key, value.shape))
feature_maps_np.append(value)
if len(feature_maps_np) < 1:
key_dict = {}
for key, value in result_middle.items():
if "FeatureExtractor" in key and "fpn"in key:
key_dict[key] = value.shape[1]
sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1], reverse=True)
for key, value in sorted_key_dict:
feature_maps_np.append(result_middle[key])
input_shape = preprocessed_inputs.shape
true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32)
true_image_shapes = true_image_shapes.reshape((1, 3))
post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np,
preprocessed_inputs,
true_image_shapes)
show_detection_result(post_result)
return post_result
def show_detection_result(result):
print("PATH_TO_LABELS:", PATH_TO_LABELS)
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
# NUM_CLASSES
NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
result['detection_classes'] = result[
'detection_classes'][0].astype(np.uint8)
result['detection_boxes'] = result['detection_boxes'][0]
result['detection_scores'] = result['detection_scores'][0]
img_dir = config.cfg.PREPROCESS.IMG_LIST
file_list = os.listdir(img_dir)
IMG_PATH = os.path.join(img_dir, file_list[0])
print("IMG_PATH:", IMG_PATH)
image = Image.open(IMG_PATH)
image_np = load_image_into_numpy_array(image)
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
result['detection_boxes'],
result['detection_classes'],
result['detection_scores'],
category_index,
instance_masks=result.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
# IMAGE_SIZE = (12, 8)
# plt.figure(figsize=IMAGE_SIZE)
misc.imsave('detection_result_ssd.png', image_np)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None,
true_image_shapes=None):
"""
    SSD model post-processor
:param boxes_encodings:
:param classes_predictions_with_background:
:param feature_maps:
:param preprocessed_inputs:
:param true_image_shapes:
:return:
"""
prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps,
preprocessed_inputs)
postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes)
return _add_output_tensor_nodes(postprocessed_tensors)
def _add_output_tensor_nodes(postprocessed_tensors):
print("------------------ _add_output_tensor_nodes ------------------")
detection_fields = fields.DetectionResultFields
label_id_offset = 1
boxes = postprocessed_tensors.get(detection_fields.detection_boxes)
scores = postprocessed_tensors.get(detection_fields.detection_scores)
classes = postprocessed_tensors.get(
detection_fields.detection_classes) + label_id_offset
keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints)
masks = postprocessed_tensors.get(detection_fields.detection_masks)
num_detections = postprocessed_tensors.get(detection_fields.num_detections)
if isinstance(num_detections, list):
num_detections = num_detections[0]
elif isinstance(num_detections, float):
num_detections = int(num_detections)
elif isinstance(num_detections, np.ndarray):
num_detections = int(num_detections[0])
print("=============== num_detections :", num_detections)
outputs = {}
print("scores:", scores)
scores = scores.flatten()
    # TODO: read the padding behaviour from a config file; below, scores are padded with 0 and classes with 1, as in the original code
if scores.shape[0] < 100:
raw_shape = 100
else:
raw_shape = scores.shape[0]
scores_1 = scores[0:num_detections]
print("scores_1:", scores_1)
scores_2 = np.zeros(shape=raw_shape - num_detections)
scores = np.hstack((scores_1, scores_2))
scores = np.reshape(scores, (1, scores.shape[0]))
outputs[detection_fields.detection_scores] = scores
classes = classes.flatten()
classes_1 = classes[0:num_detections]
print("classes_1:", classes_1)
classes_2 = np.ones(shape=raw_shape - num_detections)
classes = np.hstack((classes_1, classes_2))
classes = np.reshape(classes, (1, classes.shape[0]))
outputs[detection_fields.detection_classes] = classes
boxes_1 = boxes[:, 0:num_detections]
print("boxes_1:", boxes_1)
boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4))
boxes = np.hstack((boxes_1, boxes_2))
outputs[detection_fields.detection_boxes] = boxes
outputs[detection_fields.num_detections] = num_detections
if keypoints is not None:
outputs[detection_fields.detection_keypoints] = keypoints
if masks is not None:
outputs[detection_fields.detection_masks] = masks
return outputs
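# Hedged illustration, not part of the original post-processor: the padding scheme used
# above, reduced to plain numpy. Scores are zero-padded and classes one-padded up to a
# fixed slot count so every image yields same-shaped output tensors. The slot count of 10
# and the raw detections below are made up for the example.
def _demo_fixed_slot_padding(num_slots=10):
    num_detections = 3
    scores = np.array([0.91, 0.75, 0.40])
    classes = np.array([1.0, 3.0, 2.0])
    padded_scores = np.hstack((scores, np.zeros(num_slots - num_detections)))
    padded_classes = np.hstack((classes, np.ones(num_slots - num_detections)))
    return padded_scores.reshape(1, -1), padded_classes.reshape(1, -1)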
def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None):
print("------------------ last_predict_part ------------------")
"""Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the forward
    pass of the network to yield unpostprocessed predictions.
A side effect of calling the predict method is that self._anchors is
populated with a box_list.BoxList of anchors. These anchors must be
constructed before the postprocess or loss functions can be called.
Args:
boxes_encodings:
classes_predictions_with_background:
feature_maps:
preprocessed_inputs: a [batch, height, width, channels] image tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
anchor_generator = anchor_generator_builder.build()
num_predictions_per_location_list = anchor_generator.num_anchors_per_location()
# print("num_predictions_per_location_list:", num_predictions_per_location_list)
prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background,
feature_maps, num_predictions_per_location_list)
image_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_inputs)
feature_map_spatial_dims = get_feature_map_spatial_dims(
feature_maps)
anchors_list = anchor_generator.generate(
feature_map_spatial_dims,
im_height=image_shape[1],
im_width=image_shape[2])
anchors = box_list_ops.concatenate(anchors_list)
box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1)
if box_encodings.ndim == 4 and box_encodings.shape[2] == 1:
box_encodings = np.squeeze(box_encodings, axis=2)
class_predictions_with_background = np.concatenate(
prediction_dict['class_predictions_with_background'], axis=1)
predictions_dict = {
'preprocessed_inputs': preprocessed_inputs,
'box_encodings': box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'feature_maps': feature_maps,
'anchors': anchors.get()
}
return predictions_dict, anchors
def get_feature_map_spatial_dims(feature_maps):
"""Return list of spatial dimensions for each feature map in a list.
Args:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i].
Returns:
a list of pairs (height, width) for each feature map in feature_maps
"""
feature_map_shapes = [
shape_utils.combined_static_and_dynamic_shape(
feature_map) for feature_map in feature_maps
]
return [(shape[1], shape[2]) for shape in feature_map_shapes]
def post_processor(boxes_encodings, classes_predictions_with_background, image_features,
num_predictions_per_location_list):
print("------------------ post_processor ------------------")
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
Returns:
box_encodings: A list of float tensors of shape
[batch_size, num_anchors_i, q, code_size] representing the location of
the objects, where q is 1 or the number of classes. Each entry in the
list corresponds to a feature map in the input `image_features` list.
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
box_encodings_list = []
class_predictions_list = []
for (image_feature,
num_predictions_per_location,
box_encodings,
class_predictions_with_background) in zip(image_features,
num_predictions_per_location_list,
boxes_encodings,
classes_predictions_with_background):
combined_feature_map_shape = image_feature.shape
box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE
new_shape = np.stack([combined_feature_map_shape[0],
combined_feature_map_shape[1] *
combined_feature_map_shape[2] *
num_predictions_per_location,
1, box_code_size])
box_encodings = np.reshape(box_encodings, new_shape)
box_encodings_list.append(box_encodings)
num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES
num_class_slots = num_classes + 1
class_predictions_with_background = np.reshape(
class_predictions_with_background,
np.stack([combined_feature_map_shape[0],
combined_feature_map_shape[1] *
combined_feature_map_shape[2] *
num_predictions_per_location,
num_class_slots]))
class_predictions_list.append(class_predictions_with_background)
return {BOX_ENCODINGS: box_encodings_list,
CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list}
def postprocess(anchors, prediction_dict, true_image_shapes):
print("------------------ postprocess ------------------")
if ('box_encodings' not in prediction_dict or
'class_predictions_with_background' not in prediction_dict):
raise ValueError('prediction_dict does not contain expected entries.')
preprocessed_images = prediction_dict['preprocessed_inputs']
box_encodings = prediction_dict['box_encodings']
box_encodings = box_encodings
class_predictions = prediction_dict['class_predictions_with_background']
detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings)
detection_boxes = detection_boxes
detection_boxes = np.expand_dims(detection_boxes, axis=2)
non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD)
detection_scores_with_background = score_conversion_fn(class_predictions)
detection_scores = detection_scores_with_background[0:, 0:, 1:]
additional_fields = None
if detection_keypoints is not None:
additional_fields = {
fields.BoxListFields.keypoints: detection_keypoints}
(nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,
num_detections) = non_max_suppression_fn(
detection_boxes,
detection_scores,
clip_window=_compute_clip_window(
preprocessed_images, true_image_shapes),
additional_fields=additional_fields)
detection_dict = {
fields.DetectionResultFields.detection_boxes: nmsed_boxes,
fields.DetectionResultFields.detection_scores: nmsed_scores,
fields.DetectionResultFields.detection_classes: nmsed_classes,
fields.DetectionResultFields.num_detections:
float(num_detections)
}
if (nmsed_additional_fields is not None and
fields.BoxListFields.keypoints in nmsed_additional_fields):
detection_dict[fields.DetectionResultFields.detection_keypoints] = (
nmsed_additional_fields[fields.BoxListFields.keypoints])
return detection_dict
def _compute_clip_window(preprocessed_images, true_image_shapes):
resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_images)
true_heights, true_widths, _ = np.split(true_image_shapes, 3, axis=1)
padded_height = float(resized_inputs_shape[1])
padded_width = float(resized_inputs_shape[2])
cliped_image = np.stack(
[np.zeros_like(true_heights), np.zeros_like(true_widths),
true_heights / padded_height, true_widths / padded_width], axis=1)
cliped_imaged = cliped_image.reshape(1, -1)
return cliped_imaged
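# Hedged illustration, not part of the original post-processor: the clip window above
# expressed with plain numpy for a single image. The padded size (320x320) and true size
# (300x200) are made up; the result is [ymin, xmin, ymax, xmax] normalized by the padded
# dimensions, which is what the NMS step uses to clip boxes to valid pixels.
def _demo_clip_window():
    padded_height, padded_width = 320.0, 320.0
    true_height, true_width = 300.0, 200.0
    clip_window = np.array([[0.0, 0.0, true_height / padded_height, true_width / padded_width]])
    return clip_window  # [[0., 0., 0.9375, 0.625]]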
def _batch_decode(anchors, box_encodings):
"""Decodes a batch of box encodings with respect to the anchors.
Args:
box_encodings: A float32 tensor of shape
[batch_size, num_anchors, box_code_size] containing box encodings.
Returns:
decoded_boxes: A float32 tensor of shape
[batch_size, num_anchors, 4] containing the decoded boxes.
decoded_keypoints: A float32 tensor of shape
[batch_size, num_anchors, num_keypoints, 2] containing the decoded
keypoints if present in the input `box_encodings`, None otherwise.
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(
box_encodings)
batch_size = combined_shape[0]
tiled_anchor_boxes = np.tile(
np.expand_dims(anchors.get(), 0), [batch_size, 1, 1])
tiled_anchors_boxlist = box_list.BoxList(
np.reshape(tiled_anchor_boxes, [-1, 4]))
box_coder = box_coder_builder.build("faster_rcnn_box_coder")
decoded_boxes = box_coder.decode(
np.reshape(box_encodings, [-1, box_coder.code_size]),
tiled_anchors_boxlist)
decoded_keypoints = None
if decoded_boxes.has_field(fields.BoxListFields.keypoints):
decoded_keypoints = decoded_boxes.get_field(
fields.BoxListFields.keypoints)
num_keypoints = decoded_keypoints.get_shape()[1]
decoded_keypoints = np.reshape(
decoded_keypoints,
np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2]))
decoded_boxes = np.reshape(decoded_boxes.get(), np.stack(
[combined_shape[0], combined_shape[1], 4]))
return decoded_boxes, decoded_keypoints
|
[
"numpy.ones",
"scipy.misc.imsave",
"os.path.join",
"numpy.zeros_like",
"platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.create_category_index",
"platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.load_labelmap",
"numpy.reshape",
"platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.convert_label_map_to_categories",
"numpy.stack",
"platformx.plat_tensorflow.tools.processor.np_utils.shape_utils.combined_static_and_dynamic_shape",
"numpy.hstack",
"matplotlib.use",
"numpy.squeeze",
"platformx.plat_tensorflow.tools.processor.np_utils.box_coder_builder.build",
"os.listdir",
"numpy.concatenate",
"platformx.plat_tensorflow.tools.processor.np_utils.box_list_ops.concatenate",
"numpy.zeros",
"numpy.expand_dims",
"PIL.Image.open",
"numpy.split",
"platformx.plat_tensorflow.tools.processor.np_utils.anchor_generator_builder.build",
"numpy.array",
"platformx.plat_tensorflow.tools.processor.np_utils.post_processing_builder.build"
] |
[((452, 473), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (466, 473), False, 'import matplotlib\n'), ((3544, 3618), 'numpy.array', 'np.array', (['[input_shape[1], input_shape[2], input_shape[3]]'], {'dtype': 'np.int32'}), '([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32)\n', (3552, 3618), True, 'import numpy as np\n'), ((4053, 4097), 'platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['PATH_TO_LABELS'], {}), '(PATH_TO_LABELS)\n', (4081, 4097), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util\n'), ((4191, 4305), 'platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n NUM_CLASSES, use_display_name=True)\n', (4237, 4305), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util\n'), ((4388, 4436), 'platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (4424, 4436), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util\n'), ((4723, 4742), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (4733, 4742), False, 'import os\n'), ((4761, 4796), 'os.path.join', 'os.path.join', (['img_dir', 'file_list[0]'], {}), '(img_dir, file_list[0])\n', (4773, 4796), False, 'import os\n'), ((4844, 4864), 'PIL.Image.open', 'Image.open', (['IMG_PATH'], {}), '(IMG_PATH)\n', (4854, 4864), False, 'from PIL import Image\n'), ((5327, 5376), 'scipy.misc.imsave', 'misc.imsave', (['"""detection_result_ssd.png"""', 'image_np'], {}), "('detection_result_ssd.png', image_np)\n", (5338, 5376), False, 'from scipy import misc\n'), ((7639, 7681), 'numpy.zeros', 'np.zeros', ([], {'shape': '(raw_shape - num_detections)'}), '(shape=raw_shape - num_detections)\n', (7647, 7681), True, 'import numpy as np\n'), ((7696, 7727), 'numpy.hstack', 'np.hstack', (['(scores_1, scores_2)'], {}), '((scores_1, scores_2))\n', (7705, 7727), True, 'import numpy as np\n'), ((7742, 7782), 'numpy.reshape', 'np.reshape', (['scores', '(1, scores.shape[0])'], {}), '(scores, (1, scores.shape[0]))\n', (7752, 7782), True, 'import numpy as np\n'), ((7973, 8014), 'numpy.ones', 'np.ones', ([], {'shape': '(raw_shape - num_detections)'}), '(shape=raw_shape - num_detections)\n', (7980, 8014), True, 'import numpy as np\n'), ((8030, 8063), 'numpy.hstack', 'np.hstack', (['(classes_1, classes_2)'], {}), '((classes_1, classes_2))\n', (8039, 8063), True, 'import numpy as np\n'), ((8079, 8121), 'numpy.reshape', 'np.reshape', (['classes', '(1, classes.shape[0])'], {}), '(classes, (1, classes.shape[0]))\n', (8089, 8121), True, 'import numpy as np\n'), ((8274, 8324), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, raw_shape - num_detections, 4)'}), '(shape=(1, raw_shape - num_detections, 4))\n', (8282, 8324), True, 'import numpy as np\n'), ((8338, 8367), 'numpy.hstack', 'np.hstack', (['(boxes_1, boxes_2)'], {}), '((boxes_1, boxes_2))\n', (8347, 8367), True, 'import numpy as np\n'), ((9782, 9814), 'platformx.plat_tensorflow.tools.processor.np_utils.anchor_generator_builder.build', 'anchor_generator_builder.build', ([], {}), '()\n', (9812, 9814), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, 
anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((10190, 10256), 'platformx.plat_tensorflow.tools.processor.np_utils.shape_utils.combined_static_and_dynamic_shape', 'shape_utils.combined_static_and_dynamic_shape', (['preprocessed_inputs'], {}), '(preprocessed_inputs)\n', (10235, 10256), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((10522, 10560), 'platformx.plat_tensorflow.tools.processor.np_utils.box_list_ops.concatenate', 'box_list_ops.concatenate', (['anchors_list'], {}), '(anchors_list)\n', (10546, 10560), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((10584, 10640), 'numpy.concatenate', 'np.concatenate', (["prediction_dict['box_encodings']"], {'axis': '(1)'}), "(prediction_dict['box_encodings'], axis=1)\n", (10598, 10640), True, 'import numpy as np\n'), ((10808, 10884), 'numpy.concatenate', 'np.concatenate', (["prediction_dict['class_predictions_with_background']"], {'axis': '(1)'}), "(prediction_dict['class_predictions_with_background'], axis=1)\n", (10822, 10884), True, 'import numpy as np\n'), ((15463, 15502), 'numpy.expand_dims', 'np.expand_dims', (['detection_boxes'], {'axis': '(2)'}), '(detection_boxes, axis=2)\n', (15477, 15502), True, 'import numpy as np\n'), ((15554, 15601), 'platformx.plat_tensorflow.tools.processor.np_utils.post_processing_builder.build', 'post_processing_builder.build', (['model_config.SSD'], {}), '(model_config.SSD)\n', (15583, 15601), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((16972, 17038), 'platformx.plat_tensorflow.tools.processor.np_utils.shape_utils.combined_static_and_dynamic_shape', 'shape_utils.combined_static_and_dynamic_shape', (['preprocessed_images'], {}), '(preprocessed_images)\n', (17017, 17038), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((17085, 17123), 'numpy.split', 'np.split', (['true_image_shapes', '(3)'], {'axis': '(1)'}), '(true_image_shapes, 3, axis=1)\n', (17093, 17123), True, 'import numpy as np\n'), ((18102, 18162), 'platformx.plat_tensorflow.tools.processor.np_utils.shape_utils.combined_static_and_dynamic_shape', 'shape_utils.combined_static_and_dynamic_shape', (['box_encodings'], {}), '(box_encodings)\n', (18147, 18162), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((18425, 18473), 'platformx.plat_tensorflow.tools.processor.np_utils.box_coder_builder.build', 'box_coder_builder.build', (['"""faster_rcnn_box_coder"""'], {}), "('faster_rcnn_box_coder')\n", (18448, 18473), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((10731, 10764), 
'numpy.squeeze', 'np.squeeze', (['box_encodings'], {'axis': '(2)'}), '(box_encodings, axis=2)\n', (10741, 10764), True, 'import numpy as np\n'), ((11623, 11681), 'platformx.plat_tensorflow.tools.processor.np_utils.shape_utils.combined_static_and_dynamic_shape', 'shape_utils.combined_static_and_dynamic_shape', (['feature_map'], {}), '(feature_map)\n', (11668, 11681), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((13661, 13822), 'numpy.stack', 'np.stack', (['[combined_feature_map_shape[0], combined_feature_map_shape[1] *\n combined_feature_map_shape[2] * num_predictions_per_location, 1,\n box_code_size]'], {}), '([combined_feature_map_shape[0], combined_feature_map_shape[1] *\n combined_feature_map_shape[2] * num_predictions_per_location, 1,\n box_code_size])\n', (13669, 13822), True, 'import numpy as np\n'), ((13994, 14030), 'numpy.reshape', 'np.reshape', (['box_encodings', 'new_shape'], {}), '(box_encodings, new_shape)\n', (14004, 14030), True, 'import numpy as np\n'), ((18365, 18404), 'numpy.reshape', 'np.reshape', (['tiled_anchor_boxes', '[-1, 4]'], {}), '(tiled_anchor_boxes, [-1, 4])\n', (18375, 18404), True, 'import numpy as np\n'), ((18522, 18574), 'numpy.reshape', 'np.reshape', (['box_encodings', '[-1, box_coder.code_size]'], {}), '(box_encodings, [-1, box_coder.code_size])\n', (18532, 18574), True, 'import numpy as np\n'), ((19069, 19120), 'numpy.stack', 'np.stack', (['[combined_shape[0], combined_shape[1], 4]'], {}), '([combined_shape[0], combined_shape[1], 4])\n', (19077, 19120), True, 'import numpy as np\n'), ((14304, 14464), 'numpy.stack', 'np.stack', (['[combined_feature_map_shape[0], combined_feature_map_shape[1] *\n combined_feature_map_shape[2] * num_predictions_per_location,\n num_class_slots]'], {}), '([combined_feature_map_shape[0], combined_feature_map_shape[1] *\n combined_feature_map_shape[2] * num_predictions_per_location,\n num_class_slots])\n', (14312, 14464), True, 'import numpy as np\n'), ((17269, 17296), 'numpy.zeros_like', 'np.zeros_like', (['true_heights'], {}), '(true_heights)\n', (17282, 17296), True, 'import numpy as np\n'), ((17298, 17324), 'numpy.zeros_like', 'np.zeros_like', (['true_widths'], {}), '(true_widths)\n', (17311, 17324), True, 'import numpy as np\n'), ((18948, 19014), 'numpy.stack', 'np.stack', (['[combined_shape[0], combined_shape[1], num_keypoints, 2]'], {}), '([combined_shape[0], combined_shape[1], num_keypoints, 2])\n', (18956, 19014), True, 'import numpy as np\n')]
|
# Copyright 2020 <NAME>. All rights reserved
# Created on Tue Feb 11 12:29:35 2020
# Author: <NAME>, Purdue University
#
#
# The original code came with the following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Zhi Huang be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
import numpy as np
def fisher_discriminant(H, label):
'''
Parameters
----------
H : Real-valued matrix with columns indicating samples.
label : Class indices.
Returns
-------
E_D : Real scalar value indicating fisher discriminant.
Notes
-----
This fisher discriminant is the equation (3 a,b) in
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075
    label is first sorted in ascending order, and the same ordering is applied to
    both label and H; otherwise the denominator would be computed incorrectly.
References
----------
.. [1] <NAME>, <NAME>, Amari SI. A new discriminant NMF algorithm and its
application to the extraction of subtle emotional differences in speech.
Cognitive neurodynamics. 2012 Dec 1;6(6):525-35.
'''
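    # Editor's note (sketch of the quantity computed below, paraphrasing Eq. (3a,b)
    # of the cited paper; the notation is not copied from the source):
    #   E_D = sum_r sum_k N_k * (mu_rk - mu_r)^2  /  sum_r sum_n (H[r, n] - mu_rk(n))^2
    # where mu_rk is the mean of row r over the samples of class k, mu_r is the mean
    # of row r over all samples, and mu_rk(n) is the class mean matching sample n.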
order = np.argsort(label)
H = H[:,order]
label = label[order]
numerator, denominator = 0, 0
mu_rkn = np.zeros((H.shape[0], 0))
mu_r_all = 1/H.shape[1] * np.sum(H, axis = 1)
for k in np.unique(label):
N_k = np.sum(k == label)
mu_rk_block = np.zeros((0, N_k))
for r in range(H.shape[0]):
mu_r = mu_r_all[r]
mu_rk = 1/N_k * np.sum(H[r, k == label])
mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0)
numerator += N_k * (mu_rk - mu_r) ** 2
mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis = 1)
denominator = np.sum((H - mu_rkn)**2)
E_D = numerator / denominator
return E_D
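# Minimal usage sketch (editor's illustration; shapes and values are hypothetical):
#   H = np.random.rand(4, 9)            # 4 components x 9 samples
#   label = np.repeat([0, 1, 2], 3)     # three classes, three samples each
#   E_D = fisher_discriminant(H, label)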
|
[
"numpy.sum",
"numpy.unique",
"numpy.zeros",
"numpy.argsort",
"numpy.array",
"numpy.concatenate"
] |
[((1662, 1679), 'numpy.argsort', 'np.argsort', (['label'], {}), '(label)\n', (1672, 1679), True, 'import numpy as np\n'), ((1771, 1796), 'numpy.zeros', 'np.zeros', (['(H.shape[0], 0)'], {}), '((H.shape[0], 0))\n', (1779, 1796), True, 'import numpy as np\n'), ((1860, 1876), 'numpy.unique', 'np.unique', (['label'], {}), '(label)\n', (1869, 1876), True, 'import numpy as np\n'), ((2312, 2337), 'numpy.sum', 'np.sum', (['((H - mu_rkn) ** 2)'], {}), '((H - mu_rkn) ** 2)\n', (2318, 2337), True, 'import numpy as np\n'), ((1827, 1844), 'numpy.sum', 'np.sum', (['H'], {'axis': '(1)'}), '(H, axis=1)\n', (1833, 1844), True, 'import numpy as np\n'), ((1892, 1910), 'numpy.sum', 'np.sum', (['(k == label)'], {}), '(k == label)\n', (1898, 1910), True, 'import numpy as np\n'), ((1933, 1951), 'numpy.zeros', 'np.zeros', (['(0, N_k)'], {}), '((0, N_k))\n', (1941, 1951), True, 'import numpy as np\n'), ((2246, 2291), 'numpy.concatenate', 'np.concatenate', (['(mu_rkn, mu_rk_block)'], {'axis': '(1)'}), '((mu_rkn, mu_rk_block), axis=1)\n', (2260, 2291), True, 'import numpy as np\n'), ((2047, 2071), 'numpy.sum', 'np.sum', (['H[r, k == label]'], {}), '(H[r, k == label])\n', (2053, 2071), True, 'import numpy as np\n'), ((2127, 2150), 'numpy.array', 'np.array', (['([mu_rk] * N_k)'], {}), '([mu_rk] * N_k)\n', (2135, 2150), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
import tensorflow.contrib.distributions as tfd
from integrators import ODERK4, SDEEM
from kernels import OperatorKernel
from gpflow import transforms
from param import Param
float_type = tf.float64
jitter0 = 1e-6
class NPODE:
def __init__(self,Z0,U0,sn0,kern,jitter=jitter0,
summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False):
""" Constructor for the NPODE model
Args:
Z0: Numpy matrix of initial inducing points of size MxD, M being the
number of inducing points.
U0: Numpy matrix of initial inducing vectors of size MxD, M being the
number of inducing points.
sn0: Numpy vector of size 1xD for initial signal variance
kern: Kernel object for GP interpolation
jitter: Float of jitter level
whiten: Boolean. Currently we perform the optimization only in the
white domain
summ: Boolean for Tensorflow summary
fix_Z: Boolean - whether inducing locations are fixed or optimized
fix_U: Boolean - whether inducing vectors are fixed or optimized
fix_sn: Boolean - whether noise variance is fixed or optimized
"""
self.name = 'npode'
self.whiten = whiten
self.kern = kern
self.jitter = jitter
with tf.name_scope("NPDE"):
Z = Param(Z0,
name = "Z",
summ = False,
fixed = fix_Z)
U = Param(U0,
name = "U",
summ = False,
fixed = fix_U)
sn = Param(np.array(sn0),
name = "sn",
summ = summ,
fixed = fix_sn,
transform = transforms.Log1pe())
self.Z = Z()
self.U = U()
self.sn = sn()
self.D = U.shape[1]
self.integrator = ODERK4(self)
self.fix_Z = fix_Z
self.fix_sn = fix_sn
self.fix_U = fix_U
def f(self,X,t=[0]):
""" Implements GP interpolation to compute the value of the differential
function at location(s) X.
Args:
X: TxD tensor of input locations, T is the number of locations.
Returns:
TxD tensor of differential function (GP conditional) computed on
input locations
"""
U = self.U
Z = self.Z
kern = self.kern
N = tf.shape(X)[0]
M = tf.shape(Z)[0]
D = tf.shape(Z)[1] # dim of state
if kern.ktype == "id":
Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter
else:
Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter
Lz = tf.cholesky(Kzz)
Kzx = kern.K(Z, X)
A = tf.matrix_triangular_solve(Lz, Kzx, lower=True)
if not self.whiten:
A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False)
f = tf.matmul(A, U, transpose_a=True)
# transformation for "id - rbf" kernel
if not kern.ktype == "id" and not kern.ktype == "kr" :
f = tf.reshape(f,[N,D])
return f
def build_prior(self):
if self.kern.ktype == "id" or self.kern.ktype == "kr":
if self.whiten:
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(self.U[:,0]))
else:
mvn = tfd.MultivariateNormalFullCovariance(
loc=tf.zeros_like(self.U[:,0]),
covariance_matrix=self.kern.K(self.Z,self.Z))
probs = tf.add_n([mvn.log_prob(self.U[:,d]) for d in range(self.kern.ndims)])
else:
if self.whiten:
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(self.U))
else:
mvn = tfd.MultivariateNormalFullCovariance(
loc=tf.zeros_like(self.U),
covariance_matrix=self.kern.K(self.Z,self.Z))
probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U)))
return probs
def forward(self,x0,ts):
return self.integrator.forward(x0=x0,ts=ts)
def predict(self,x0,t):
""" Computes the integral and returns the path
Args:
x0: Python/numpy array of initial value
t: Python/numpy array of time points the integral is evaluated at
Returns:
ODE solution computed at t, tensor of size [len(t),len(x0)]
"""
x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1))
t = [t]
integrator = ODERK4(self)
path = integrator.forward(x0,t)
path = path[0]
return path
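    # Usage sketch (editor's illustration; the initial value and time grid are hypothetical):
    #   path = model.predict(x0=[0.5, -0.3], t=np.linspace(0, 10, 100))
    # `path` is then a tensor of shape [len(t), len(x0)] holding the ODE solution.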
def Kzz(self):
kern = self.kern
Z = self.Z
M = tf.shape(Z)[0]
D = tf.shape(Z)[1] # dim of state
if kern.ktype == "id":
Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter
else:
Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter
return Kzz
def U(self):
U = self.U
if self.whiten:
Lz = tf.cholesky(self.Kzz())
U = tf.matmul(Lz,U)
return U
def __str__(self):
rep = 'noise variance: ' + str(self.sn.eval()) + \
'\nsignal variance: ' + str(self.kern.sf.eval()) + \
'\nlengthscales: ' + str(self.kern.ell.eval())
return rep
class NPSDE(NPODE):
def __init__(self,Z0,U0,sn0,kern,diffus,s=1,jitter=jitter0,
summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False):
""" Constructor for the NPSDE model
Args:
Z0: Numpy matrix of initial inducing points of size MxD, M being the
number of inducing points.
U0: Numpy matrix of initial inducing vectors of size MxD, M being the
number of inducing points.
sn0: Numpy vector of size 1xD for initial signal variance
kern: Kernel object for GP interpolation
diffus: BrownianMotion object for diffusion GP interpolation
            s: Integer controlling how much denser the integration time points are
jitter: Float of jitter level
summ: Boolean for Tensorflow summary
whiten: Boolean. Currently we perform the optimization only in the
white domain
fix_Z: Boolean - whether inducing locations are fixed or optimized
fix_U: Boolean - whether inducing vectors are fixed or optimized
fix_sn: Boolean - whether noise variance is fixed or optimized
"""
super().__init__(Z0,U0,sn0,kern,jitter=jitter,
summ=summ,whiten=whiten,fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn)
self.name = 'npsde'
self.diffus = diffus
self.integrator = SDEEM(self)
def build_prior(self):
pf = super().build_prior()
pg = self.diffus.build_prior()
return pf + pg
def g(self,ts,Nw=1):
return self.diffus.g(ts=ts,Nw=Nw)
def forward(self,x0,ts,Nw=1):
return self.integrator.forward(x0=x0,ts=ts,Nw=Nw)
def sample(self,x0,t,Nw):
""" Draws random samples from a learned SDE system
Args:
Nw: Integer number of samples
x0: Python/numpy array of initial value
t: Python/numpy array of time points the integral is evaluated at
Returns:
Tensor of size [Nw,len(t),len(x0)] storing samples
"""
# returns (Nw, len(t), D)
x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1))
t = [t]
path = self.integrator.forward(x0,t,Nw)
path = path[0]
return path
def __str__(self):
return super().__str__() + self.diffus.__str__()
class BrownianMotion:
def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False,
fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False):
with tf.name_scope('Brownian'):
Zg = Param(Z0,
name = "Z",
summ = False,
fixed = fix_Z)
Ug = Param(U0,
name = "U",
summ = False,
fixed = fix_U)
self.kern = OperatorKernel(sf0=sf0,
ell0=ell0,
ktype="id",
name='Kernel',
summ=summ,
fix_ell=fix_ell,
fix_sf=fix_sf)
self.Zg = Zg()
self.Ug = Ug()
self.jitter = 1e-6
self.whiten = whiten
self.fix_Z = fix_Z
self.fix_U = fix_U
def g(self,X,t):
""" generates state dependent brownian motion
Args:
X: current states (in rows)
t: current time (used if diffusion depends on time)
Returns:
A tensor of the same shape as X
"""
Ug = self.Ug
Zg = self.Zg
kern = self.kern
if not kern.ktype == "id":
raise NotImplementedError()
M = tf.shape(Zg)[0]
D = tf.shape(X)[1]
if kern.ktype == "id":
Kzz = kern.K(Zg) + tf.eye(M, dtype=float_type) * self.jitter
else:
Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type) * self.jitter
Lz = tf.cholesky(Kzz)
Kzx = kern.K(Zg, X)
A = tf.matrix_triangular_solve(Lz, Kzx, lower=True)
if not self.whiten:
A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False)
g = tf.matmul(A, Ug, transpose_a=True)
dw = tf.random_normal(tf.shape(X),dtype=float_type)
return g*dw
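        # Editor's note: this is the same GP conditional-mean interpolation as the
        # drift f(X), applied to the diffusion inducing vectors Ug and multiplied by a
        # standard-normal draw dw, i.e. a state-dependent diffusion increment.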
def __str__(self):
rep = '\ndiff signal variance: ' + str(self.kern.sf.eval()) + \
'\ndiff lengthscales: ' + str(self.kern.ell.eval())
return rep
def build_prior(self):
if self.whiten:
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(self.Ug))
else:
mvn = tfd.MultivariateNormalFullCovariance(
loc=tf.zeros_like(self.Ug),
covariance_matrix=self.kern.K(self.Zg,self.Zg))
return tf.reduce_sum(mvn.log_prob(self.Ug))
|
[
"gpflow.transforms.Log1pe",
"kernels.OperatorKernel",
"numpy.asarray",
"tensorflow.reshape",
"integrators.SDEEM",
"tensorflow.eye",
"tensorflow.zeros_like",
"tensorflow.cholesky",
"tensorflow.transpose",
"tensorflow.matmul",
"tensorflow.shape",
"numpy.array",
"tensorflow.squeeze",
"tensorflow.matrix_triangular_solve",
"tensorflow.name_scope",
"integrators.ODERK4",
"param.Param"
] |
[((2065, 2077), 'integrators.ODERK4', 'ODERK4', (['self'], {}), '(self)\n', (2071, 2077), False, 'from integrators import ODERK4, SDEEM\n'), ((2893, 2909), 'tensorflow.cholesky', 'tf.cholesky', (['Kzz'], {}), '(Kzz)\n', (2904, 2909), True, 'import tensorflow as tf\n'), ((2951, 2998), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Lz', 'Kzx'], {'lower': '(True)'}), '(Lz, Kzx, lower=True)\n', (2977, 2998), True, 'import tensorflow as tf\n'), ((3118, 3151), 'tensorflow.matmul', 'tf.matmul', (['A', 'U'], {'transpose_a': '(True)'}), '(A, U, transpose_a=True)\n', (3127, 3151), True, 'import tensorflow as tf\n'), ((4815, 4827), 'integrators.ODERK4', 'ODERK4', (['self'], {}), '(self)\n', (4821, 4827), False, 'from integrators import ODERK4, SDEEM\n'), ((7073, 7084), 'integrators.SDEEM', 'SDEEM', (['self'], {}), '(self)\n', (7078, 7084), False, 'from integrators import ODERK4, SDEEM\n'), ((9605, 9621), 'tensorflow.cholesky', 'tf.cholesky', (['Kzz'], {}), '(Kzz)\n', (9616, 9621), True, 'import tensorflow as tf\n'), ((9664, 9711), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Lz', 'Kzx'], {'lower': '(True)'}), '(Lz, Kzx, lower=True)\n', (9690, 9711), True, 'import tensorflow as tf\n'), ((9831, 9865), 'tensorflow.matmul', 'tf.matmul', (['A', 'Ug'], {'transpose_a': '(True)'}), '(A, Ug, transpose_a=True)\n', (9840, 9865), True, 'import tensorflow as tf\n'), ((1411, 1432), 'tensorflow.name_scope', 'tf.name_scope', (['"""NPDE"""'], {}), "('NPDE')\n", (1424, 1432), True, 'import tensorflow as tf\n'), ((1450, 1494), 'param.Param', 'Param', (['Z0'], {'name': '"""Z"""', 'summ': '(False)', 'fixed': 'fix_Z'}), "(Z0, name='Z', summ=False, fixed=fix_Z)\n", (1455, 1494), False, 'from param import Param\n'), ((1586, 1630), 'param.Param', 'Param', (['U0'], {'name': '"""U"""', 'summ': '(False)', 'fixed': 'fix_U'}), "(U0, name='U', summ=False, fixed=fix_U)\n", (1591, 1630), False, 'from param import Param\n'), ((2604, 2615), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (2612, 2615), True, 'import tensorflow as tf\n'), ((2631, 2642), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (2639, 2642), True, 'import tensorflow as tf\n'), ((2658, 2669), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (2666, 2669), True, 'import tensorflow as tf\n'), ((3279, 3300), 'tensorflow.reshape', 'tf.reshape', (['f', '[N, D]'], {}), '(f, [N, D])\n', (3289, 3300), True, 'import tensorflow as tf\n'), ((4987, 4998), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (4995, 4998), True, 'import tensorflow as tf\n'), ((5014, 5025), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (5022, 5025), True, 'import tensorflow as tf\n'), ((5372, 5388), 'tensorflow.matmul', 'tf.matmul', (['Lz', 'U'], {}), '(Lz, U)\n', (5381, 5388), True, 'import tensorflow as tf\n'), ((8209, 8234), 'tensorflow.name_scope', 'tf.name_scope', (['"""Brownian"""'], {}), "('Brownian')\n", (8222, 8234), True, 'import tensorflow as tf\n'), ((8253, 8297), 'param.Param', 'Param', (['Z0'], {'name': '"""Z"""', 'summ': '(False)', 'fixed': 'fix_Z'}), "(Z0, name='Z', summ=False, fixed=fix_Z)\n", (8258, 8297), False, 'from param import Param\n'), ((8390, 8434), 'param.Param', 'Param', (['U0'], {'name': '"""U"""', 'summ': '(False)', 'fixed': 'fix_U'}), "(U0, name='U', summ=False, fixed=fix_U)\n", (8395, 8434), False, 'from param import Param\n'), ((8537, 8645), 'kernels.OperatorKernel', 'OperatorKernel', ([], {'sf0': 'sf0', 'ell0': 'ell0', 'ktype': '"""id"""', 'name': '"""Kernel"""', 'summ': 'summ', 'fix_ell': 
'fix_ell', 'fix_sf': 'fix_sf'}), "(sf0=sf0, ell0=ell0, ktype='id', name='Kernel', summ=summ,\n fix_ell=fix_ell, fix_sf=fix_sf)\n", (8551, 8645), False, 'from kernels import OperatorKernel\n'), ((9355, 9367), 'tensorflow.shape', 'tf.shape', (['Zg'], {}), '(Zg)\n', (9363, 9367), True, 'import tensorflow as tf\n'), ((9383, 9394), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (9391, 9394), True, 'import tensorflow as tf\n'), ((9896, 9907), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (9904, 9907), True, 'import tensorflow as tf\n'), ((1761, 1774), 'numpy.array', 'np.array', (['sn0'], {}), '(sn0)\n', (1769, 1774), True, 'import numpy as np\n'), ((3071, 3087), 'tensorflow.transpose', 'tf.transpose', (['Lz'], {}), '(Lz)\n', (3083, 3087), True, 'import tensorflow as tf\n'), ((4730, 4762), 'numpy.asarray', 'np.asarray', (['x0'], {'dtype': 'np.float64'}), '(x0, dtype=np.float64)\n', (4740, 4762), True, 'import numpy as np\n'), ((7807, 7839), 'numpy.asarray', 'np.asarray', (['x0'], {'dtype': 'np.float64'}), '(x0, dtype=np.float64)\n', (7817, 7839), True, 'import numpy as np\n'), ((9784, 9800), 'tensorflow.transpose', 'tf.transpose', (['Lz'], {}), '(Lz)\n', (9796, 9800), True, 'import tensorflow as tf\n'), ((1922, 1941), 'gpflow.transforms.Log1pe', 'transforms.Log1pe', ([], {}), '()\n', (1939, 1941), False, 'from gpflow import transforms\n'), ((2750, 2777), 'tensorflow.eye', 'tf.eye', (['M'], {'dtype': 'float_type'}), '(M, dtype=float_type)\n', (2756, 2777), True, 'import tensorflow as tf\n'), ((2836, 2867), 'tensorflow.eye', 'tf.eye', (['(M * D)'], {'dtype': 'float_type'}), '(M * D, dtype=float_type)\n', (2842, 2867), True, 'import tensorflow as tf\n'), ((4251, 4269), 'tensorflow.squeeze', 'tf.squeeze', (['self.U'], {}), '(self.U)\n', (4261, 4269), True, 'import tensorflow as tf\n'), ((5105, 5132), 'tensorflow.eye', 'tf.eye', (['M'], {'dtype': 'float_type'}), '(M, dtype=float_type)\n', (5111, 5132), True, 'import tensorflow as tf\n'), ((5191, 5222), 'tensorflow.eye', 'tf.eye', (['(M * D)'], {'dtype': 'float_type'}), '(M * D, dtype=float_type)\n', (5197, 5222), True, 'import tensorflow as tf\n'), ((9461, 9488), 'tensorflow.eye', 'tf.eye', (['M'], {'dtype': 'float_type'}), '(M, dtype=float_type)\n', (9467, 9488), True, 'import tensorflow as tf\n'), ((9548, 9579), 'tensorflow.eye', 'tf.eye', (['(M * D)'], {'dtype': 'float_type'}), '(M * D, dtype=float_type)\n', (9554, 9579), True, 'import tensorflow as tf\n'), ((10259, 10281), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.Ug'], {}), '(self.Ug)\n', (10272, 10281), True, 'import tensorflow as tf\n'), ((10381, 10403), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.Ug'], {}), '(self.Ug)\n', (10394, 10403), True, 'import tensorflow as tf\n'), ((3518, 3545), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.U[:, 0]'], {}), '(self.U[:, 0])\n', (3531, 3545), True, 'import tensorflow as tf\n'), ((3656, 3683), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.U[:, 0]'], {}), '(self.U[:, 0])\n', (3669, 3683), True, 'import tensorflow as tf\n'), ((3974, 3995), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.U'], {}), '(self.U)\n', (3987, 3995), True, 'import tensorflow as tf\n'), ((4107, 4128), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.U'], {}), '(self.U)\n', (4120, 4128), True, 'import tensorflow as tf\n')]
|
"""
Some methods for kinetics.
"""
import carla
import numpy as np
import math
def get_speed(vehicle):
"""
    Get the vehicle speed, considering only the 2D (x, y) velocity.
"""
vel = vehicle.get_velocity()
return math.sqrt(vel.x ** 2 + vel.y ** 2) # + vel.z ** 2)
def set_vehicle_speed(vehicle, speed: float):
"""
    Set the vehicle to a target speed.
    The velocity vector coincides with the vehicle's x-axis.
    :param speed: target speed in m/s
"""
    # set an initial speed for the ego vehicle
transform = vehicle.get_transform()
# transform matrix from actor coord system to world system
trans_matrix = get_transform_matrix(transform) # actor2world
# target velocity in local coordinate system, in m/s
target_vel = np.array([[speed], [0.], [0.]])
# target velocity in world coordinate system
target_vel_world = np.dot(trans_matrix, target_vel)
target_vel_world = np.squeeze(target_vel_world)
# in carla.Vector3D
target_velocity = carla.Vector3D(
x=target_vel_world[0],
y=target_vel_world[1],
z=target_vel_world[2],
)
#
vehicle.set_target_velocity(target_velocity)
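# Usage sketch (editor's illustration; `ego_vehicle` is a hypothetical carla.Vehicle):
#   set_vehicle_speed(ego_vehicle, 10.0)   # launch the vehicle at 10 m/s (~36 km/h) along its heading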
def angle_reg(angle):
"""
Regularize angle into certain bound.
default range is [-pi, pi]
"""
while True:
if -np.pi <= angle <= np.pi:
return angle
if angle < -np.pi:
angle += 2 * np.pi
else:
angle -= 2 * np.pi
def get_transform_matrix(transform: carla.Transform):
"""
Get and parse a transformation matrix by transform.
Matrix is from Actor coord system to the world coord system.
:param transform:
:return trans_matrix: transform matrix in ndarray
"""
# original trans matrix in list
_T = transform.get_matrix()
# transform matrix from Actor system to world system
trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]],
[_T[1][0], _T[1][1], _T[1][2]],
[_T[2][0], _T[2][1], _T[2][2]]])
return trans_matrix
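# Editor's note: carla.Transform.get_matrix() returns a 4x4 homogeneous matrix; only the
# 3x3 rotation block is kept above, so translation is dropped, which is what we want
# when rotating direction/velocity vectors between the actor and world frames.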
def get_inverse_transform_matrix(transform: carla.Transform):
"""
Get inverse transform matrix from a transform class.
Inverse transform refers to from world coord system to actor coord system.
"""
_T = transform.get_inverse_matrix()
# transform matrix from Actor system to world system
inverse_trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]],
[_T[1][0], _T[1][1], _T[1][2]],
[_T[2][0], _T[2][1], _T[2][2]]])
return inverse_trans_matrix
def vector2array(vector: carla.Vector3D):
"""
Transform carla.Vector3D instance to ndarray
"""
array = np.array([vector.x, vector.y, vector.z])
return array
def get_vehicle_kinetic(vehicle: carla.Vehicle):
"""
todo unfinished
Get kinetics of ego vehicle.
todo use a class to encapsulate all methods about getting kinetics
"""
    kinetic_dict = {}
    # collect the basic kinematic quantities; further fields can be added later
    kinetic_dict['transform'] = vehicle.get_transform()
    kinetic_dict['velocity'] = vector2array(vehicle.get_velocity())
    kinetic_dict['acceleration'] = vector2array(vehicle.get_acceleration())
    kinetic_dict['angular_velocity'] = vector2array(vehicle.get_angular_velocity())
    return kinetic_dict
def get_distance_along_route(wmap, route, target_location):
"""
Calculate the distance of the given location along the route
Note: If the location is not along the route, the route length will be returned
:param wmap: carla.Map of current world
:param route: list of tuples, (carla.Transform, RoadOption)
:param target_location:
"""
covered_distance = 0
prev_position = None
found = False
# Don't use the input location, use the corresponding wp as location
target_location_from_wp = wmap.get_waypoint(target_location).transform.location
for trans, _ in route:
# input route is transform
position = trans.location
location = target_location_from_wp
# Don't perform any calculations for the first route point
if not prev_position:
prev_position = position
continue
# Calculate distance between previous and current route point
interval_length_squared = ((prev_position.x - position.x) ** 2) + ((prev_position.y - position.y) ** 2)
distance_squared = ((location.x - prev_position.x) ** 2) + ((location.y - prev_position.y) ** 2)
# Close to the current position? Stop calculation
if distance_squared < 1.0:
break
if distance_squared < 400 and not distance_squared < interval_length_squared:
# Check if a neighbor lane is closer to the route
            # Do this only within a short distance of the current route interval, otherwise the computational load is too high
starting_wp = wmap.get_waypoint(location)
wp = starting_wp.get_left_lane()
while wp is not None:
new_location = wp.transform.location
new_distance_squared = ((new_location.x - prev_position.x) ** 2) + (
(new_location.y - prev_position.y) ** 2)
if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id):
break
if new_distance_squared < distance_squared:
distance_squared = new_distance_squared
location = new_location
else:
break
wp = wp.get_left_lane()
wp = starting_wp.get_right_lane()
while wp is not None:
new_location = wp.transform.location
new_distance_squared = ((new_location.x - prev_position.x) ** 2) + (
(new_location.y - prev_position.y) ** 2)
if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id):
break
if new_distance_squared < distance_squared:
distance_squared = new_distance_squared
location = new_location
else:
break
wp = wp.get_right_lane()
if distance_squared < interval_length_squared:
# The location could be inside the current route interval, if route/lane ids match
# Note: This assumes a sufficiently small route interval
# An alternative is to compare orientations, however, this also does not work for
# long route intervals
curr_wp = wmap.get_waypoint(position)
prev_wp = wmap.get_waypoint(prev_position)
wp = wmap.get_waypoint(location)
if prev_wp and curr_wp and wp:
if wp.road_id == prev_wp.road_id or wp.road_id == curr_wp.road_id:
# Roads match, now compare the sign of the lane ids
if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or
np.sign(wp.lane_id) == np.sign(curr_wp.lane_id)):
# The location is within the current route interval
covered_distance += math.sqrt(distance_squared)
found = True
break
covered_distance += math.sqrt(interval_length_squared)
prev_position = position
return covered_distance, found
|
[
"math.sqrt",
"numpy.array",
"numpy.sign",
"numpy.squeeze",
"numpy.dot",
"carla.Vector3D"
] |
[((211, 245), 'math.sqrt', 'math.sqrt', (['(vel.x ** 2 + vel.y ** 2)'], {}), '(vel.x ** 2 + vel.y ** 2)\n', (220, 245), False, 'import math\n'), ((718, 751), 'numpy.array', 'np.array', (['[[speed], [0.0], [0.0]]'], {}), '([[speed], [0.0], [0.0]])\n', (726, 751), True, 'import numpy as np\n'), ((822, 854), 'numpy.dot', 'np.dot', (['trans_matrix', 'target_vel'], {}), '(trans_matrix, target_vel)\n', (828, 854), True, 'import numpy as np\n'), ((878, 906), 'numpy.squeeze', 'np.squeeze', (['target_vel_world'], {}), '(target_vel_world)\n', (888, 906), True, 'import numpy as np\n'), ((953, 1041), 'carla.Vector3D', 'carla.Vector3D', ([], {'x': 'target_vel_world[0]', 'y': 'target_vel_world[1]', 'z': 'target_vel_world[2]'}), '(x=target_vel_world[0], y=target_vel_world[1], z=\n target_vel_world[2])\n', (967, 1041), False, 'import carla\n'), ((1834, 1945), 'numpy.array', 'np.array', (['[[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0],\n _T[2][1], _T[2][2]]]'], {}), '([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [\n _T[2][0], _T[2][1], _T[2][2]]])\n', (1842, 1945), True, 'import numpy as np\n'), ((2366, 2477), 'numpy.array', 'np.array', (['[[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [_T[2][0],\n _T[2][1], _T[2][2]]]'], {}), '([[_T[0][0], _T[0][1], _T[0][2]], [_T[1][0], _T[1][1], _T[1][2]], [\n _T[2][0], _T[2][1], _T[2][2]]])\n', (2374, 2477), True, 'import numpy as np\n'), ((2702, 2742), 'numpy.array', 'np.array', (['[vector.x, vector.y, vector.z]'], {}), '([vector.x, vector.y, vector.z])\n', (2710, 2742), True, 'import numpy as np\n'), ((7055, 7089), 'math.sqrt', 'math.sqrt', (['interval_length_squared'], {}), '(interval_length_squared)\n', (7064, 7089), False, 'import math\n'), ((4992, 5020), 'numpy.sign', 'np.sign', (['starting_wp.lane_id'], {}), '(starting_wp.lane_id)\n', (4999, 5020), True, 'import numpy as np\n'), ((5024, 5043), 'numpy.sign', 'np.sign', (['wp.lane_id'], {}), '(wp.lane_id)\n', (5031, 5043), True, 'import numpy as np\n'), ((5625, 5653), 'numpy.sign', 'np.sign', (['starting_wp.lane_id'], {}), '(starting_wp.lane_id)\n', (5632, 5653), True, 'import numpy as np\n'), ((5657, 5676), 'numpy.sign', 'np.sign', (['wp.lane_id'], {}), '(wp.lane_id)\n', (5664, 5676), True, 'import numpy as np\n'), ((6931, 6958), 'math.sqrt', 'math.sqrt', (['distance_squared'], {}), '(distance_squared)\n', (6940, 6958), False, 'import math\n'), ((6682, 6701), 'numpy.sign', 'np.sign', (['wp.lane_id'], {}), '(wp.lane_id)\n', (6689, 6701), True, 'import numpy as np\n'), ((6705, 6729), 'numpy.sign', 'np.sign', (['prev_wp.lane_id'], {}), '(prev_wp.lane_id)\n', (6712, 6729), True, 'import numpy as np\n'), ((6761, 6780), 'numpy.sign', 'np.sign', (['wp.lane_id'], {}), '(wp.lane_id)\n', (6768, 6780), True, 'import numpy as np\n'), ((6784, 6808), 'numpy.sign', 'np.sign', (['curr_wp.lane_id'], {}), '(curr_wp.lane_id)\n', (6791, 6808), True, 'import numpy as np\n')]
|
import numpy as np
from fluiddyn.clusters.legi import Calcul2 as Cluster
from critical_Ra_RB import Ra_c_RB as Ra_c_RB_tests
prandtl = 1.0
dim = 2
dt_max = 0.005
end_time = 30
nb_procs = 10
nx = 8
order = 10
stretch_factor = 0.0
Ra_vert = 1750
x_periodicity = False
z_periodicity = False
cluster = Cluster()
cluster.commands_setting_env = [
"PROJET_DIR=/fsnet/project/meige/2020/20CONVECTION",
"source /etc/profile",
"source $PROJET_DIR/miniconda3/etc/profile.d/conda.sh",
"conda activate env-snek",
"export NEK_SOURCE_ROOT=$HOME/Dev/snek5000/lib/Nek5000",
"export PATH=$PATH:$NEK_SOURCE_ROOT/bin",
"export FLUIDSIM_PATH=$PROJET_DIR/numerical/",
]
for aspect_ratio, Ra_c_test in Ra_c_RB_tests.items():
ny = int(nx * aspect_ratio)
if nx * aspect_ratio - ny:
continue
Ra_vert_nums = np.logspace(np.log10(Ra_c_test), np.log10(1.04 * Ra_c_test), 4)
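    # Four Rayleigh numbers log-spaced between the critical value and 1.04*Ra_c;
    # e.g. with Ra_c ~ 1708 this spans roughly 1708-1776 (editor's illustrative figure).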
for Ra_vert_num in Ra_vert_nums:
command = (
f"run_simul_check_from_python.py -Pr {prandtl} -nx {nx} --dim {dim} "
f"--order {order} --dt-max {dt_max} --end-time {end_time} -np {nb_procs} "
f"-a_y {aspect_ratio} --stretch-factor {stretch_factor} "
f"--Ra-vert {Ra_vert_num}"
)
if x_periodicity:
command += " --x-periodicity"
elif z_periodicity:
command += " --z-periodicity"
print(command)
name_run = f"RB_asp{aspect_ratio:.3f}_Ra{Ra_vert_num:.3e}_Pr{prandtl:.2f}_msh{nx*order}x{round(nx*aspect_ratio)*order}"
cluster.submit_script(
command,
name_run=name_run,
nb_cores_per_node=nb_procs,
omp_num_threads=1,
ask=False,
)
|
[
"fluiddyn.clusters.legi.Calcul2",
"numpy.log10",
"critical_Ra_RB.Ra_c_RB.items"
] |
[((306, 315), 'fluiddyn.clusters.legi.Calcul2', 'Cluster', ([], {}), '()\n', (313, 315), True, 'from fluiddyn.clusters.legi import Calcul2 as Cluster\n'), ((718, 739), 'critical_Ra_RB.Ra_c_RB.items', 'Ra_c_RB_tests.items', ([], {}), '()\n', (737, 739), True, 'from critical_Ra_RB import Ra_c_RB as Ra_c_RB_tests\n'), ((854, 873), 'numpy.log10', 'np.log10', (['Ra_c_test'], {}), '(Ra_c_test)\n', (862, 873), True, 'import numpy as np\n'), ((875, 901), 'numpy.log10', 'np.log10', (['(1.04 * Ra_c_test)'], {}), '(1.04 * Ra_c_test)\n', (883, 901), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pandas as pd
from matplotlib import pyplot as plt
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
from pmag_env import set_env
import operator
OPS = {'<' : operator.lt, '<=' : operator.le,
'>' : operator.gt, '>=': operator.ge, '=': operator.eq}
def main():
"""
NAME
foldtest_magic.py
DESCRIPTION
does a fold test (Tauxe, 2010) on data
INPUT FORMAT
pmag_specimens format file, er_samples.txt format file (for bedding)
SYNTAX
foldtest_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f sites formatted file [default for 3.0 is sites.txt, for 2.5, pmag_sites.txt]
-fsa samples formatted file
-fsi sites formatted file
-exc use criteria to set acceptance criteria (supported only for data model 3)
-n NB, set number of bootstraps, default is 1000
-b MIN, MAX, set bounds for untilting, default is -10, 150
-fmt FMT, specify format - default is svg
-sav saves plots and quits
-DM NUM MagIC data model number (2 or 3, default 3)
OUTPUT
Geographic: is an equal area projection of the input data in
original coordinates
Stratigraphic: is an equal area projection of the input data in
tilt adjusted coordinates
% Untilting: The dashed (red) curves are representative plots of
maximum eigenvalue (tau_1) as a function of untilting
The solid line is the cumulative distribution of the
% Untilting required to maximize tau for all the
bootstrapped data sets. The dashed vertical lines
are 95% confidence bounds on the % untilting that yields
the most clustered result (maximum tau_1).
Command line: prints out the bootstrapped iterations and
finally the confidence bounds on optimum untilting.
If the 95% conf bounds include 0, then a pre-tilt magnetization is indicated
If the 95% conf bounds include 100, then a post-tilt magnetization is indicated
If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is
possible as is vertical axis rotation or other pathologies
"""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
kappa = 0
dir_path = pmag.get_named_arg("-WD", ".")
nboot = int(float(pmag.get_named_arg("-n", 1000))) # number of bootstraps
fmt = pmag.get_named_arg("-fmt", "svg")
data_model_num = int(float(pmag.get_named_arg("-DM", 3)))
if data_model_num == 3:
infile = pmag.get_named_arg("-f", 'sites.txt')
orfile = 'samples.txt'
site_col = 'site'
dec_col = 'dir_dec'
inc_col = 'dir_inc'
tilt_col = 'dir_tilt_correction'
dipkey, azkey = 'bed_dip', 'bed_dip_direction'
crit_col = 'criterion'
critfile = 'criteria.txt'
else:
infile = pmag.get_named_arg("-f", 'pmag_sites.txt')
orfile = 'er_samples.txt'
site_col = 'er_site_name'
dec_col = 'site_dec'
inc_col = 'site_inc'
tilt_col = 'site_tilt_correction'
dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction'
crit_col = 'pmag_criteria_code'
critfile = 'pmag_criteria.txt'
if '-sav' in sys.argv:
plot = 1
else:
plot = 0
if '-b' in sys.argv:
ind = sys.argv.index('-b')
untilt_min = int(sys.argv[ind+1])
untilt_max = int(sys.argv[ind+2])
else:
untilt_min, untilt_max = -10, 150
if '-fsa' in sys.argv:
orfile = pmag.get_named_arg("-fsa", "")
elif '-fsi' in sys.argv:
orfile = pmag.get_named_arg("-fsi", "")
if data_model_num == 3:
dipkey, azkey = 'bed_dip', 'bed_dip_direction'
else:
dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction'
else:
if data_model_num == 3:
orfile = 'sites.txt'
else:
orfile = 'pmag_sites.txt'
orfile = pmag.resolve_file_name(orfile, dir_path)
infile = pmag.resolve_file_name(infile, dir_path)
critfile = pmag.resolve_file_name(critfile, dir_path)
df = pd.read_csv(infile, sep='\t', header=1)
# keep only records with tilt_col
data = df.copy()
data = data[data[tilt_col].notnull()]
data = data.where(data.notnull(), "")
# turn into pmag data list
data = list(data.T.apply(dict))
# get orientation data
if data_model_num == 3:
# often orientation will be in infile (sites table)
if os.path.split(orfile)[1] == os.path.split(infile)[1]:
ordata = df[df[azkey].notnull()]
ordata = ordata[ordata[dipkey].notnull()]
ordata = list(ordata.T.apply(dict))
# sometimes orientation might be in a sample file instead
else:
ordata = pd.read_csv(orfile, sep='\t', header=1)
ordata = list(ordata.T.apply(dict))
else:
ordata, file_type = pmag.magic_read(orfile)
if '-exc' in sys.argv:
crits, file_type = pmag.magic_read(critfile)
SiteCrits = []
for crit in crits:
if crit[crit_col] == "DE-SITE":
SiteCrits.append(crit)
#break
# get to work
#
PLTS = {'geo': 1, 'strat': 2, 'taus': 3} # make plot dictionary
if not set_env.IS_WIN:
pmagplotlib.plot_init(PLTS['geo'], 5, 5)
pmagplotlib.plot_init(PLTS['strat'], 5, 5)
pmagplotlib.plot_init(PLTS['taus'], 5, 5)
if data_model_num == 2:
GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T')
else:
GEOrecs = data
if len(GEOrecs) > 0: # have some geographic data
num_dropped = 0
DIDDs = [] # set up list for dec inc dip_direction, dip
for rec in GEOrecs: # parse data
dip, dip_dir = 0, -1
Dec = float(rec[dec_col])
Inc = float(rec[inc_col])
orecs = pmag.get_dictitem(
ordata, site_col, rec[site_col], 'T')
if len(orecs) > 0:
if orecs[0][azkey] != "":
dip_dir = float(orecs[0][azkey])
if orecs[0][dipkey] != "":
dip = float(orecs[0][dipkey])
if dip != 0 and dip_dir != -1:
if '-exc' in sys.argv:
keep = 1
for site_crit in SiteCrits:
crit_name = site_crit['table_column'].split('.')[1]
if crit_name and crit_name in rec.keys() and rec[crit_name]:
# get the correct operation (<, >=, =, etc.)
op = OPS[site_crit['criterion_operation']]
# then make sure the site record passes
if op(float(rec[crit_name]), float(site_crit['criterion_value'])):
keep = 0
if keep == 1:
DIDDs.append([Dec, Inc, dip_dir, dip])
else:
num_dropped += 1
else:
DIDDs.append([Dec, Inc, dip_dir, dip])
if num_dropped:
print("-W- Dropped {} records because each failed one or more criteria".format(num_dropped))
else:
print('no geographic directional data found')
sys.exit()
pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic')
data = np.array(DIDDs)
D, I = pmag.dotilt_V(data)
TCs = np.array([D, I]).transpose()
pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic')
if plot == 0:
pmagplotlib.draw_figs(PLTS)
Percs = list(range(untilt_min, untilt_max))
Cdf, Untilt = [], []
plt.figure(num=PLTS['taus'])
print('doing ', nboot, ' iterations...please be patient.....')
for n in range(nboot): # do bootstrap data sets - plot first 25 as dashed red line
if n % 50 == 0:
print(n)
Taus = [] # set up lists for taus
PDs = pmag.pseudo(DIDDs)
if kappa != 0:
for k in range(len(PDs)):
d, i = pmag.fshdev(kappa)
dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3])
PDs[k][2] = dipdir
PDs[k][3] = dip
for perc in Percs:
tilt = np.array([1., 1., 1., 0.01*perc])
D, I = pmag.dotilt_V(PDs*tilt)
TCs = np.array([D, I]).transpose()
ppars = pmag.doprinc(TCs) # get principal directions
Taus.append(ppars['tau1'])
if n < 25:
plt.plot(Percs, Taus, 'r--')
# tilt that gives maximum tau
Untilt.append(Percs[Taus.index(np.max(Taus))])
Cdf.append(float(n) / float(nboot))
plt.plot(Percs, Taus, 'k')
plt.xlabel('% Untilting')
plt.ylabel('tau_1 (red), CDF (green)')
Untilt.sort() # now for CDF of tilt of maximum tau
plt.plot(Untilt, Cdf, 'g')
lower = int(.025*nboot)
upper = int(.975*nboot)
plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--')
plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--')
tit = '%i - %i %s' % (Untilt[lower], Untilt[upper], 'Percent Unfolding')
print(tit)
plt.title(tit)
if plot == 0:
pmagplotlib.draw_figs(PLTS)
ans = input('S[a]ve all figures, <Return> to quit \n ')
if ans != 'a':
print("Good bye")
sys.exit()
files = {}
for key in list(PLTS.keys()):
files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt)
pmagplotlib.save_plots(PLTS, files)
if __name__ == "__main__":
main()
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"pmagpy.pmag.fshdev",
"matplotlib.pyplot.figure",
"matplotlib.get_backend",
"pmagpy.pmagplotlib.plot_init",
"matplotlib.pyplot.axvline",
"pmagpy.pmag.pseudo",
"pmagpy.pmag.get_dictitem",
"pmagpy.pmagplotlib.draw_figs",
"pmagpy.pmag.dotilt_V",
"pmagpy.pmag.get_named_arg",
"numpy.max",
"sys.argv.index",
"pmagpy.pmag.resolve_file_name",
"pmagpy.pmagplotlib.plot_eq",
"pmagpy.pmag.dodirot",
"pmagpy.pmag.magic_read",
"matplotlib.use",
"matplotlib.pyplot.ylabel",
"sys.exit",
"matplotlib.pyplot.plot",
"pmagpy.pmag.doprinc",
"numpy.array",
"matplotlib.pyplot.xlabel",
"pmagpy.pmagplotlib.save_plots",
"os.path.split"
] |
[((83, 107), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (105, 107), False, 'import matplotlib\n'), ((124, 147), 'matplotlib.use', 'matplotlib.use', (['"""TKAgg"""'], {}), "('TKAgg')\n", (138, 147), False, 'import matplotlib\n'), ((2632, 2662), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-WD"""', '"""."""'], {}), "('-WD', '.')\n", (2650, 2662), True, 'import pmagpy.pmag as pmag\n'), ((2755, 2788), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-fmt"""', '"""svg"""'], {}), "('-fmt', 'svg')\n", (2773, 2788), True, 'import pmagpy.pmag as pmag\n'), ((4327, 4367), 'pmagpy.pmag.resolve_file_name', 'pmag.resolve_file_name', (['orfile', 'dir_path'], {}), '(orfile, dir_path)\n', (4349, 4367), True, 'import pmagpy.pmag as pmag\n'), ((4381, 4421), 'pmagpy.pmag.resolve_file_name', 'pmag.resolve_file_name', (['infile', 'dir_path'], {}), '(infile, dir_path)\n', (4403, 4421), True, 'import pmagpy.pmag as pmag\n'), ((4437, 4479), 'pmagpy.pmag.resolve_file_name', 'pmag.resolve_file_name', (['critfile', 'dir_path'], {}), '(critfile, dir_path)\n', (4459, 4479), True, 'import pmagpy.pmag as pmag\n'), ((4489, 4528), 'pandas.read_csv', 'pd.read_csv', (['infile'], {'sep': '"""\t"""', 'header': '(1)'}), "(infile, sep='\\t', header=1)\n", (4500, 4528), True, 'import pandas as pd\n'), ((7679, 7732), 'pmagpy.pmagplotlib.plot_eq', 'pmagplotlib.plot_eq', (["PLTS['geo']", 'DIDDs', '"""Geographic"""'], {}), "(PLTS['geo'], DIDDs, 'Geographic')\n", (7698, 7732), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((7744, 7759), 'numpy.array', 'np.array', (['DIDDs'], {}), '(DIDDs)\n', (7752, 7759), True, 'import numpy as np\n'), ((7771, 7790), 'pmagpy.pmag.dotilt_V', 'pmag.dotilt_V', (['data'], {}), '(data)\n', (7784, 7790), True, 'import pmagpy.pmag as pmag\n'), ((7834, 7890), 'pmagpy.pmagplotlib.plot_eq', 'pmagplotlib.plot_eq', (["PLTS['strat']", 'TCs', '"""Stratigraphic"""'], {}), "(PLTS['strat'], TCs, 'Stratigraphic')\n", (7853, 7890), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((8022, 8050), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': "PLTS['taus']"}), "(num=PLTS['taus'])\n", (8032, 8050), True, 'from matplotlib import pyplot as plt\n'), ((9044, 9070), 'matplotlib.pyplot.plot', 'plt.plot', (['Percs', 'Taus', '"""k"""'], {}), "(Percs, Taus, 'k')\n", (9052, 9070), True, 'from matplotlib import pyplot as plt\n'), ((9075, 9100), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""% Untilting"""'], {}), "('% Untilting')\n", (9085, 9100), True, 'from matplotlib import pyplot as plt\n'), ((9105, 9143), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""tau_1 (red), CDF (green)"""'], {}), "('tau_1 (red), CDF (green)')\n", (9115, 9143), True, 'from matplotlib import pyplot as plt\n'), ((9204, 9230), 'matplotlib.pyplot.plot', 'plt.plot', (['Untilt', 'Cdf', '"""g"""'], {}), "(Untilt, Cdf, 'g')\n", (9212, 9230), True, 'from matplotlib import pyplot as plt\n'), ((9291, 9364), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'Untilt[lower]', 'ymin': '(0)', 'ymax': '(1)', 'linewidth': '(1)', 'linestyle': '"""--"""'}), "(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--')\n", (9302, 9364), True, 'from matplotlib import pyplot as plt\n'), ((9369, 9442), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'Untilt[upper]', 'ymin': '(0)', 'ymax': '(1)', 'linewidth': '(1)', 'linestyle': '"""--"""'}), "(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--')\n", (9380, 9442), True, 'from matplotlib import pyplot as plt\n'), ((9539, 9553), 
'matplotlib.pyplot.title', 'plt.title', (['tit'], {}), '(tit)\n', (9548, 9553), True, 'from matplotlib import pyplot as plt\n'), ((9870, 9905), 'pmagpy.pmagplotlib.save_plots', 'pmagplotlib.save_plots', (['PLTS', 'files'], {}), '(PLTS, files)\n', (9892, 9905), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((2573, 2583), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2581, 2583), False, 'import sys\n'), ((2896, 2933), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-f"""', '"""sites.txt"""'], {}), "('-f', 'sites.txt')\n", (2914, 2933), True, 'import pmagpy.pmag as pmag\n'), ((3235, 3277), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-f"""', '"""pmag_sites.txt"""'], {}), "('-f', 'pmag_sites.txt')\n", (3253, 3277), True, 'import pmagpy.pmag as pmag\n'), ((3704, 3724), 'sys.argv.index', 'sys.argv.index', (['"""-b"""'], {}), "('-b')\n", (3718, 3724), False, 'import sys\n'), ((3905, 3935), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-fsa"""', '""""""'], {}), "('-fsa', '')\n", (3923, 3935), True, 'import pmagpy.pmag as pmag\n'), ((5293, 5316), 'pmagpy.pmag.magic_read', 'pmag.magic_read', (['orfile'], {}), '(orfile)\n', (5308, 5316), True, 'import pmagpy.pmag as pmag\n'), ((5372, 5397), 'pmagpy.pmag.magic_read', 'pmag.magic_read', (['critfile'], {}), '(critfile)\n', (5387, 5397), True, 'import pmagpy.pmag as pmag\n'), ((5675, 5715), 'pmagpy.pmagplotlib.plot_init', 'pmagplotlib.plot_init', (["PLTS['geo']", '(5)', '(5)'], {}), "(PLTS['geo'], 5, 5)\n", (5696, 5715), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((5724, 5766), 'pmagpy.pmagplotlib.plot_init', 'pmagplotlib.plot_init', (["PLTS['strat']", '(5)', '(5)'], {}), "(PLTS['strat'], 5, 5)\n", (5745, 5766), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((5775, 5816), 'pmagpy.pmagplotlib.plot_init', 'pmagplotlib.plot_init', (["PLTS['taus']", '(5)', '(5)'], {}), "(PLTS['taus'], 5, 5)\n", (5796, 5816), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((5863, 5906), 'pmagpy.pmag.get_dictitem', 'pmag.get_dictitem', (['data', 'tilt_col', '"""0"""', '"""T"""'], {}), "(data, tilt_col, '0', 'T')\n", (5880, 5906), True, 'import pmagpy.pmag as pmag\n'), ((7663, 7673), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7671, 7673), False, 'import sys\n'), ((7917, 7944), 'pmagpy.pmagplotlib.draw_figs', 'pmagplotlib.draw_figs', (['PLTS'], {}), '(PLTS)\n', (7938, 7944), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((8308, 8326), 'pmagpy.pmag.pseudo', 'pmag.pseudo', (['DIDDs'], {}), '(DIDDs)\n', (8319, 8326), True, 'import pmagpy.pmag as pmag\n'), ((9580, 9607), 'pmagpy.pmagplotlib.draw_figs', 'pmagplotlib.draw_figs', (['PLTS'], {}), '(PLTS)\n', (9601, 9607), True, 'import pmagpy.pmagplotlib as pmagplotlib\n'), ((2685, 2715), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-n"""', '(1000)'], {}), "('-n', 1000)\n", (2703, 2715), True, 'import pmagpy.pmag as pmag\n'), ((2820, 2848), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-DM"""', '(3)'], {}), "('-DM', 3)\n", (2838, 2848), True, 'import pmagpy.pmag as pmag\n'), ((3982, 4012), 'pmagpy.pmag.get_named_arg', 'pmag.get_named_arg', (['"""-fsi"""', '""""""'], {}), "('-fsi', '')\n", (4000, 4012), True, 'import pmagpy.pmag as pmag\n'), ((5167, 5206), 'pandas.read_csv', 'pd.read_csv', (['orfile'], {'sep': '"""\t"""', 'header': '(1)'}), "(orfile, sep='\\t', header=1)\n", (5178, 5206), True, 'import pandas as pd\n'), ((6256, 6311), 'pmagpy.pmag.get_dictitem', 'pmag.get_dictitem', (['ordata', 'site_col', 'rec[site_col]', '"""T"""'], {}), "(ordata, 
site_col, rec[site_col], 'T')\n", (6273, 6311), True, 'import pmagpy.pmag as pmag\n'), ((7801, 7817), 'numpy.array', 'np.array', (['[D, I]'], {}), '([D, I])\n', (7809, 7817), True, 'import numpy as np\n'), ((8614, 8652), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 0.01 * perc]'], {}), '([1.0, 1.0, 1.0, 0.01 * perc])\n', (8622, 8652), True, 'import numpy as np\n'), ((8667, 8692), 'pmagpy.pmag.dotilt_V', 'pmag.dotilt_V', (['(PDs * tilt)'], {}), '(PDs * tilt)\n', (8680, 8692), True, 'import pmagpy.pmag as pmag\n'), ((8758, 8775), 'pmagpy.pmag.doprinc', 'pmag.doprinc', (['TCs'], {}), '(TCs)\n', (8770, 8775), True, 'import pmagpy.pmag as pmag\n'), ((8874, 8902), 'matplotlib.pyplot.plot', 'plt.plot', (['Percs', 'Taus', '"""r--"""'], {}), "(Percs, Taus, 'r--')\n", (8882, 8902), True, 'from matplotlib import pyplot as plt\n'), ((9738, 9748), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9746, 9748), False, 'import sys\n'), ((4865, 4886), 'os.path.split', 'os.path.split', (['orfile'], {}), '(orfile)\n', (4878, 4886), False, 'import os\n'), ((4893, 4914), 'os.path.split', 'os.path.split', (['infile'], {}), '(infile)\n', (4906, 4914), False, 'import os\n'), ((8411, 8429), 'pmagpy.pmag.fshdev', 'pmag.fshdev', (['kappa'], {}), '(kappa)\n', (8422, 8429), True, 'import pmagpy.pmag as pmag\n'), ((8460, 8500), 'pmagpy.pmag.dodirot', 'pmag.dodirot', (['d', 'i', 'PDs[k][2]', 'PDs[k][3]'], {}), '(d, i, PDs[k][2], PDs[k][3])\n', (8472, 8500), True, 'import pmagpy.pmag as pmag\n'), ((8709, 8725), 'numpy.array', 'np.array', (['[D, I]'], {}), '([D, I])\n', (8717, 8725), True, 'import numpy as np\n'), ((8980, 8992), 'numpy.max', 'np.max', (['Taus'], {}), '(Taus)\n', (8986, 8992), True, 'import numpy as np\n')]
|
# coding=utf-8
import numpy as np
import reikna.cluda as cluda
from reikna.fft import FFT, FFTShift
import pyopencl.array as clarray
from pyopencl import clmath
from reikna.core import Computation, Transformation, Parameter, Annotation, Type
from reikna.algorithms import PureParallel
from matplotlib import cm
import time as t
import matplotlib.pyplot as plt
import statistic_functions4 as sf
#import mylog as Log
np.set_printoptions(threshold=np.inf)
batch = 100
N = 1024
api = cluda.any_api()
thr = api.Thread.create()
data = np.load('8psk_data.npy')
data = np.reshape(data, (batch*4, N))  # in total batch*4 = 400 segments of length N
t1 = t.clock()
data0 = data[0:batch, :].astype(np.complex128)
data_g = thr.to_device(data0)
print(t.clock()-t1)
#compile
fft = FFT(data_g, (0,1))
fftc = fft.compile(thr)
data_f = thr.array(data0.shape, dtype=np.complex128)
shift = FFTShift(data_f, (0,1))
shiftc = shift.compile(thr)
data_shift = thr.array(data0.shape, dtype=np.complex128)
sum = sf.stat(thr)
logg10 = sf.logg10(thr)
def myfft(data):
'''
input:
data: cluda-Array (100, 1024)
-----------------------------------------------
output:
TS_gpu: cluda-Array (1000, 1024)
'''
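    # Editor's summary of the pipeline below (a sketch, inferred from the calls):
    # 2-D FFT -> fftshift -> |.| -> log10 on the GPU, then the custom `sum`/`stat`
    # kernel from statistic_functions4 appears to accumulate per-frequency-bin
    # statistics into a 1000 x N intensity image for display.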
#FFT
t_fft = t.clock()
data_f = thr.array(data.shape, dtype=np.complex128)
STAT_gpu = thr.array(data.shape, dtype=np.complex128)
fftc(data_f, data)
shiftc(STAT_gpu, data_f)
#log
t_log = t.clock()
STAT_gpu = abs(STAT_gpu)
logg10(STAT_gpu, global_size = (N, batch))
    # statistics / interpolation
t_st = t.clock()
TS_gpu = cluda.ocl.Array(thr, shape=(1000, N), dtype=np.int)
sum(TS_gpu, STAT_gpu, global_size = (N,batch))
print('fft: %f, log: %f, stat: %f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st))
print('total: %f'%(t.clock()-t_fft))
return TS_gpu
i=0
j=0
fig=plt.figure()
#fig, ax = plt.subplots()
summ = 0
while i<100:
t1 = t.clock()
    data0 = data[j*batch:(j+1)*batch, :].astype(np.complex128)  # select the j-th block of `batch` rows
data_g = thr.to_device(data0)
out = myfft(data_g)
out = out.get()
t2 = t.clock()
#nipy_spectral
plt.clf()
#plt.imshow(out, cmap = cm.hot)
plt.imshow(out, cmap = 'nipy_spectral')
plt.ylim(0,1000)
plt.pause(0.00000001)
print('No. %d, transmission+compute: %f, plot: %f'%(i, t2-t1, t.clock()-t2))
summ = summ + t2-t1
j = j + 1
i = i + 1
if j == 4:
j=0
print('avg compute: %f'%(summ/100))
|
[
"numpy.load",
"numpy.set_printoptions",
"statistic_functions4.logg10",
"matplotlib.pyplot.clf",
"reikna.fft.FFTShift",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.ylim",
"time.clock",
"statistic_functions4.stat",
"matplotlib.pyplot.figure",
"numpy.reshape",
"reikna.cluda.any_api",
"reikna.cluda.ocl.Array",
"matplotlib.pyplot.pause",
"reikna.fft.FFT"
] |
[((430, 467), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (449, 467), True, 'import numpy as np\n'), ((500, 515), 'reikna.cluda.any_api', 'cluda.any_api', ([], {}), '()\n', (513, 515), True, 'import reikna.cluda as cluda\n'), ((555, 579), 'numpy.load', 'np.load', (['"""8psk_data.npy"""'], {}), "('8psk_data.npy')\n", (562, 579), True, 'import numpy as np\n'), ((588, 620), 'numpy.reshape', 'np.reshape', (['data', '(batch * 4, N)'], {}), '(data, (batch * 4, N))\n', (598, 620), True, 'import numpy as np\n'), ((647, 656), 'time.clock', 't.clock', ([], {}), '()\n', (654, 656), True, 'import time as t\n'), ((775, 794), 'reikna.fft.FFT', 'FFT', (['data_g', '(0, 1)'], {}), '(data_g, (0, 1))\n', (778, 794), False, 'from reikna.fft import FFT, FFTShift\n'), ((882, 906), 'reikna.fft.FFTShift', 'FFTShift', (['data_f', '(0, 1)'], {}), '(data_f, (0, 1))\n', (890, 906), False, 'from reikna.fft import FFT, FFTShift\n'), ((1000, 1012), 'statistic_functions4.stat', 'sf.stat', (['thr'], {}), '(thr)\n', (1007, 1012), True, 'import statistic_functions4 as sf\n'), ((1023, 1037), 'statistic_functions4.logg10', 'sf.logg10', (['thr'], {}), '(thr)\n', (1032, 1037), True, 'import statistic_functions4 as sf\n'), ((1856, 1868), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1866, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1258), 'time.clock', 't.clock', ([], {}), '()\n', (1256, 1258), True, 'import time as t\n'), ((1452, 1461), 'time.clock', 't.clock', ([], {}), '()\n', (1459, 1461), True, 'import time as t\n'), ((1565, 1574), 'time.clock', 't.clock', ([], {}), '()\n', (1572, 1574), True, 'import time as t\n'), ((1589, 1640), 'reikna.cluda.ocl.Array', 'cluda.ocl.Array', (['thr'], {'shape': '(1000, N)', 'dtype': 'np.int'}), '(thr, shape=(1000, N), dtype=np.int)\n', (1604, 1640), True, 'import reikna.cluda as cluda\n'), ((1930, 1939), 'time.clock', 't.clock', ([], {}), '()\n', (1937, 1939), True, 'import time as t\n'), ((2089, 2098), 'time.clock', 't.clock', ([], {}), '()\n', (2096, 2098), True, 'import time as t\n'), ((2124, 2133), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2131, 2133), True, 'import matplotlib.pyplot as plt\n'), ((2176, 2213), 'matplotlib.pyplot.imshow', 'plt.imshow', (['out'], {'cmap': '"""nipy_spectral"""'}), "(out, cmap='nipy_spectral')\n", (2186, 2213), True, 'import matplotlib.pyplot as plt\n'), ((2221, 2238), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1000)'], {}), '(0, 1000)\n', (2229, 2238), True, 'import matplotlib.pyplot as plt\n'), ((2253, 2269), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-08)'], {}), '(1e-08)\n', (2262, 2269), True, 'import matplotlib.pyplot as plt\n'), ((743, 752), 'time.clock', 't.clock', ([], {}), '()\n', (750, 752), True, 'import time as t\n'), ((1802, 1811), 'time.clock', 't.clock', ([], {}), '()\n', (1809, 1811), True, 'import time as t\n'), ((1761, 1770), 'time.clock', 't.clock', ([], {}), '()\n', (1768, 1770), True, 'import time as t\n'), ((2342, 2351), 'time.clock', 't.clock', ([], {}), '()\n', (2349, 2351), True, 'import time as t\n')]
|
import os
import argparse
import numpy as np
import tensorflow as tf
import tensorflow.keras as K
from sklearn.metrics import classification_report
from dataset import FLIRDataset
def grid_search(train_labels: str,
test_labels: str,
                output: str,
                res: tuple = (120, 160),
                lazy: bool = True,
                batch_size: int = 16,
                epochs: int = 20):
"""
Runs a grid search over all known models.
Params
------
train_labels: str
Path to training labels
test_labels: str
Path to testing labels
output: str
Path to output directory
res: tuple
Input resolution of network
lazy: bool
Whether to load data lazily in batches during training
batch_size: int
Batch size in case of lazy loading
epochs: int
Training epochs
"""
# Data
print("=> Loading data.")
train = FLIRDataset(train_labels, res=res, batch_size=batch_size)
test = FLIRDataset(test_labels, res=res, batch_size=batch_size)
# In eager loading mode, train on everything.
if not lazy:
X_train, y_train = train.get_all()
X_test, y_test = test.get_all()
X_train = np.concatenate([X_train, X_test], axis=0)
y_train = np.concatenate([y_train, y_test], axis=0)
def net(x, num_classes=1):
x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x)
x = K.layers.Flatten()(x)
x = K.layers.Dense(num_classes, activation="softmax")(x)
return x
print("\n=> Training model.")
input_tensor = K.layers.Input((160, 120, 1))
output_tensor = net(input_tensor, num_classes=train.num_classes())
model = K.Model(input_tensor, output_tensor)
model.compile(optimizer="sgd",
loss="categorical_crossentropy",
metrics=["accuracy"])
# Train model
if lazy:
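        # Lazy mode streams batches straight from the FLIRDataset generator; note that
        # the same generator is also passed as validation_data, so validation metrics
        # are computed on the training set.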
model.fit(x=train,
epochs=epochs,
validation_data=train,
verbose=2)
else:
model.fit(x=X_train,
y=y_train,
epochs=epochs,
batch_size=batch_size,
verbose=2)
# Save weights
model.save_weights(os.path.join(output, "flir_pretrained_weights.h5"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train model on FLIR dataset.")
parser.add_argument("train", help="Directory containing training labels")
parser.add_argument("test", help="Directory containing testing labels")
parser.add_argument("out", help="Output directory for results")
parser.add_argument("epochs", help="Number of epochs")
parser.add_argument("-l", "--lazy", dest="lazy", help="Load data lazily", action="store_true")
args = vars(parser.parse_args())
grid_search(args["train"],
args["test"],
args["out"],
res=(120, 160),
lazy=bool(args["lazy"]),
epochs=int(args["epochs"]))
print("\n=> Finished.")
|
[
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.applications.resnet_v2.ResNet50V2",
"dataset.FLIRDataset",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Input",
"os.path.join",
"numpy.concatenate",
"tensorflow.keras.layers.Flatten"
] |
[((1006, 1063), 'dataset.FLIRDataset', 'FLIRDataset', (['train_labels'], {'res': 'res', 'batch_size': 'batch_size'}), '(train_labels, res=res, batch_size=batch_size)\n', (1017, 1063), False, 'from dataset import FLIRDataset\n'), ((1075, 1131), 'dataset.FLIRDataset', 'FLIRDataset', (['test_labels'], {'res': 'res', 'batch_size': 'batch_size'}), '(test_labels, res=res, batch_size=batch_size)\n', (1086, 1131), False, 'from dataset import FLIRDataset\n'), ((1715, 1744), 'tensorflow.keras.layers.Input', 'K.layers.Input', (['(160, 120, 1)'], {}), '((160, 120, 1))\n', (1729, 1744), True, 'import tensorflow.keras as K\n'), ((1828, 1864), 'tensorflow.keras.Model', 'K.Model', (['input_tensor', 'output_tensor'], {}), '(input_tensor, output_tensor)\n', (1835, 1864), True, 'import tensorflow.keras as K\n'), ((2477, 2544), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train model on FLIR dataset."""'}), "(description='Train model on FLIR dataset.')\n", (2500, 2544), False, 'import argparse\n'), ((1301, 1342), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_test]'], {'axis': '(0)'}), '([X_train, X_test], axis=0)\n', (1315, 1342), True, 'import numpy as np\n'), ((1361, 1402), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_test]'], {'axis': '(0)'}), '([y_train, y_test], axis=0)\n', (1375, 1402), True, 'import numpy as np\n'), ((2380, 2430), 'os.path.join', 'os.path.join', (['output', '"""flir_pretrained_weights.h5"""'], {}), "(output, 'flir_pretrained_weights.h5')\n", (2392, 2430), False, 'import os\n'), ((1448, 1545), 'tensorflow.keras.applications.resnet_v2.ResNet50V2', 'K.applications.resnet_v2.ResNet50V2', ([], {'include_top': '(False)', 'weights': 'None', 'input_shape': 'x.shape[1:]'}), '(include_top=False, weights=None,\n input_shape=x.shape[1:])\n', (1483, 1545), True, 'import tensorflow.keras as K\n'), ((1557, 1575), 'tensorflow.keras.layers.Flatten', 'K.layers.Flatten', ([], {}), '()\n', (1573, 1575), True, 'import tensorflow.keras as K\n'), ((1591, 1640), 'tensorflow.keras.layers.Dense', 'K.layers.Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (1605, 1640), True, 'import tensorflow.keras as K\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 09:33:16 2020
@author: jvergere
Ideas: Something similar to the Iridium Constellation:
66 Sats
781 km (7159 semimajor axis)
86.4 inclination
6 Orbit planes 30 degrees apart
11 in each plane
"""
import datetime as dt
import numpy as np
import os
#Need to cleanup this file before running each time,
#or refactor code to avoid writing to file in append mode
if os.path.exists("MaxOutageData.txt"):
os.remove("MaxOutageData.txt")
from comtypes.client import CreateObject # Will allow you to launch STK
#from comtypes.client import GetActiveObject #Will allow you to connect a running instance of STK
#Start the application, it will return a pointer to the Application Interface
app = CreateObject("STK12.Application")
#app = GetActiveObject("STK12.Application")
#app is a pointer to IAgUiApplication
#type info is available with python builtin type method
#type(app)
#More info is available via python built in dir method, which will list
#all the available properties and methods available
#dir(app)
#Additional useful information is available via the python builtin help
#help(app)
app.Visible = True
app.UserControl = True
root = app.Personality2 #root ->IAgStkObjectRoot
#These are not available to import until this point if this is the first time
#running STK via COM with Python. It won't hurt to leave them here, but after running once they can be
#included at the top with all the other import statements
from comtypes.gen import STKUtil
from comtypes.gen import STKObjects
root.NewScenario("NewTestScenario")
scenario = root.CurrentScenario #scenario -> IAgStkObject
scenario2 = scenario.QueryInterface(STKObjects.IAgScenario) #scenaro2 -> IAgScenario
scenario2.StartTime = "1 Jun 2016 16:00:00.000"
scenario2.StopTime = "2 Jun 2016 16:00:00.000"
root.Rewind()
#Insert Facilites from text file using connect. Each line of the text file is
#formatted:
#FacName,Longitude,Latitude
with open("Facilities.txt", "r") as faclist:
for line in faclist:
facData = line.strip().split(",")
insertNewFacCmd = "New / */Facility {}".format(facData[0])
root.ExecuteCommand(insertNewFacCmd)
setPositionCmd = "SetPosition */Facility/{} Geodetic {} {} Terrain".format(facData[0], facData[2], facData[1])
root.ExecuteCommand(setPositionCmd)
setColorCommand = "Graphics */Facility/{} SetColor blue".format(facData[0])
root.ExecuteCommand(setColorCommand)
#Create sensor constellation, used later to hold all the sensor objects
sensorConst = scenario.Children.New(STKObjects.eConstellation, "SensorConst")
sensorConst2 = sensorConst.QueryInterface(STKObjects.IAgConstellation)
#Build satellite constellation, attach sensors, assign sensor to constellation object
i = 1
for RAAN in range(0,180,45): # 4 orbit planes
j = 1
for trueAnomaly in range(0,360,45): # 8 sats per plane
#insert satellite
newSat = scenario.Children.New(STKObjects.eSatellite, "Sat{}{}".format(i,j))
newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite)
#change some basic display attributes
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False
#Buildup Initial State using TwoBody Propagator and Classical Orbital Elements
keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical)
        keplarian.SizeShapeType = STKObjects.eSizeShapeSemimajorAxis
keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).SemiMajorAxis = 7159
keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0
keplarian.Orientation.Inclination = 86.4
keplarian.Orientation.ArgOfPerigee = 0
keplarian.Orientation.AscNodeType = STKObjects.eAscNodeRAAN
keplarian.Orientation.AscNode.QueryInterface(STKObjects.IAgOrientationAscNodeRAAN).Value = RAAN
keplarian.LocationType = STKObjects.eLocationTrueAnomaly
keplarian.Location.QueryInterface(STKObjects.IAgClassicalLocationTrueAnomaly).Value = trueAnomaly + (45/2)*(i%2) #Stagger TrueAnomalies for every other orbital plane
newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian)
newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate()
#Attach sensors to each satellite
sensor = newSat.Children.New(STKObjects.eSensor,"Sensor{}{}".format(i,j))
sensor2 = sensor.QueryInterface(STKObjects.IAgSensor)
sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2)
#Add the sensor to the SensorConstellation
sensorConst2.Objects.Add("Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}".format(i,j))
        #Adjust the translucency of the sensor projections
sensor2.VO.PercentTranslucency = 75
sensor2.Graphics.LineStyle = STKUtil.eDotted
j+=1
i+=1
#Create a Chain object for each Facility to the constellation.
facCount = scenario.Children.GetElements(STKObjects.eFacility).Count
for i in range(facCount):
#Create Chain
facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName
chain = scenario.Children.New(STKObjects.eChain, "{}ToSensorConst".format(facName))
chain2 = chain.QueryInterface(STKObjects.IAgChain)
#Modify some display properties
chain2.Graphics.Animation.Color = 65280
chain2.Graphics.Animation.LineWidth = STKObjects.e1
chain2.Graphics.Animation.IsHighlightVisible = False
#Add objects to the chain
chain2.Objects.Add("Facility/{}".format(facName))
chain2.Objects.Add("Constellation/SensorConst")
#Get complete chain access data
compAcc = chain.DataProviders.Item("Complete Access").QueryInterface(STKObjects.IAgDataPrvInterval).Exec(scenario2.StartTime,scenario2.StopTime)
el = compAcc.DataSets.ElementNames
numRows = compAcc.DataSets.RowCount
maxOutage = []
#Save out the report to a text file
with open("{}CompleteChainAccess.txt".format(facName),"w") as dataFile:
dataFile.write("{},{},{},{}\n".format(el[0],el[1],el[2],el[3]))
for row in range(numRows):
rowData = compAcc.DataSets.GetRow(row)
dataFile.write("{},{},{},{}\n".format(rowData[0],rowData[1],rowData[2],rowData[3]))
dataFile.close()
#Get max outage time for each chain, print to console and save to file
with open("MaxOutageData.txt", "a") as outageFile:
if numRows == 1:
outageFile.write("{},NA,NA,NA\n".format(facName))
print("{}: No Outage".format(facName))
else:
#Get StartTimes and StopTimes as lists
startTimes = list(compAcc.DataSets.GetDataSetByName("Start Time").GetValues())
stopTimes = list(compAcc.DataSets.GetDataSetByName("Stop Time").GetValues())
            #Convert from strings to datetimes
startDatetimes = np.array([dt.datetime.strptime(startTime[:-3], "%d %b %Y %H:%M:%S.%f") for startTime in startTimes])
stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], "%d %b %Y %H:%M:%S.%f") for stopTime in stopTimes])
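            #Outage = gap between the end of one access interval and the start of the next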
outages = startDatetimes[1:] - stopDatetimes[:-1]
maxOutage = np.amax(outages).total_seconds()
start = stopTimes[np.argmax(outages)]
stop = startTimes[np.argmax(outages)+1]
outageFile.write("{},{},{},{}\n".format(facName,maxOutage,start,stop))
print("{}: {} seconds from {} until {}".format(facName, maxOutage, start, stop))
root.Rewind()
root.Save()
|
[
"os.remove",
"numpy.argmax",
"os.path.exists",
"numpy.amax",
"datetime.datetime.strptime",
"comtypes.client.CreateObject"
] |
[((433, 468), 'os.path.exists', 'os.path.exists', (['"""MaxOutageData.txt"""'], {}), "('MaxOutageData.txt')\n", (447, 468), False, 'import os\n'), ((763, 796), 'comtypes.client.CreateObject', 'CreateObject', (['"""STK12.Application"""'], {}), "('STK12.Application')\n", (775, 796), False, 'from comtypes.client import CreateObject\n'), ((474, 504), 'os.remove', 'os.remove', (['"""MaxOutageData.txt"""'], {}), "('MaxOutageData.txt')\n", (483, 504), False, 'import os\n'), ((8044, 8062), 'numpy.argmax', 'np.argmax', (['outages'], {}), '(outages)\n', (8053, 8062), True, 'import numpy as np\n'), ((7665, 7725), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['startTime[:-3]', '"""%d %b %Y %H:%M:%S.%f"""'], {}), "(startTime[:-3], '%d %b %Y %H:%M:%S.%f')\n", (7685, 7725), True, 'import datetime as dt\n'), ((7794, 7853), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['stopTime[:-3]', '"""%d %b %Y %H:%M:%S.%f"""'], {}), "(stopTime[:-3], '%d %b %Y %H:%M:%S.%f')\n", (7814, 7853), True, 'import datetime as dt\n'), ((7981, 7997), 'numpy.amax', 'np.amax', (['outages'], {}), '(outages)\n', (7988, 7997), True, 'import numpy as np\n'), ((8094, 8112), 'numpy.argmax', 'np.argmax', (['outages'], {}), '(outages)\n', (8103, 8112), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
test_parameter
~~~~~~~~~~~~~~~
Tests for `gagepy.parameter` class
:copyright: 2015 by <NAME>, see AUTHORS
:license: United States Geological Survey (USGS), see LICENSE file
"""
import pytest
import os
import numpy as np
from datetime import datetime
from gagepy.parameter import Parameter
def test_parameter_init(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
values = np.array([100, 110, 105, 107, 112]),
units = "cubic feet per second (Mean)",
code = "06_00060_00003")
assert list(parameter.dates) == list(dates_daily)
assert parameter.code == "06_00060_00003"
assert parameter.name == "Discharge"
assert parameter.units == "cubic feet per second (Mean)"
assert list(parameter.values) == list(np.array([100, 110, 105, 107, 112]))
def test_parameter_values_mean_max_min_without_nan(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, 3, 4, 5]))
assert parameter.mean == 3.0
assert parameter.max == 5.0
assert parameter.min == 1.0
def test_parameter_values_mean_max_min_with_nan(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, 3, np.nan, 12]))
assert parameter.mean == 4.5 # sum(values)/len(values) -> 18/4 = 4.5
assert parameter.max == 12.0
assert parameter.min == 1.0
def test_max_min_date(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, 3, 4, 5]))
assert parameter.max_date == datetime(2015, 8, 5, 0, 0)
assert parameter.min_date == datetime(2015, 8, 1, 0, 0)
def test_max_min_date_with_nan(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, np.nan, 4, 5]))
assert parameter.max_date == datetime(2015, 8, 5, 0, 0)
assert parameter.min_date == datetime(2015, 8, 1, 0, 0)
def test_print_parameter_by_not_capturing_stdout(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, 3, 4, 5]))
print(parameter)
|
[
"numpy.array",
"datetime.datetime"
] |
[((2211, 2237), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(5)', '(0)', '(0)'], {}), '(2015, 8, 5, 0, 0)\n', (2219, 2237), False, 'from datetime import datetime\n'), ((2271, 2297), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(1)', '(0)', '(0)'], {}), '(2015, 8, 1, 0, 0)\n', (2279, 2297), False, 'from datetime import datetime\n'), ((2657, 2683), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(5)', '(0)', '(0)'], {}), '(2015, 8, 5, 0, 0)\n', (2665, 2683), False, 'from datetime import datetime\n'), ((2717, 2743), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(1)', '(0)', '(0)'], {}), '(2015, 8, 1, 0, 0)\n', (2725, 2743), False, 'from datetime import datetime\n'), ((509, 544), 'numpy.array', 'np.array', (['[100, 110, 105, 107, 112]'], {}), '([100, 110, 105, 107, 112])\n', (517, 544), True, 'import numpy as np\n'), ((908, 943), 'numpy.array', 'np.array', (['[100, 110, 105, 107, 112]'], {}), '([100, 110, 105, 107, 112])\n', (916, 943), True, 'import numpy as np\n'), ((1258, 1283), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1266, 1283), True, 'import numpy as np\n'), ((1693, 1724), 'numpy.array', 'np.array', (['[1, 2, 3, np.nan, 12]'], {}), '([1, 2, 3, np.nan, 12])\n', (1701, 1724), True, 'import numpy as np\n'), ((2150, 2175), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2158, 2175), True, 'import numpy as np\n'), ((2591, 2621), 'numpy.array', 'np.array', (['[1, 2, np.nan, 4, 5]'], {}), '([1, 2, np.nan, 4, 5])\n', (2599, 2621), True, 'import numpy as np\n'), ((3055, 3080), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (3063, 3080), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2020/2/12 15:47
# @Author : Chen
# @File : datasets.py
# @Software: PyCharm
import os, warnings
from mxnet.gluon.data import dataset, sampler
from mxnet import image
import numpy as np
class IdxSampler(sampler.Sampler):
"""Samples elements from [0, length) randomly without replacement.
Parameters
----------
length : int
Length of the sequence.
"""
def __init__(self, indices_selected):
if isinstance(indices_selected, list):
indices_selected = np.array(indices_selected)
self._indices_selected = indices_selected
self._length = indices_selected.shape[0]
def __iter__(self):
indices = self._indices_selected
np.random.shuffle(indices)
return iter(indices)
def __len__(self):
return self._length
class ImageFolderDataset(dataset.Dataset):
"""A dataset for loading image files stored in a folder structure.
like::
root/car/0001.jpg
root/car/xxxa.jpg
root/car/yyyb.jpg
root/bus/123.jpg
root/bus/023.jpg
root/bus/wwww.jpg
Parameters
----------
root : str
Path to root directory.
flag : {0, 1}, default 1
If 0, always convert loaded images to greyscale (1 channel).
If 1, always convert loaded images to colored (3 channels).
transform : callable, default None
A function that takes data and label and transforms them::
transform = lambda data, label: (data.astype(np.float32)/255, label)
Attributes
----------
synsets : list
List of class names. `synsets[i]` is the name for the integer label `i`
items : list of tuples
List of all images in (filename, label) pairs.
"""
def __init__(self, root, flag=1, transform=None, pseudo_labels=None):
self._root = os.path.expanduser(root)
self._flag = flag
self._transform = transform
self._exts = ['.jpg', '.jpeg', '.png']
self._list_images(self._root)
self._pseudo_labels = pseudo_labels
def _list_images(self, root):
self.synsets = []
self.items = []
for folder in sorted(os.listdir(root)):
path = os.path.join(root, folder)
if not os.path.isdir(path):
warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3)
continue
label = len(self.synsets)
self.synsets.append(folder)
for filename in sorted(os.listdir(path)):
filename = os.path.join(path, filename)
ext = os.path.splitext(filename)[1]
if ext.lower() not in self._exts:
warnings.warn('Ignoring %s of type %s. Only support %s'%(
filename, ext, ', '.join(self._exts)))
continue
self.items.append((filename, label))
def __getitem__(self, idx):
img = image.imread(self.items[idx][0], self._flag)
label = self.items[idx][1]
if self._transform is not None:
return self._transform(img, label)
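        # When pseudo labels are provided, return them alongside the true label and the
        # sample index so callers can match predictions back to individual images.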
if self._pseudo_labels is not None:
pseudo_label = self._pseudo_labels[idx]
return img, label, idx, pseudo_label
return img, label, idx
def __len__(self):
return len(self.items)
|
[
"os.path.join",
"os.path.isdir",
"numpy.array",
"mxnet.image.imread",
"os.path.splitext",
"warnings.warn",
"os.path.expanduser",
"os.listdir",
"numpy.random.shuffle"
] |
[((741, 767), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (758, 767), True, 'import numpy as np\n'), ((1879, 1903), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (1897, 1903), False, 'import os, warnings\n'), ((2991, 3035), 'mxnet.image.imread', 'image.imread', (['self.items[idx][0]', 'self._flag'], {}), '(self.items[idx][0], self._flag)\n', (3003, 3035), False, 'from mxnet import image\n'), ((541, 567), 'numpy.array', 'np.array', (['indices_selected'], {}), '(indices_selected)\n', (549, 567), True, 'import numpy as np\n'), ((2210, 2226), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (2220, 2226), False, 'import os, warnings\n'), ((2248, 2274), 'os.path.join', 'os.path.join', (['root', 'folder'], {}), '(root, folder)\n', (2260, 2274), False, 'import os, warnings\n'), ((2294, 2313), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2307, 2313), False, 'import os, warnings\n'), ((2331, 2407), 'warnings.warn', 'warnings.warn', (["('Ignoring %s, which is not a directory.' % path)"], {'stacklevel': '(3)'}), "('Ignoring %s, which is not a directory.' % path, stacklevel=3)\n", (2344, 2407), False, 'import os, warnings\n'), ((2544, 2560), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2554, 2560), False, 'import os, warnings\n'), ((2590, 2618), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (2602, 2618), False, 'import os, warnings\n'), ((2641, 2667), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2657, 2667), False, 'import os, warnings\n')]
|
#!/usr/bin/env python
import copy
import glob
import logging
import os
import re
import numpy as np
from astropy.io import fits
from scipy import interpolate, ndimage, optimize, signal
try:
from charis.image import Image
except:
from image import Image
log = logging.getLogger('main')
class PSFLets:
"""
Helper class to deal with the PSFLets on the detector. Does most of the heavy lifting
during the wavelength calibration step.
"""
def __init__(self, load=False, infile=None, infiledir='.'):
'''
Initialize the class
Parameters
----------
load: Boolean
Whether to load an already-existing wavelength calibration file
infile: String
If load is True, this is the name of the file
infiledir: String
If load is True, this is the directory in which the file resides
'''
self.xindx = None
self.yindx = None
self.lam_indx = None
self.nlam = None
self.nlam_max = None
self.interp_arr = None
self.order = None
if load:
self.loadpixsol(infile, infiledir)
def loadpixsol(self, infile=None, infiledir='./calibrations'):
'''
Loads existing wavelength calibration file
Parameters
----------
infile: String
Name of the file
infiledir: String
Directory in which the file resides
'''
if infile is None:
infile = re.sub('//', '/', infiledir + '/PSFloc.fits')
hdulist = fits.open(infile)
try:
self.xindx = hdulist[0].data
self.yindx = hdulist[1].data
self.lam_indx = hdulist[2].data
self.nlam = hdulist[3].data.astype(int)
except:
raise RuntimeError("File " + infile +
" does not appear to contain a CHARIS wavelength solution in the appropriate format.")
self.nlam_max = np.amax(self.nlam)
def savepixsol(self, outdir="calibrations/"):
'''
Saves wavelength calibration file
Parameters
----------
outdir: String
Directory in which to put the file. The file is name PSFloc.fits and is a
multi-extension FITS file, each extension corresponding to:
0. the list of wavelengths at which the calibration is done
1. a 2D ndarray with the X position of all lenslets
2. a 2D ndarray with the Y position of all lenslets
3. a 2D ndarray with the number of valid wavelengths for a given lenslet (some wavelengths fall outside of the detector area)
'''
if not os.path.isdir(outdir):
raise IOError("Attempting to save pixel solution to directory " + outdir + ". Directory does not exist.")
outfile = re.sub('//', '/', outdir + '/PSFloc.fits')
out = fits.HDUList(fits.PrimaryHDU(self.xindx))
out.append(fits.PrimaryHDU(self.yindx))
out.append(fits.PrimaryHDU(self.lam_indx))
out.append(fits.PrimaryHDU(self.nlam.astype(int)))
try:
out.writeto(outfile, overwrite=True)
except:
raise
def geninterparray(self, lam, allcoef, order=3):
'''
Set up array to solve for best-fit polynomial fits to the
coefficients of the wavelength solution. These will be used
to smooth/interpolate the wavelength solution, and
ultimately to compute its inverse.
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of lists floats
Polynomial coefficients of wavelength solution
order: int
Order of polynomial wavelength solution
Notes
-----
Populates the attribute interp_arr in PSFLet class
'''
self.interp_arr = np.zeros((order + 1, allcoef.shape[1]))
self.order = order
xarr = np.ones((lam.shape[0], order + 1))
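        # Fill the design matrix: column i holds log(lam)**i, so each wavelength-solution
        # coefficient below is fit as a polynomial in log(wavelength).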
for i in range(1, order + 1):
xarr[:, i] = np.log(lam)**i
for i in range(self.interp_arr.shape[1]):
coef = np.linalg.lstsq(xarr, allcoef[:, i])[0]
self.interp_arr[:, i] = coef
def return_locations_short(self, coef, xindx, yindx):
'''
Returns the x,y detector location of a given lenslet for a given polynomial fit
Parameters
----------
coef: lists floats
Polynomial coefficients of fit for a single wavelength
xindx: int
X index of lenslet in lenslet array
yindx: int
Y index of lenslet in lenslet array
Returns
-------
interp_x: float
X coordinate on the detector
interp_y: float
Y coordinate on the detector
'''
coeforder = int(np.sqrt(coef.shape[0])) - 1
interp_x, interp_y = _transform(xindx, yindx, coeforder, coef)
return interp_x, interp_y
def return_res(self, lam, allcoef, xindx, yindx,
order=3, lam1=None, lam2=None):
'''
Returns the spectral resolution and interpolated wavelength array
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of lists floats
Polynomial coefficients of wavelength solution
xindx: int
X index of lenslet in lenslet array
yindx: int
Y index of lenslet in lenslet array
order: int
Order of polynomial wavelength solution
lam1: float
Shortest wavelength in nm
lam2: float
Longest wavelength in nm
Returns
-------
interp_lam: array
Array of wavelengths
R: float
Effective spectral resolution
'''
if lam1 is None:
lam1 = np.amin(lam) / 1.04
if lam2 is None:
lam2 = np.amax(lam) * 1.03
interporder = order
if self.interp_arr is None:
self.geninterparray(lam, allcoef, order=order)
coeforder = int(np.sqrt(allcoef.shape[1])) - 1
n_spline = 100
interp_lam = np.linspace(lam1, lam2, n_spline)
dy = []
dx = []
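        # Differentiate the polynomial coefficients with respect to log(lambda); the
        # resulting pixel displacements give the local spectral resolution R.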
for i in range(n_spline):
coef = np.zeros((coeforder + 1) * (coeforder + 2))
for k in range(1, interporder + 1):
coef += k * self.interp_arr[k] * np.log(interp_lam[i])**(k - 1)
_dx, _dy = _transform(xindx, yindx, coeforder, coef)
dx += [_dx]
dy += [_dy]
R = np.sqrt(np.asarray(dy)**2 + np.asarray(dx)**2)
return interp_lam, R
def monochrome_coef(self, lam, alllam=None, allcoef=None, order=3):
if self.interp_arr is None:
if alllam is None or allcoef is None:
raise ValueError("Interpolation array has not been computed. Must call monochrome_coef with arrays.")
self.geninterparray(alllam, allcoef, order=order)
coef = np.zeros(self.interp_arr[0].shape)
for k in range(self.order + 1):
coef += self.interp_arr[k] * np.log(lam)**k
return coef
def return_locations(self, lam, allcoef, xindx, yindx, order=3):
'''
Calculates the detector coordinates of lenslet located at `xindx`, `yindx`
for desired wavelength `lam`
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of floats
Polynomial coefficients of wavelength solution
xindx: int
X index of lenslet in lenslet array
yindx: int
Y index of lenslet in lenslet array
order: int
Order of polynomial wavelength solution
Returns
-------
interp_x: float
X coordinate on the detector
interp_y: float
Y coordinate on the detector
'''
if len(allcoef.shape) == 1:
coeforder = int(np.sqrt(allcoef.shape[0])) - 1
interp_x, interp_y = _transform(xindx, yindx, coeforder, allcoef)
return interp_x, interp_y
if self.interp_arr is None:
self.geninterparray(lam, allcoef, order=order)
coeforder = int(np.sqrt(allcoef.shape[1])) - 1
if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:
raise ValueError("Number of coefficients incorrect for polynomial order.")
coef = np.zeros((coeforder + 1) * (coeforder + 2))
for k in range(self.order + 1):
coef += self.interp_arr[k] * np.log(lam)**k
interp_x, interp_y = _transform(xindx, yindx, coeforder, coef)
return interp_x, interp_y
def genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None):
"""
Calculates the wavelength at the center of each pixel within a microspectrum
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of floats
List describing the polynomial coefficients that best fit the lenslets,
for all wavelengths
order: int
Order of the polynomical fit
lam1: float
Lowest wavelength in nm
lam2: float
Highest wavelength in nm
Notes
-----
This functions fills in most of the fields of the PSFLet class: the array
of xindx, yindx, nlam, lam_indx and nlam_max
"""
###################################################################
# Read in wavelengths of spots, coefficients of wavelength
# solution. Obtain extrapolated limits of wavlength solution
# to 4% below and 3% above limits of the coefficient file by
# default.
###################################################################
if lam1 is None:
lam1 = np.amin(lam) / 1.04
if lam2 is None:
lam2 = np.amax(lam) * 1.03
interporder = order
if self.interp_arr is None:
self.geninterparray(lam, allcoef, order=order)
coeforder = int(np.sqrt(allcoef.shape[1])) - 1
if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:
raise ValueError("Number of coefficients incorrect for polynomial order.")
xindx = np.arange(-100, 101)
xindx, yindx = np.meshgrid(xindx, xindx)
n_spline = 100
interp_x = np.zeros(tuple([n_spline] + list(xindx.shape)))
interp_y = np.zeros(interp_x.shape)
interp_lam = np.linspace(lam1, lam2, n_spline)
for i in range(n_spline):
coef = np.zeros((coeforder + 1) * (coeforder + 2))
for k in range(interporder + 1):
coef += self.interp_arr[k] * np.log(interp_lam[i])**k
interp_x[i], interp_y[i] = _transform(xindx, yindx, coeforder, coef)
x = np.zeros(tuple(list(xindx.shape) + [1000]))
y = np.zeros(x.shape)
    nlam = np.zeros(xindx.shape, int)
lam_out = np.zeros(y.shape)
good = np.zeros(xindx.shape)
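    # For each lenslet, walk down the detector rows crossed by its microspectrum and
    # interpolate the wavelength and x position at the center of every such pixel.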
for ix in range(xindx.shape[0]):
for iy in range(xindx.shape[1]):
pix_x = interp_x[:, ix, iy]
pix_y = interp_y[:, ix, iy]
if np.all(pix_x < 0) or np.all(pix_x > 2048) or np.all(pix_y < 0) or np.all(pix_y > 2048):
continue
if pix_y[-1] < pix_y[0]:
try:
tck_y = interpolate.splrep(pix_y[::-1], interp_lam[::-1], k=1, s=0)
except:
print(pix_x, pix_y)
raise
else:
tck_y = interpolate.splrep(pix_y, interp_lam, k=1, s=0)
y1, y2 = [int(np.amin(pix_y)) + 1, int(np.amax(pix_y))]
tck_x = interpolate.splrep(interp_lam, pix_x, k=1, s=0)
nlam[ix, iy] = y2 - y1 + 1
y[ix, iy, :nlam[ix, iy]] = np.arange(y1, y2 + 1)
lam_out[ix, iy, :nlam[ix, iy]] = interpolate.splev(y[ix, iy, :nlam[ix, iy]], tck_y)
x[ix, iy, :nlam[ix, iy]] = interpolate.splev(lam_out[ix, iy, :nlam[ix, iy]], tck_x)
for nlam_max in range(x.shape[-1]):
if np.all(y[:, :, nlam_max] == 0):
break
self.xindx = x[:, :, :nlam_max]
self.yindx = y[:, :, :nlam_max]
self.nlam = nlam
self.lam_indx = lam_out[:, :, :nlam_max]
self.nlam_max = np.amax(nlam)
def _initcoef(order, scale=15.02, phi=np.arctan2(1.926, -1), x0=0, y0=0):
"""
Private function _initcoef in locate_psflets
Create a set of coefficients including a rotation matrix plus zeros.
Parameters
----------
order: int
The polynomial order of the grid distortion
scale: float
The linear separation in pixels of the PSFlets. Default 15.02.
phi: float
The pitch angle of the lenslets. Default atan(1.926)
x0: float
x offset to apply to the central pixel. Default 0
y0: float
y offset to apply to the central pixel. Default 0
Returns
-------
coef: list of floats
A list of length (order+1)*(order+2) to be optimized.
Notes
-----
The list of coefficients has space for a polynomial fit of the
input order (i.e., for order 3, up to terms like x**3 and x**2*y,
but not x**3*y). It is all zeros in the output apart from the
rotation matrix given by scale and phi.
"""
try:
if not order == int(order):
raise ValueError("Polynomial order must be integer")
else:
if order < 1 or order > 5:
raise ValueError("Polynomial order must be >0, <=5")
except:
raise ValueError("Polynomial order must be integer")
n = (order + 1) * (order + 2)
    coef = np.zeros(n)
    coef[0] = x0
    coef[1] = scale * np.cos(phi)
    coef[order + 1] = -scale * np.sin(phi)
    coef[n // 2] = y0
    coef[n // 2 + 1] = scale * np.sin(phi)
    coef[n // 2 + order + 1] = scale * np.cos(phi)
return list(coef)
def _pullorder(coef, order=1):
coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
coef_short = []
i = 0
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= order:
coef_short += [coef[i]]
i += 1
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= order:
coef_short += [coef[i]]
i += 1
return coef_short
def _insertorder(coefshort, coef):
coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
shortorder = int(np.sqrt(len(coefshort) + 0.25) - 1.5 + 1e-12)
i = 0
j = 0
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= shortorder:
coef[i] = coefshort[j]
j += 1
i += 1
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= shortorder:
coef[i] = coefshort[j]
j += 1
i += 1
return coef
def _transform(x, y, order, coef, highordercoef=None):
"""
Private function _transform in locate_psflets
Apply the coefficients given to transform the coordinates using
a polynomial.
Parameters
----------
x: ndarray
Rectilinear grid
y: ndarray of floats
Rectilinear grid
order: int
Order of the polynomial fit
coef: list of floats
List of the coefficients. Must match the length required by
order = (order+1)*(order+2)
highordercoef: Boolean
Returns
-------
_x: ndarray
Transformed coordinates
_y: ndarray
Transformed coordinates
"""
try:
if not len(coef) == (order + 1) * (order + 2):
pass # raise ValueError("Number of coefficients incorrect for polynomial order.")
except:
raise AttributeError("order must be integer, coef should be a list.")
try:
if not order == int(order):
raise ValueError("Polynomial order must be integer")
else:
if order < 1 or order > 5:
raise ValueError("Polynomial order must be >0, <=5")
except:
raise ValueError("Polynomial order must be integer")
# n**2 + 3*n + 2 = (n + 1.5)**2 - 0.25
# = (1/4)*((2*n + 3)**2 - 1) = len(coef)
order1 = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
_x = np.zeros(np.asarray(x).shape)
_y = np.zeros(np.asarray(y).shape)
i = 0
for ix in range(order1 + 1):
for iy in range(order1 - ix + 1):
_x += coef[i] * x**ix * y**iy
i += 1
for ix in range(order1 + 1):
for iy in range(order1 - ix + 1):
_y += coef[i] * x**ix * y**iy
i += 1
if highordercoef is None:
return [_x, _y]
else:
order2 = int(np.sqrt(len(highordercoef) + 0.25) - 1.5 + 1e-12)
i = 0
for ix in range(order2 + 1):
for iy in range(order1 - ix + 1):
if ix + iy <= order1:
continue
_x += coef[i] * x**ix * y**iy
i += 1
for ix in range(order2 + 1):
for iy in range(order1 - ix + 1):
if ix + iy <= order1:
continue
_y += coef[i] * x**ix * y**iy
i += 1
return [_x, _y]
def _corrval(coef, x, y, filtered, order, trimfrac=0.1, highordercoef=None):
"""
Private function _corrval in locate_psflets
Return the negative of the sum of the middle XX% of the PSFlet
spot fluxes (disregarding those with the most and the least flux
to limit the impact of outliers). Analogous to the trimmed mean.
Parameters
----------
coef: list of floats
coefficients for polynomial transformation
x: ndarray
coordinates of lenslets
y: ndarray
coordinates of lenslets
filtered: ndarray
image convolved with gaussian PSFlet
order: int
order of the polynomial fit
trimfrac: float
fraction of outliers (high & low combined) to trim
Default 0.1 (5% trimmed on the high end, 5% on the low end)
highordercoef: boolean
Returns
-------
score: float
Negative sum of PSFlet fluxes, to be minimized
"""
#################################################################
# Use np.nan for lenslet coordinates outside the CHARIS FOV,
# discard these from the calculation before trimming.
#################################################################
_x, _y = _transform(x, y, order, coef, highordercoef)
vals = ndimage.map_coordinates(filtered, [_y, _x], mode='constant',
cval=np.nan, prefilter=False)
vals_ok = vals[np.where(np.isfinite(vals))]
iclip = int(vals_ok.shape[0] * trimfrac / 2)
vals_sorted = np.sort(vals_ok)
score = -1 * np.sum(vals_sorted[iclip:-iclip])
return score
def locatePSFlets(inImage, polyorder=2, sig=0.7, coef=None, trimfrac=0.1,
phi=np.arctan2(1.926, -1), scale=15.02, fitorder=None):
"""
function locatePSFlets takes an Image class, assumed to be a
monochromatic grid of spots with read noise and shot noise, and
returns the esimated positions of the spot centroids. This is
designed to constrain the domain of the PSF-let fitting later in
the pipeline.
Parameters
----------
imImage: Image class
Assumed to be a monochromatic grid of spots
polyorder: float
order of the polynomial coordinate transformation. Default 2.
sig: float
standard deviation of convolving Gaussian used
for estimating the grid of centroids. Should be close
to the true value for the PSF-let spots. Default 0.7.
coef: list
initial guess of the coefficients of polynomial
coordinate transformation
trimfrac: float
fraction of lenslet outliers (high & low
combined) to trim in the minimization. Default 0.1
(5% trimmed on the high end, 5% on the low end)
Returns
-------
x: 2D ndarray
Estimated spot centroids in x.
y: 2D ndarray
Estimated spot centroids in y.
good:2D boolean ndarray
True for lenslets with spots inside the detector footprint
coef: list of floats
List of best-fit polynomial coefficients
Notes
-----
the coefficients, if not supplied, are initially set to the
known pitch angle and scale. A loop then does a quick check to find
reasonable offsets in x and y. With all of the first-order polynomial
coefficients set, the optimizer refines these and the higher-order
coefficients. This routine seems to be relatively robust down to
per-lenslet signal-to-noise ratios of order unity (or even a little
less).
Important note: as of now (09/2015), the number of lenslets to grid
is hard-coded as 1/10 the dimensionality of the final array. This is
sufficient to cover the detector for the fiducial lenslet spacing.
"""
#############################################################
# Convolve with a Gaussian, centroid the filtered image.
#############################################################
x = np.arange(-1 * int(3 * sig + 1), int(3 * sig + 1) + 1)
x, y = np.meshgrid(x, x)
gaussian = np.exp(-(x**2 + y**2) / (2 * sig**2))
if inImage.ivar is None:
unfiltered = signal.convolve2d(inImage.data, gaussian, mode='same')
else:
unfiltered = signal.convolve2d(inImage.data * inImage.ivar, gaussian, mode='same')
unfiltered /= signal.convolve2d(inImage.ivar, gaussian, mode='same') + 1e-10
filtered = ndimage.interpolation.spline_filter(unfiltered)
#############################################################
# x, y: Grid of lenslet IDs, Lenslet (0, 0) is the center.
#############################################################
gridfrac = 20
ydim, xdim = inImage.data.shape
x = np.arange(-(ydim // gridfrac), ydim // gridfrac + 1)
x, y = np.meshgrid(x, x)
#############################################################
# Set up polynomial coefficients, convert from lenslet
# coordinates to coordinates on the detector array.
# Then optimize the coefficients.
# We want to start with a decent guess, so we use a grid of
# offsets. Seems to be robust down to SNR/PSFlet ~ 1
# Create slice indices for subimages to perform the intial
# fits on. The new dimensionality in both x and y is 2*subsize
#############################################################
if coef is None:
ix_arr = np.arange(0, 14, 0.5)
iy_arr = np.arange(0, 25, 0.5)
log.info("Initializing PSFlet location transformation coefficients")
init = True
else:
ix_arr = np.arange(-3.0, 3.05, 0.2)
iy_arr = np.arange(-3.0, 3.05, 0.2)
coef_save = list(coef[:])
log.info("Initializing transformation coefficients with previous values")
init = False
bestval = 0
subshape = xdim * 3 // 8
_s = x.shape[0] * 3 // 8
subfiltered = ndimage.interpolation.spline_filter(unfiltered[subshape:-subshape, subshape:-subshape])
for ix in ix_arr:
for iy in iy_arr:
if init:
coef = _initcoef(polyorder, x0=ix + xdim / 2. - subshape,
y0=iy + ydim / 2. - subshape, scale=scale, phi=phi)
else:
coef = copy.deepcopy(coef_save)
                coef[0] += ix - subshape
                coef[(polyorder + 1) * (polyorder + 2) // 2] += iy - subshape
newval = _corrval(coef, x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s],
subfiltered, polyorder, trimfrac)
if newval < bestval:
bestval = newval
coef_opt = copy.deepcopy(coef)
if init:
log.info("Performing initial optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
res = optimize.minimize(_corrval, coef_opt, args=(
x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac), method='Powell')
coef_opt = res.x
else:
log.info("Performing initial optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
coef_lin = _pullorder(coef_opt, 1)
res = optimize.minimize(_corrval, coef_lin, args=(
x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac, coef_opt), method='Powell', options={'xtol': 1e-6, 'ftol': 1e-6})
coef_lin = res.x
coef_opt = _insertorder(coef_lin, coef_opt)
coef_opt[0] += subshape
    coef_opt[(polyorder + 1) * (polyorder + 2) // 2] += subshape
#############################################################
# If we have coefficients from last time, we assume that we
# are now at a slightly higher wavelength, so try out offsets
# that are slightly to the right to get a good initial guess.
#############################################################
log.info("Performing final optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
if not init and fitorder is not None:
coef_lin = _pullorder(coef_opt, fitorder)
res = optimize.minimize(_corrval, coef_lin, args=(x, y, filtered, polyorder, trimfrac,
coef_opt), method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5})
coef_lin = res.x
coef_opt = _insertorder(coef_lin, coef_opt)
else:
res = optimize.minimize(_corrval, coef_opt, args=(x, y, filtered, polyorder, trimfrac),
method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5})
coef_opt = res.x
if not res.success:
log.info("Optimizing PSFlet location transformation coefficients may have failed for frame " + inImage.filename)
_x, _y = _transform(x, y, polyorder, coef_opt)
#############################################################
# Boolean: do the lenslet PSFlets lie within the detector?
#############################################################
good = (_x > 5) * (_x < xdim - 5) * (_y > 5) * (_y < ydim - 5)
return [_x, _y, good, coef_opt]
|
[
"numpy.arctan2",
"numpy.sum",
"numpy.amin",
"astropy.io.fits.PrimaryHDU",
"numpy.ones",
"numpy.sin",
"numpy.arange",
"numpy.exp",
"scipy.optimize.minimize",
"numpy.meshgrid",
"scipy.signal.convolve2d",
"numpy.isfinite",
"numpy.linspace",
"scipy.ndimage.interpolation.spline_filter",
"re.sub",
"scipy.interpolate.splrep",
"copy.deepcopy",
"numpy.asarray",
"numpy.sort",
"astropy.io.fits.open",
"numpy.cos",
"numpy.all",
"numpy.log",
"numpy.linalg.lstsq",
"os.path.isdir",
"numpy.zeros",
"numpy.amax",
"scipy.interpolate.splev",
"scipy.ndimage.map_coordinates",
"logging.getLogger",
"numpy.sqrt"
] |
[((271, 296), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (288, 296), False, 'import logging\n'), ((12613, 12634), 'numpy.arctan2', 'np.arctan2', (['(1.926)', '(-1)'], {}), '(1.926, -1)\n', (12623, 12634), True, 'import numpy as np\n'), ((13936, 13947), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (13944, 13947), True, 'import numpy as np\n'), ((18929, 19023), 'scipy.ndimage.map_coordinates', 'ndimage.map_coordinates', (['filtered', '[_y, _x]'], {'mode': '"""constant"""', 'cval': 'np.nan', 'prefilter': '(False)'}), "(filtered, [_y, _x], mode='constant', cval=np.nan,\n prefilter=False)\n", (18952, 19023), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((19171, 19187), 'numpy.sort', 'np.sort', (['vals_ok'], {}), '(vals_ok)\n', (19178, 19187), True, 'import numpy as np\n'), ((19354, 19375), 'numpy.arctan2', 'np.arctan2', (['(1.926)', '(-1)'], {}), '(1.926, -1)\n', (19364, 19375), True, 'import numpy as np\n'), ((21646, 21663), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (21657, 21663), True, 'import numpy as np\n'), ((21679, 21722), 'numpy.exp', 'np.exp', (['(-(x ** 2 + y ** 2) / (2 * sig ** 2))'], {}), '(-(x ** 2 + y ** 2) / (2 * sig ** 2))\n', (21685, 21722), True, 'import numpy as np\n'), ((22025, 22072), 'scipy.ndimage.interpolation.spline_filter', 'ndimage.interpolation.spline_filter', (['unfiltered'], {}), '(unfiltered)\n', (22060, 22072), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((22332, 22384), 'numpy.arange', 'np.arange', (['(-(ydim // gridfrac))', '(ydim // gridfrac + 1)'], {}), '(-(ydim // gridfrac), ydim // gridfrac + 1)\n', (22341, 22384), True, 'import numpy as np\n'), ((22396, 22413), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (22407, 22413), True, 'import numpy as np\n'), ((23477, 23569), 'scipy.ndimage.interpolation.spline_filter', 'ndimage.interpolation.spline_filter', (['unfiltered[subshape:-subshape, subshape:-subshape]'], {}), '(unfiltered[subshape:-subshape, subshape\n :-subshape])\n', (23512, 23569), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((1581, 1598), 'astropy.io.fits.open', 'fits.open', (['infile'], {}), '(infile)\n', (1590, 1598), False, 'from astropy.io import fits\n'), ((1999, 2017), 'numpy.amax', 'np.amax', (['self.nlam'], {}), '(self.nlam)\n', (2006, 2017), True, 'import numpy as np\n'), ((2869, 2911), 're.sub', 're.sub', (['"""//"""', '"""/"""', "(outdir + '/PSFloc.fits')"], {}), "('//', '/', outdir + '/PSFloc.fits')\n", (2875, 2911), False, 'import re\n'), ((3907, 3946), 'numpy.zeros', 'np.zeros', (['(order + 1, allcoef.shape[1])'], {}), '((order + 1, allcoef.shape[1]))\n', (3915, 3946), True, 'import numpy as np\n'), ((3989, 4023), 'numpy.ones', 'np.ones', (['(lam.shape[0], order + 1)'], {}), '((lam.shape[0], order + 1))\n', (3996, 4023), True, 'import numpy as np\n'), ((6224, 6257), 'numpy.linspace', 'np.linspace', (['lam1', 'lam2', 'n_spline'], {}), '(lam1, lam2, n_spline)\n', (6235, 6257), True, 'import numpy as np\n'), ((7076, 7110), 'numpy.zeros', 'np.zeros', (['self.interp_arr[0].shape'], {}), '(self.interp_arr[0].shape)\n', (7084, 7110), True, 'import numpy as np\n'), ((8523, 8566), 'numpy.zeros', 'np.zeros', (['((coeforder + 1) * (coeforder + 2))'], {}), '((coeforder + 1) * (coeforder + 2))\n', (8531, 8566), True, 'import numpy as np\n'), ((10382, 10402), 'numpy.arange', 'np.arange', (['(-100)', '(101)'], {}), '(-100, 101)\n', (10391, 10402), True, 'import numpy as np\n'), ((10426, 10451), 
'numpy.meshgrid', 'np.meshgrid', (['xindx', 'xindx'], {}), '(xindx, xindx)\n', (10437, 10451), True, 'import numpy as np\n'), ((10563, 10587), 'numpy.zeros', 'np.zeros', (['interp_x.shape'], {}), '(interp_x.shape)\n', (10571, 10587), True, 'import numpy as np\n'), ((10609, 10642), 'numpy.linspace', 'np.linspace', (['lam1', 'lam2', 'n_spline'], {}), '(lam1, lam2, n_spline)\n', (10620, 10642), True, 'import numpy as np\n'), ((11006, 11023), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (11014, 11023), True, 'import numpy as np\n'), ((11039, 11068), 'numpy.zeros', 'np.zeros', (['xindx.shape', 'np.int'], {}), '(xindx.shape, np.int)\n', (11047, 11068), True, 'import numpy as np\n'), ((11087, 11104), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (11095, 11104), True, 'import numpy as np\n'), ((11120, 11141), 'numpy.zeros', 'np.zeros', (['xindx.shape'], {}), '(xindx.shape)\n', (11128, 11141), True, 'import numpy as np\n'), ((12559, 12572), 'numpy.amax', 'np.amax', (['nlam'], {}), '(nlam)\n', (12566, 12572), True, 'import numpy as np\n'), ((13990, 14001), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (13996, 14001), True, 'import numpy as np\n'), ((14033, 14044), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (14039, 14044), True, 'import numpy as np\n'), ((14096, 14107), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (14102, 14107), True, 'import numpy as np\n'), ((14146, 14157), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (14152, 14157), True, 'import numpy as np\n'), ((19205, 19238), 'numpy.sum', 'np.sum', (['vals_sorted[iclip:-iclip]'], {}), '(vals_sorted[iclip:-iclip])\n', (19211, 19238), True, 'import numpy as np\n'), ((21768, 21822), 'scipy.signal.convolve2d', 'signal.convolve2d', (['inImage.data', 'gaussian'], {'mode': '"""same"""'}), "(inImage.data, gaussian, mode='same')\n", (21785, 21822), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((21854, 21923), 'scipy.signal.convolve2d', 'signal.convolve2d', (['(inImage.data * inImage.ivar)', 'gaussian'], {'mode': '"""same"""'}), "(inImage.data * inImage.ivar, gaussian, mode='same')\n", (21871, 21923), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((22991, 23012), 'numpy.arange', 'np.arange', (['(0)', '(14)', '(0.5)'], {}), '(0, 14, 0.5)\n', (23000, 23012), True, 'import numpy as np\n'), ((23030, 23051), 'numpy.arange', 'np.arange', (['(0)', '(25)', '(0.5)'], {}), '(0, 25, 0.5)\n', (23039, 23051), True, 'import numpy as np\n'), ((23176, 23202), 'numpy.arange', 'np.arange', (['(-3.0)', '(3.05)', '(0.2)'], {}), '(-3.0, 3.05, 0.2)\n', (23185, 23202), True, 'import numpy as np\n'), ((23220, 23246), 'numpy.arange', 'np.arange', (['(-3.0)', '(3.05)', '(0.2)'], {}), '(-3.0, 3.05, 0.2)\n', (23229, 23246), True, 'import numpy as np\n'), ((24386, 24524), 'scipy.optimize.minimize', 'optimize.minimize', (['_corrval', 'coef_opt'], {'args': '(x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac)', 'method': '"""Powell"""'}), "(_corrval, coef_opt, args=(x[_s:-_s, _s:-_s], y[_s:-_s, _s\n :-_s], subfiltered, polyorder, trimfrac), method='Powell')\n", (24403, 24524), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((24755, 24947), 'scipy.optimize.minimize', 'optimize.minimize', (['_corrval', 'coef_lin'], {'args': '(x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac,\n coef_opt)', 'method': '"""Powell"""', 'options': "{'xtol': 1e-06, 'ftol': 1e-06}"}), "(_corrval, coef_lin, args=(x[_s:-_s, _s:-_s], y[_s:-_s, _s\n 
:-_s], subfiltered, polyorder, trimfrac, coef_opt), method='Powell',\n options={'xtol': 1e-06, 'ftol': 1e-06})\n", (24772, 24947), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((25681, 25838), 'scipy.optimize.minimize', 'optimize.minimize', (['_corrval', 'coef_lin'], {'args': '(x, y, filtered, polyorder, trimfrac, coef_opt)', 'method': '"""Powell"""', 'options': "{'xtol': 1e-05, 'ftol': 1e-05}"}), "(_corrval, coef_lin, args=(x, y, filtered, polyorder,\n trimfrac, coef_opt), method='Powell', options={'xtol': 1e-05, 'ftol': \n 1e-05})\n", (25698, 25838), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((25988, 26130), 'scipy.optimize.minimize', 'optimize.minimize', (['_corrval', 'coef_opt'], {'args': '(x, y, filtered, polyorder, trimfrac)', 'method': '"""Powell"""', 'options': "{'xtol': 1e-05, 'ftol': 1e-05}"}), "(_corrval, coef_opt, args=(x, y, filtered, polyorder,\n trimfrac), method='Powell', options={'xtol': 1e-05, 'ftol': 1e-05})\n", (26005, 26130), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((1517, 1562), 're.sub', 're.sub', (['"""//"""', '"""/"""', "(infiledir + '/PSFloc.fits')"], {}), "('//', '/', infiledir + '/PSFloc.fits')\n", (1523, 1562), False, 'import re\n'), ((2709, 2730), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (2722, 2730), False, 'import os\n'), ((2939, 2966), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['self.xindx'], {}), '(self.xindx)\n', (2954, 2966), False, 'from astropy.io import fits\n'), ((2987, 3014), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['self.yindx'], {}), '(self.yindx)\n', (3002, 3014), False, 'from astropy.io import fits\n'), ((3035, 3065), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['self.lam_indx'], {}), '(self.lam_indx)\n', (3050, 3065), False, 'from astropy.io import fits\n'), ((6344, 6387), 'numpy.zeros', 'np.zeros', (['((coeforder + 1) * (coeforder + 2))'], {}), '((coeforder + 1) * (coeforder + 2))\n', (6352, 6387), True, 'import numpy as np\n'), ((10697, 10740), 'numpy.zeros', 'np.zeros', (['((coeforder + 1) * (coeforder + 2))'], {}), '((coeforder + 1) * (coeforder + 2))\n', (10705, 10740), True, 'import numpy as np\n'), ((12326, 12356), 'numpy.all', 'np.all', (['(y[:, :, nlam_max] == 0)'], {}), '(y[:, :, nlam_max] == 0)\n', (12332, 12356), True, 'import numpy as np\n'), ((16694, 16707), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (16704, 16707), True, 'import numpy as np\n'), ((16733, 16746), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (16743, 16746), True, 'import numpy as np\n'), ((19083, 19100), 'numpy.isfinite', 'np.isfinite', (['vals'], {}), '(vals)\n', (19094, 19100), True, 'import numpy as np\n'), ((21946, 22000), 'scipy.signal.convolve2d', 'signal.convolve2d', (['inImage.ivar', 'gaussian'], {'mode': '"""same"""'}), "(inImage.ivar, gaussian, mode='same')\n", (21963, 22000), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((4087, 4098), 'numpy.log', 'np.log', (['lam'], {}), '(lam)\n', (4093, 4098), True, 'import numpy as np\n'), ((4172, 4208), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['xarr', 'allcoef[:, i]'], {}), '(xarr, allcoef[:, i])\n', (4187, 4208), True, 'import numpy as np\n'), ((4878, 4900), 'numpy.sqrt', 'np.sqrt', (['coef.shape[0]'], {}), '(coef.shape[0])\n', (4885, 4900), True, 'import numpy as np\n'), ((5914, 5926), 'numpy.amin', 'np.amin', (['lam'], {}), '(lam)\n', (5921, 5926), True, 'import numpy as np\n'), ((5978, 5990), 'numpy.amax', 'np.amax', (['lam'], {}), 
'(lam)\n', (5985, 5990), True, 'import numpy as np\n'), ((6148, 6173), 'numpy.sqrt', 'np.sqrt', (['allcoef.shape[1]'], {}), '(allcoef.shape[1])\n', (6155, 6173), True, 'import numpy as np\n'), ((8319, 8344), 'numpy.sqrt', 'np.sqrt', (['allcoef.shape[1]'], {}), '(allcoef.shape[1])\n', (8326, 8344), True, 'import numpy as np\n'), ((9944, 9956), 'numpy.amin', 'np.amin', (['lam'], {}), '(lam)\n', (9951, 9956), True, 'import numpy as np\n'), ((10008, 10020), 'numpy.amax', 'np.amax', (['lam'], {}), '(lam)\n', (10015, 10020), True, 'import numpy as np\n'), ((10177, 10202), 'numpy.sqrt', 'np.sqrt', (['allcoef.shape[1]'], {}), '(allcoef.shape[1])\n', (10184, 10202), True, 'import numpy as np\n'), ((11909, 11956), 'scipy.interpolate.splrep', 'interpolate.splrep', (['interp_lam', 'pix_x'], {'k': '(1)', 's': '(0)'}), '(interp_lam, pix_x, k=1, s=0)\n', (11927, 11956), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((12044, 12065), 'numpy.arange', 'np.arange', (['y1', '(y2 + 1)'], {}), '(y1, y2 + 1)\n', (12053, 12065), True, 'import numpy as np\n'), ((12115, 12165), 'scipy.interpolate.splev', 'interpolate.splev', (['y[ix, iy, :nlam[ix, iy]]', 'tck_y'], {}), '(y[ix, iy, :nlam[ix, iy]], tck_y)\n', (12132, 12165), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((12209, 12265), 'scipy.interpolate.splev', 'interpolate.splev', (['lam_out[ix, iy, :nlam[ix, iy]]', 'tck_x'], {}), '(lam_out[ix, iy, :nlam[ix, iy]], tck_x)\n', (12226, 12265), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((23834, 23858), 'copy.deepcopy', 'copy.deepcopy', (['coef_save'], {}), '(coef_save)\n', (23847, 23858), False, 'import copy\n'), ((24209, 24228), 'copy.deepcopy', 'copy.deepcopy', (['coef'], {}), '(coef)\n', (24222, 24228), False, 'import copy\n'), ((6651, 6665), 'numpy.asarray', 'np.asarray', (['dy'], {}), '(dy)\n', (6661, 6665), True, 'import numpy as np\n'), ((6671, 6685), 'numpy.asarray', 'np.asarray', (['dx'], {}), '(dx)\n', (6681, 6685), True, 'import numpy as np\n'), ((7192, 7203), 'numpy.log', 'np.log', (['lam'], {}), '(lam)\n', (7198, 7203), True, 'import numpy as np\n'), ((8051, 8076), 'numpy.sqrt', 'np.sqrt', (['allcoef.shape[0]'], {}), '(allcoef.shape[0])\n', (8058, 8076), True, 'import numpy as np\n'), ((8648, 8659), 'numpy.log', 'np.log', (['lam'], {}), '(lam)\n', (8654, 8659), True, 'import numpy as np\n'), ((11336, 11353), 'numpy.all', 'np.all', (['(pix_x < 0)'], {}), '(pix_x < 0)\n', (11342, 11353), True, 'import numpy as np\n'), ((11357, 11377), 'numpy.all', 'np.all', (['(pix_x > 2048)'], {}), '(pix_x > 2048)\n', (11363, 11377), True, 'import numpy as np\n'), ((11381, 11398), 'numpy.all', 'np.all', (['(pix_y < 0)'], {}), '(pix_y < 0)\n', (11387, 11398), True, 'import numpy as np\n'), ((11402, 11422), 'numpy.all', 'np.all', (['(pix_y > 2048)'], {}), '(pix_y > 2048)\n', (11408, 11422), True, 'import numpy as np\n'), ((11764, 11811), 'scipy.interpolate.splrep', 'interpolate.splrep', (['pix_y', 'interp_lam'], {'k': '(1)', 's': '(0)'}), '(pix_y, interp_lam, k=1, s=0)\n', (11782, 11811), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((6485, 6506), 'numpy.log', 'np.log', (['interp_lam[i]'], {}), '(interp_lam[i])\n', (6491, 6506), True, 'import numpy as np\n'), ((10831, 10852), 'numpy.log', 'np.log', (['interp_lam[i]'], {}), '(interp_lam[i])\n', (10837, 10852), True, 'import numpy as np\n'), ((11552, 11611), 'scipy.interpolate.splrep', 'interpolate.splrep', (['pix_y[::-1]', 'interp_lam[::-1]'], {'k': '(1)', 's': '(0)'}), 
'(pix_y[::-1], interp_lam[::-1], k=1, s=0)\n', (11570, 11611), False, 'from scipy import interpolate, ndimage, optimize, signal\n'), ((11868, 11882), 'numpy.amax', 'np.amax', (['pix_y'], {}), '(pix_y)\n', (11875, 11882), True, 'import numpy as np\n'), ((11843, 11857), 'numpy.amin', 'np.amin', (['pix_y'], {}), '(pix_y)\n', (11850, 11857), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Demo showing how km_dict and insegtannotator may be used together for
interactive segmentation.
@author: vand and abda
"""
import sys
import insegtannotator
import skimage.io
import skimage.data
import km_dict
import numpy as np
#%% EXAMPLE 1: glass fibres
## loading image
print('Loading image')
filename = '../data/glass.png'
image = skimage.io.imread(filename)
#%% EXAMPLE 2: nerve fibres
## loading image
print('Loading image')
filename = '../data/nerve_im_scale.png'
image = skimage.io.imread(filename)
#%% COMMON PART
patch_size = 11
branching_factor = 5
number_layers = 5
number_training_patches = 35000
normalization = False
image_float = image.astype(float)/255  # np.float is deprecated/removed in recent NumPy; the builtin float gives the same float64 dtype
# Build tree
T = km_dict.build_km_tree(image_float, patch_size, branching_factor, number_training_patches, number_layers, normalization)
# Search km-tree and get assignment
A, number_nodes = km_dict.search_km_tree(image_float, T, branching_factor, normalization)
# number of repetitions for updating the segmentation
number_repetitions = 2
def processing_function(labels):
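    # Callback passed to InSegtAnnotator: expands the integer label image into
    # per-class binary images, estimates dictionary probabilities from the
    # km-tree assignment A, and returns the argmax of the resulting probability
    # map, repeating the update number_repetitions times.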
r,c = labels.shape
l = np.max(labels)+1
label_image = np.zeros((r,c,l))
for k in range(number_repetitions):
for i in range(1,l):
label_image[:,:,i] = (labels == i).astype(float)
D = km_dict.improb_to_dictprob(A, label_image, number_nodes, patch_size) # Dictionary
P = km_dict.dictprob_to_improb(A, D, patch_size) # Probability map
labels = np.argmax(P,axis=2) # Segmentation
return labels
print('Showtime')
# showtime
app = insegtannotator.PyQt5.QtWidgets.QApplication([])
ex = insegtannotator.InSegtAnnotator(image, processing_function)
app.exec()
sys.exit()
|
[
"km_dict.build_km_tree",
"numpy.argmax",
"numpy.zeros",
"km_dict.dictprob_to_improb",
"km_dict.search_km_tree",
"numpy.max",
"insegtannotator.PyQt5.QtWidgets.QApplication",
"km_dict.improb_to_dictprob",
"sys.exit",
"insegtannotator.InSegtAnnotator"
] |
[((753, 876), 'km_dict.build_km_tree', 'km_dict.build_km_tree', (['image_float', 'patch_size', 'branching_factor', 'number_training_patches', 'number_layers', 'normalization'], {}), '(image_float, patch_size, branching_factor,\n number_training_patches, number_layers, normalization)\n', (774, 876), False, 'import km_dict\n'), ((927, 998), 'km_dict.search_km_tree', 'km_dict.search_km_tree', (['image_float', 'T', 'branching_factor', 'normalization'], {}), '(image_float, T, branching_factor, normalization)\n', (949, 998), False, 'import km_dict\n'), ((1604, 1652), 'insegtannotator.PyQt5.QtWidgets.QApplication', 'insegtannotator.PyQt5.QtWidgets.QApplication', (['[]'], {}), '([])\n', (1648, 1652), False, 'import insegtannotator\n'), ((1659, 1718), 'insegtannotator.InSegtAnnotator', 'insegtannotator.InSegtAnnotator', (['image', 'processing_function'], {}), '(image, processing_function)\n', (1690, 1718), False, 'import insegtannotator\n'), ((1730, 1740), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1738, 1740), False, 'import sys\n'), ((1176, 1195), 'numpy.zeros', 'np.zeros', (['(r, c, l)'], {}), '((r, c, l))\n', (1184, 1195), True, 'import numpy as np\n'), ((1141, 1155), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (1147, 1155), True, 'import numpy as np\n'), ((1336, 1404), 'km_dict.improb_to_dictprob', 'km_dict.improb_to_dictprob', (['A', 'label_image', 'number_nodes', 'patch_size'], {}), '(A, label_image, number_nodes, patch_size)\n', (1362, 1404), False, 'import km_dict\n'), ((1430, 1474), 'km_dict.dictprob_to_improb', 'km_dict.dictprob_to_improb', (['A', 'D', 'patch_size'], {}), '(A, D, patch_size)\n', (1456, 1474), False, 'import km_dict\n'), ((1510, 1530), 'numpy.argmax', 'np.argmax', (['P'], {'axis': '(2)'}), '(P, axis=2)\n', (1519, 1530), True, 'import numpy as np\n')]
|
import argparse
import numpy as np
import matplotlib.pyplot as plt
from FAUSTPy import *
#######################################################
# set up command line arguments
#######################################################
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--faustfloat',
dest="faustfloat",
default="float",
help="The value of FAUSTFLOAT.")
parser.add_argument('-p', '--path',
dest="faust_path",
default="",
help="The path to the FAUST compiler.")
parser.add_argument('-c', '--cflags',
dest="cflags",
default=[],
type=str.split,
help="Extra compiler flags")
parser.add_argument('-s', '--fs',
dest="fs",
default=48000,
type=int,
help="The sampling frequency")
args = parser.parse_args()
#######################################################
# initialise the FAUST object and get the default parameters
#######################################################
wrapper.FAUST_PATH = args.faust_path
dattorro = FAUST("dattorro_notch_cut_regalia.dsp", args.fs, args.faustfloat,
extra_compile_args=args.cflags)
def_Q = dattorro.dsp.ui.p_Q
def_Gain = dattorro.dsp.ui.p_Gain
def_Freq = dattorro.dsp.ui.p_Center_Freq
#######################################################
# plot the frequency response with the default settings
#######################################################
audio = np.zeros((dattorro.dsp.num_in, args.fs), dtype=dattorro.dsp.dtype)
audio[:, 0] = 1
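# The input is a unit impulse, so the FFT of the processed output below is the
# filter's frequency response.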
out = dattorro.compute(audio)
print(audio)
print(out)
spec = np.fft.fft(out)[:, :args.fs/2]
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response with the default settings\n"
"(Q={}, F={:.2f} Hz, G={:.0f} dB FS)".format(
def_Q.zone, def_Freq.zone, 20*np.log10(def_Gain.zone+1e-8)
),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
p.plot(20*np.log10(np.absolute(spec.T)+1e-8))
p.legend(("Left channel", "Right channel"), loc="best")
#######################################################
# plot the frequency response with varying Q
#######################################################
Q = np.linspace(def_Q.min, def_Q.max, 10)
dattorro.dsp.ui.p_Center_Freq = 1e2
dattorro.dsp.ui.p_Gain = 10**(-0.5) # -10 dB
cur_G = dattorro.dsp.ui.p_Gain.zone
cur_F = dattorro.dsp.ui.p_Center_Freq.zone
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response "
"(G={:.0f} dB FS, F={} Hz)".format(20*np.log10(cur_G+1e-8), cur_F),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
for q in Q:
dattorro.dsp.ui.p_Q = q
out = dattorro.compute(audio)
spec = np.fft.fft(out)[0, :args.fs/2]
p.plot(20*np.log10(np.absolute(spec.T)+1e-8),
label="Q={}".format(q))
p.legend(loc="best")
#######################################################
# plot the frequency response with varying gain
#######################################################
# start at -60 dB because the minimum is at an extremely low -160 dB
G = np.logspace(-3, np.log10(def_Gain.max), 10)
dattorro.dsp.ui.p_Q = 2
cur_Q = dattorro.dsp.ui.p_Q.zone
cur_F = dattorro.dsp.ui.p_Center_Freq.zone
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response (Q={}, F={} Hz)".format(cur_Q, cur_F),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
for g in G:
dattorro.dsp.ui.p_Gain = g
out = dattorro.compute(audio)
spec = np.fft.fft(out)[0, :args.fs/2]
p.plot(20*np.log10(np.absolute(spec.T)+1e-8),
label="G={:.3g} dB FS".format(20*np.log10(g+1e-8)))
p.legend(loc="best")
###########################################################
# plot the frequency response with varying center frequency
###########################################################
F = np.logspace(np.log10(def_Freq.min), np.log10(def_Freq.max), 10)
dattorro.dsp.ui.p_Q = def_Q.default
dattorro.dsp.ui.p_Gain = 10**(-0.5) # -10 dB
cur_Q = dattorro.dsp.ui.p_Q.zone
cur_G = dattorro.dsp.ui.p_Gain.zone
fig = plt.figure()
p = fig.add_subplot(
1, 1, 1,
title="Frequency response "
"(Q={}, G={:.0f} dB FS)".format(cur_Q, 20*np.log10(cur_G+1e-8)),
xlabel="Frequency in Hz (log)",
ylabel="Magnitude in dB FS",
xscale="log"
)
for f in F:
dattorro.dsp.ui.p_Center_Freq = f
out = dattorro.compute(audio)
spec = np.fft.fft(out)[0, :args.fs/2]
p.plot(20*np.log10(np.absolute(spec.T)+1e-8),
label="F={:.2f} Hz".format(f))
p.legend(loc="best")
################
# show the plots
################
plt.show()
print("everything passes!")
|
[
"numpy.absolute",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.fft.fft",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.log10"
] |
[((244, 269), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (267, 269), False, 'import argparse\n'), ((1627, 1693), 'numpy.zeros', 'np.zeros', (['(dattorro.dsp.num_in, args.fs)'], {'dtype': 'dattorro.dsp.dtype'}), '((dattorro.dsp.num_in, args.fs), dtype=dattorro.dsp.dtype)\n', (1635, 1693), True, 'import numpy as np\n'), ((1812, 1824), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1822, 1824), True, 'import matplotlib.pyplot as plt\n'), ((2413, 2450), 'numpy.linspace', 'np.linspace', (['def_Q.min', 'def_Q.max', '(10)'], {}), '(def_Q.min, def_Q.max, 10)\n', (2424, 2450), True, 'import numpy as np\n'), ((2621, 2633), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2631, 2633), True, 'import matplotlib.pyplot as plt\n'), ((3479, 3491), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3489, 3491), True, 'import matplotlib.pyplot as plt\n'), ((4349, 4361), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4359, 4361), True, 'import matplotlib.pyplot as plt\n'), ((4886, 4896), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4894, 4896), True, 'import matplotlib.pyplot as plt\n'), ((1774, 1789), 'numpy.fft.fft', 'np.fft.fft', (['out'], {}), '(out)\n', (1784, 1789), True, 'import numpy as np\n'), ((3342, 3364), 'numpy.log10', 'np.log10', (['def_Gain.max'], {}), '(def_Gain.max)\n', (3350, 3364), True, 'import numpy as np\n'), ((4137, 4159), 'numpy.log10', 'np.log10', (['def_Freq.min'], {}), '(def_Freq.min)\n', (4145, 4159), True, 'import numpy as np\n'), ((4161, 4183), 'numpy.log10', 'np.log10', (['def_Freq.max'], {}), '(def_Freq.max)\n', (4169, 4183), True, 'import numpy as np\n'), ((2952, 2967), 'numpy.fft.fft', 'np.fft.fft', (['out'], {}), '(out)\n', (2962, 2967), True, 'import numpy as np\n'), ((3772, 3787), 'numpy.fft.fft', 'np.fft.fft', (['out'], {}), '(out)\n', (3782, 3787), True, 'import numpy as np\n'), ((4687, 4702), 'numpy.fft.fft', 'np.fft.fft', (['out'], {}), '(out)\n', (4697, 4702), True, 'import numpy as np\n'), ((2018, 2049), 'numpy.log10', 'np.log10', (['(def_Gain.zone + 1e-08)'], {}), '(def_Gain.zone + 1e-08)\n', (2026, 2049), True, 'import numpy as np\n'), ((2167, 2186), 'numpy.absolute', 'np.absolute', (['spec.T'], {}), '(spec.T)\n', (2178, 2186), True, 'import numpy as np\n'), ((2748, 2771), 'numpy.log10', 'np.log10', (['(cur_G + 1e-08)'], {}), '(cur_G + 1e-08)\n', (2756, 2771), True, 'import numpy as np\n'), ((4480, 4503), 'numpy.log10', 'np.log10', (['(cur_G + 1e-08)'], {}), '(cur_G + 1e-08)\n', (4488, 4503), True, 'import numpy as np\n'), ((3007, 3026), 'numpy.absolute', 'np.absolute', (['spec.T'], {}), '(spec.T)\n', (3018, 3026), True, 'import numpy as np\n'), ((3827, 3846), 'numpy.absolute', 'np.absolute', (['spec.T'], {}), '(spec.T)\n', (3838, 3846), True, 'import numpy as np\n'), ((3898, 3917), 'numpy.log10', 'np.log10', (['(g + 1e-08)'], {}), '(g + 1e-08)\n', (3906, 3917), True, 'import numpy as np\n'), ((4742, 4761), 'numpy.absolute', 'np.absolute', (['spec.T'], {}), '(spec.T)\n', (4753, 4761), True, 'import numpy as np\n')]
|
import numpy as np
from numpy import linalg as LA
class LDA():
def __init__(self, dim = 2):
self.dim = dim
self.matrixTransf = None
def fit_transform(self, X, labels):
positive = []
negative = []
for i in range(len(labels)):
if labels[i] == 1:
positive.append(X[i])
else:
negative.append(X[i])
positive = np.array(positive)
negative = np.array(negative)
media_pos = np.mean(positive, axis = 0)
media_neg = np.mean(negative, axis = 0)
cov_pos = np.cov(positive.T)
cov_neg = np.cov(negative.T)
SW = cov_pos + cov_neg
sub = (media_pos - media_neg)
print(SW.shape)
print(sub.shape)
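        # Fisher discriminant direction: w = Sw^(-1) (mu_pos - mu_neg), using the
        # pseudo-inverse in case the within-class scatter matrix Sw is singular.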
wLDA = np.matmul(LA.pinv(SW), sub)
self.matrixTransf = np.array(wLDA)
        print("Transformation matrix")
print(self.matrixTransf)
res = np.matmul(X, self.matrixTransf.T)
return res
|
[
"numpy.mean",
"numpy.array",
"numpy.matmul",
"numpy.cov",
"numpy.linalg.pinv"
] |
[((441, 459), 'numpy.array', 'np.array', (['positive'], {}), '(positive)\n', (449, 459), True, 'import numpy as np\n'), ((479, 497), 'numpy.array', 'np.array', (['negative'], {}), '(negative)\n', (487, 497), True, 'import numpy as np\n'), ((527, 552), 'numpy.mean', 'np.mean', (['positive'], {'axis': '(0)'}), '(positive, axis=0)\n', (534, 552), True, 'import numpy as np\n'), ((575, 600), 'numpy.mean', 'np.mean', (['negative'], {'axis': '(0)'}), '(negative, axis=0)\n', (582, 600), True, 'import numpy as np\n'), ((621, 639), 'numpy.cov', 'np.cov', (['positive.T'], {}), '(positive.T)\n', (627, 639), True, 'import numpy as np\n'), ((658, 676), 'numpy.cov', 'np.cov', (['negative.T'], {}), '(negative.T)\n', (664, 676), True, 'import numpy as np\n'), ((902, 916), 'numpy.array', 'np.array', (['wLDA'], {}), '(wLDA)\n', (910, 916), True, 'import numpy as np\n'), ((1014, 1047), 'numpy.matmul', 'np.matmul', (['X', 'self.matrixTransf.T'], {}), '(X, self.matrixTransf.T)\n', (1023, 1047), True, 'import numpy as np\n'), ((847, 858), 'numpy.linalg.pinv', 'LA.pinv', (['SW'], {}), '(SW)\n', (854, 858), True, 'from numpy import linalg as LA\n')]
|
import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from PIL import Image
import Network
import dataset
from Network import BATCH_SIZE
from dataset import DataSet
def output_predict(depths, images, depths_discretized, depths_reconstructed, output_dir):
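    # Saves, for every sample in the batch: the RGB image, the depth map, each
    # discretized depth bin, and the depth reconstructed from the bins, as PNGs.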
print("output predict into %s" % output_dir)
if not tf.gfile.Exists(output_dir):
tf.gfile.MakeDirs(output_dir)
for i, _ in enumerate(images):
image, depth, depth_discretized, depth_reconstructed = images[i], depths[i], depths_discretized[i], \
depths_reconstructed[i]
pilimg = Image.fromarray(np.uint8(image))
image_name = "%s/%03d_org.png" % (output_dir, i)
pilimg.save(image_name)
depth = depth.transpose(2, 0, 1)
if np.max(depth) != 0:
ra_depth = (depth / np.max(depth)) * 255.0
else:
ra_depth = depth * 255.0
depth_pil = Image.fromarray(np.uint8(ra_depth[0]), mode="L")
depth_name = "%s/%03d.png" % (output_dir, i)
depth_pil.save(depth_name)
for j in range(dataset.DEPTH_DIM):
ra_depth = depth_discretized[:, :, j] * 255.0
depth_discr_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
depth_discr_name = "%s/%03d_%03d_discr.png" % (output_dir, i, j)
depth_discr_pil.save(depth_discr_name)
# for j in range(DEPTH_DIM):
# ra_depth = mask[:, :, j]
# depth_discr_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
# depth_discr_name = "%s/%03d_%03d_discr_m.png" % (output_dir, i, j)
# depth_discr_pil.save(depth_discr_name)
#
# for j in range(DEPTH_DIM):
# ra_depth = mask_lower[:, :, j]
# depth_discr_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
# depth_discr_name = "%s/%03d_%03d_discr_ml.png" % (output_dir, i, j)
# depth_discr_pil.save(depth_discr_name)
depth = depth_reconstructed[:, :, 0]
if np.max(depth) != 0:
ra_depth = (depth / np.max(depth)) * 255.0
else:
ra_depth = depth * 255.0
depth_pil = Image.fromarray(np.uint8(ra_depth), mode="L")
depth_name = "%s/%03d_reconstructed.png" % (output_dir, i)
depth_pil.save(depth_name)
def playground_loss_function(labels, logits):
# in rank 2, [elements, classes]
# tf.nn.weighted_cross_entropy_with_logits(labels, logits, weights)
losses = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
return losses
def prob_to_logit(probs):
return np.log(probs / (1 - probs))
def softmax(x):
"""Same behaviour as tf.nn.softmax in tensorflow"""
e_x = np.exp(x)
sum_per_row = np.tile(e_x.sum(axis=1), (x.shape[1], 1)).T
print('e_x', '\n', e_x)
print('sum_per_row', '\n', sum_per_row)
return e_x / sum_per_row
def softmax_cross_entropy_loss(labels, logits):
"""Same behaviour as tf.nn.softmax_cross_entropy_with_logits in tensorflow"""
loss_per_row = - np.sum(labels * np.log(softmax(logits)), axis=1)
return loss_per_row
def labels_to_info_gain(labels, logits, alpha=0.2):
last_axis = len(logits.shape) - 1
label_idx = np.tile(np.argmax(labels, axis=last_axis), (labels.shape[last_axis], 1)).T
prob_bin_idx = np.tile(range(logits.shape[last_axis]), (labels.shape[0], 1))
# print('label_idx', '\n', label_idx)
# print('probs_idx', '\n', prob_bin_idx)
info_gain = np.exp(-alpha * (label_idx - prob_bin_idx)**2)
print('info gain', '\n', info_gain)
return info_gain
def tf_labels_to_info_gain(labels, logits, alpha=0.2):
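    # TensorFlow counterpart of labels_to_info_gain: builds a soft-target matrix
    # exp(-alpha * (true_bin_index - bin_index)^2) with the same shape as logits.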
last_axis = len(logits.shape) - 1
label_idx = tf.expand_dims(tf.argmax(labels, axis=last_axis), 0)
label_idx = tf.cast(label_idx, dtype=tf.int32)
label_idx = tf.tile(label_idx, [labels.shape[last_axis], 1])
label_idx = tf.transpose(label_idx)
prob_bin_idx = tf.expand_dims(tf.range(logits.shape[last_axis], dtype=tf.int32), last_axis)
prob_bin_idx = tf.transpose(prob_bin_idx)
prob_bin_idx = tf.tile(prob_bin_idx, [labels.shape[0], 1])
difference = (label_idx - prob_bin_idx)**2
difference = tf.cast(difference, dtype=tf.float32)
info_gain = tf.exp(-alpha * difference)
return info_gain
def informed_cross_entropy_loss(labels, logits):
    """Cross entropy against information-gain weighted targets; the NumPy counterpart of tf.nn.softmax_cross_entropy_with_logits applied to labels_to_info_gain targets"""
probs = softmax(logits)
print('probs', '\n', probs)
logged_probs = np.log(probs)
print('logged probs', '\n', logged_probs)
loss_per_row = - np.sum(labels_to_info_gain(labels, logits) * logged_probs, axis=1)
return loss_per_row
def playing_with_losses():
labels = np.array([
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
# [0, 1, 0, 0, 0],
# [0, 0, 1, 0, 0],
# [0, 0, 0, 1, 0],
# [0, 0, 0, 0, 1],
# [1, 0, 0, 0, 0],
])
logits = np.array([
[0, 20, 0, 0, 0],
[0, 10, 0, 0, 0],
[0, 2, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 0, 0, 1],
[0, 1, 0, 0, 0],
# [3, 1, 1, 1, 1],
# [0, 10, 0, 0, 0],
# [1, 5, 1, 1, 1],
# [0, 0, 1, 0, 0],
# [1, 1, 4, 1, 1],
# [1, 1, 1, 4, 1],
# [1, 1, 1, 1, 4],
# [4, 1, 1, 1, 1],
])
probs = softmax(logits)
loss = softmax_cross_entropy_loss(labels=labels, logits=logits)
new_loss = informed_cross_entropy_loss(labels=labels, logits=logits)
with tf.Graph().as_default():
with tf.Session() as sess:
logits_tf = tf.constant(logits, dtype=tf.float32)
labels_tf = tf.constant(labels, dtype=tf.float32)
probs_tf = sess.run(tf.nn.softmax(logits_tf))
loss_tf = sess.run(tf.nn.softmax_cross_entropy_with_logits(labels=labels_tf, logits=logits_tf))
new_loss_tf = sess.run(tf.nn.softmax_cross_entropy_with_logits(labels=tf_labels_to_info_gain(labels, logits_tf), logits=logits_tf))
# print('labels', '\n', labels)
# print('logits', '\n', logits)
# print('probs', '\n', probs)
# print('probs diff', '\n', probs - probs_tf)
print('loss', '\n', loss)
print('loss_tf', '\n', loss_tf)
print('loss diff', '\n', loss - loss_tf)
print('new_loss', '\n', new_loss)
print('new_loss_tf', '\n', new_loss_tf)
print('new loss diff', '\n', new_loss - new_loss_tf)
# f, axarr = plt.subplots(2, 3)
# axarr[0, 0].set_title('sample 1')
# axarr[0, 0].plot(probs[0, :])
# axarr[0, 1].set_title('sample 2')
# axarr[0, 1].plot(probs[1, :])
# axarr[1, 0].set_title('sample 3')
# axarr[1, 0].plot(probs[2, :])
# axarr[1, 1].set_title('sample 4')
# axarr[1, 1].plot(probs[3, :])
plt.plot(probs[0, :], color='r')
plt.plot(probs[1, :], color='g')
plt.plot(probs[2, :], color='b')
plt.plot(probs[3, :], color='y')
plt.show()
def input_parser(filename):
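    # Maps one CSV filename to a dataset of (image path, annotation path) pairs,
    # one pair per line of the file.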
assert tf.get_default_session() is sess
tf.logging.warning(('filename', filename))
channel_data = tf.data.TextLineDataset(filename).map(lambda line: tf.decode_csv(line, [["path"], ["annotation"]]))
return channel_data
def filenames_to_data(rgb_filename, voxelmap_filename):
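    # Loads the RGB image, reads the target voxel occupancy grid via a py_func,
    # and derives the depth map from the voxel grid.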
tf.logging.warning(('rgb_filename', rgb_filename))
rgb_image = dataset.DataSet.filename_to_input_image(rgb_filename)
voxelmap = tf.py_func(dataset.DataSet.filename_to_target_voxelmap, [voxelmap_filename], tf.int32)
voxelmap.set_shape([dataset.TARGET_WIDTH, dataset.TARGET_HEIGHT, dataset.DEPTH_DIM])
# voxelmap = dataset.DataSet.filename_to_target_voxelmap(voxelmap_filename)
depth_reconstructed = dataset.DataSet.tf_voxelmap_to_depth(voxelmap)
return rgb_image, voxelmap, depth_reconstructed
def tf_new_data_api_experiments():
# global sess
batch_size = 4
with sess.as_default():
tf.logging.set_verbosity(tf.logging.INFO)
# dataset = tf.data.TFRecordDataset(['train-voxel-gta.csv', 'test-voxel-gta.csv'])
train_imgs = tf.constant(['train-voxel-gta.csv'])
filename_list = tf.data.Dataset.from_tensor_slices(train_imgs)
filename_pairs = filename_list.flat_map(input_parser)
data_pairs = filename_pairs.map(filenames_to_data)
data_pairs = data_pairs.batch(batch_size)
#
# # input
# image = dataset.DataSet.filename_to_input_image(filename)
# # target
# voxelmap = dataset.DataSet.filename_to_target_voxelmap(voxelmap_filename)
# depth_reconstructed = dataset.DataSet.tf_voxelmap_to_depth(voxelmap)
iterator = data_pairs.make_one_shot_iterator()
batch_images, batch_voxels, batch_depths = iterator.get_next()
for i in range(1):
images_values, voxels_values, depths_values = sess.run([batch_images, batch_voxels, batch_depths])
for j in range(batch_size):
plt.figure(figsize=(10, 6))
plt.axis('off')
plt.imshow(images_values[j, :, :, :].astype(dtype=np.uint8))
plt.savefig('inspections/out-{}-rgb.png'.format(j), bbox_inches='tight')
plt.figure(figsize=(10, 6))
plt.axis('off')
plt.imshow(depths_values[j, :, :].T, cmap='gray')
plt.savefig('inspections/out-{}-depth.png'.format(j), bbox_inches='tight')
# pure numpy calculation of depth image from voxelmap
occupied_ndc_grid = voxels_values[j, :, :, :]
occupied_ndc_grid = np.flip(occupied_ndc_grid, axis=2)
depth_size = occupied_ndc_grid.shape[2]
new_depth = np.argmax(occupied_ndc_grid, axis=2)
new_depth = new_depth.T
new_depth *= int(255/depth_size)
plt.figure(figsize=(10, 7))
plt.axis('off')
plt.imshow(new_depth, cmap='gray')
plt.savefig('inspections/out-{}-depth-np.png'.format(j), bbox_inches='tight')
def load_numpy_bin():
# name = 'inspections/2018-03-07--17-57-32--527.bin'
name = 'inspections/2018-03-07--17-57-32--527.npy'
# numpy_voxelmap = np.fromfile(name, sep=';')
numpy_voxelmap = np.load(name)
print(numpy_voxelmap.shape)
# numpy_voxelmap = numpy_voxelmap.reshape([240, 160, 100])
numpy_voxelmap = np.flip(numpy_voxelmap, axis=2)
# now I have just boolean for each value
# so I create mask to assign higher value to booleans in higher index
depth_size = numpy_voxelmap.shape[2]
new_depth = np.argmax(numpy_voxelmap, axis=2)
new_depth = new_depth.T
new_depth *= int(255 / depth_size)
plt.figure(figsize=(10, 6))
plt.axis('off')
plt.imshow(new_depth, cmap='gray')
plt.savefig('inspections/2018-03-07--17-57-32--527.png', bbox_inches='tight')
sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
if __name__ == '__main__':
# playing_with_losses()
# tf_dataset_experiments()
# load_numpy_bin()
tf_new_data_api_experiments()
# arr = np.array([
# [1, 1, 1, 2],
# [2, 2, 2, 4],
# [4, 4, 4, 8],
# ])
# with tf.Graph().as_default():
# with tf.Session() as sess:
# logits_tf = tf.constant(arr, dtype=tf.float32)
# tf_mean = sess.run(tf.reduce_mean(logits_tf))
# print('tf_mean\n', tf_mean)
#
# print('mean\n', np.mean(arr))
# print('sum_per_row\n', np.sum(arr, axis=1))
# print('mean_of_sum\n', np.mean(np.sum(arr, axis=1), axis=0))
# ds = DataSet(8)
# ds.load_params('train.csv')
#
# d = list(range(1, 100))
# d_min = np.min(d)
# d_max = 20
# num_bins = 10
# q_calc = (np.log(np.max(d)) - np.log(d_min)) / (num_bins - 1)
# # q = 0.5 # width of quantization bin
# l = np.round((np.log(d) - np.log(d_min)) / q_calc)
#
# print(d)
# print(l)
#
# print('q_calc', q_calc)
#
# f, axarr = plt.subplots(2, 2)
# axarr[0, 0].plot(d)
# axarr[0, 1].plot(np.log(d))
# axarr[1, 0].plot(np.log(d) - np.log(d_min))
# axarr[1, 1].plot((np.log(d) - np.log(d_min)) / q_calc)
# plt.show()
# with tf.Graph().as_default():
# with tf.Session() as sess:
# x = tf.constant(d)
#
# # for i in range(500):
# # if i % 500 == 0:
# # print('hi', i)
#
# IMAGE_HEIGHT = 240
# IMAGE_WIDTH = 320
# TARGET_HEIGHT = 120
# TARGET_WIDTH = 160
# DEPTH_DIM = 10
#
# filename_queue = tf.train.string_input_producer(['train.csv'], shuffle=True)
# reader = tf.TextLineReader()
# _, serialized_example = reader.read(filename_queue)
# filename, depth_filename = tf.decode_csv(serialized_example, [["path"], ["annotation"]])
# # input
# jpg = tf.read_file(filename)
# image = tf.image.decode_jpeg(jpg, channels=3)
# image = tf.cast(image, tf.float32)
# # target
# depth_png = tf.read_file(depth_filename)
# depth = tf.image.decode_png(depth_png, channels=1)
# depth = tf.cast(depth, tf.float32)
# depth = depth / 255.0
# # depth = tf.cast(depth, tf.int64)
# # resize
# image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
# depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
#
# depth_discretized = dataset.DataSet.discretize_depth(depth)
#
# invalid_depth = tf.sign(depth)
#
# batch_size = 8
# # generate batch
# images, depths, depths_discretized, invalid_depths = tf.train.batch(
# [image, depth, depth_discretized, invalid_depth],
# batch_size=batch_size,
# num_threads=4,
# capacity=40)
#
# depth_reconstructed, weights, mask, mask_multiplied, mask_multiplied_sum = Network.Network.bins_to_depth(depths_discretized)
#
# print('weights: ', weights)
#
# coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#
# images_val, depths_val, depths_discretized_val, invalid_depths_val, depth_reconstructed_val, mask_val, mask_multiplied_val, mask_multiplied_sum_val = sess.run(
# [images, depths, depths_discretized, invalid_depths, depth_reconstructed, mask, mask_multiplied, mask_multiplied_sum])
# sess.run(images)
#
# output_predict(depths_val, images_val, depths_discretized_val,
# depth_reconstructed_val, 'kunda')
#
# depth_reconstructed_val = depth_reconstructed_val[:, :, :, 0]
# coord.request_stop()
# coord.join(threads)
#
# layer = 2
# f, axarr = plt.subplots(2, 3)
# axarr[0, 0].set_title('masks_val')
# axarr[0, 0].imshow(mask_val[0, :, :, layer])
# axarr[0, 1].set_title('mask_multiplied_val')
# axarr[0, 1].imshow(mask_multiplied_val[0, :, :, layer])
# axarr[1, 0].set_title('depths_val')
# axarr[1, 0].imshow(depths_val[0, :, :, 0])
# axarr[1, 1].set_title('depths_discretized_val')
# axarr[1, 1].imshow(depths_discretized_val[0, :, :, layer])
# axarr[0, 2].set_title('mask_multiplied_sum_val')
# axarr[0, 2].imshow(mask_multiplied_sum_val[0, :, :])
# axarr[1, 2].set_title('depth_reconstructed_val')
# axarr[1, 2].imshow(depth_reconstructed_val[0, :, :])
# plt.show()
# network = Network.Network()
# network.prepare()
# total_vars = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
# print('trainable vars: ', total_vars)
# for output bins = 200: 73 696 786
# for output bins = 100: 65 312 586
|
[
"numpy.load",
"tensorflow.gfile.Exists",
"numpy.argmax",
"tensorflow.logging.warning",
"dataset.DataSet.filename_to_input_image",
"tensorflow.logging.set_verbosity",
"tensorflow.ConfigProto",
"matplotlib.pyplot.figure",
"numpy.exp",
"tensorflow.nn.softmax",
"tensorflow.data.TextLineDataset",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"matplotlib.pyplot.imshow",
"dataset.DataSet.tf_voxelmap_to_depth",
"tensorflow.cast",
"numpy.max",
"tensorflow.exp",
"tensorflow.range",
"numpy.uint8",
"matplotlib.pyplot.show",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.tile",
"matplotlib.use",
"tensorflow.Graph",
"tensorflow.decode_csv",
"numpy.flip",
"numpy.log",
"matplotlib.pyplot.plot",
"tensorflow.py_func",
"tensorflow.gfile.MakeDirs",
"tensorflow.argmax",
"matplotlib.pyplot.axis",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.array",
"matplotlib.pyplot.savefig",
"tensorflow.get_default_session"
] |
[((61, 82), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (75, 82), False, 'import matplotlib\n'), ((2575, 2644), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (2614, 2644), True, 'import tensorflow as tf\n'), ((2702, 2729), 'numpy.log', 'np.log', (['(probs / (1 - probs))'], {}), '(probs / (1 - probs))\n', (2708, 2729), True, 'import numpy as np\n'), ((2814, 2823), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2820, 2823), True, 'import numpy as np\n'), ((3580, 3628), 'numpy.exp', 'np.exp', (['(-alpha * (label_idx - prob_bin_idx) ** 2)'], {}), '(-alpha * (label_idx - prob_bin_idx) ** 2)\n', (3586, 3628), True, 'import numpy as np\n'), ((3868, 3902), 'tensorflow.cast', 'tf.cast', (['label_idx'], {'dtype': 'tf.int32'}), '(label_idx, dtype=tf.int32)\n', (3875, 3902), True, 'import tensorflow as tf\n'), ((3919, 3967), 'tensorflow.tile', 'tf.tile', (['label_idx', '[labels.shape[last_axis], 1]'], {}), '(label_idx, [labels.shape[last_axis], 1])\n', (3926, 3967), True, 'import tensorflow as tf\n'), ((3984, 4007), 'tensorflow.transpose', 'tf.transpose', (['label_idx'], {}), '(label_idx)\n', (3996, 4007), True, 'import tensorflow as tf\n'), ((4123, 4149), 'tensorflow.transpose', 'tf.transpose', (['prob_bin_idx'], {}), '(prob_bin_idx)\n', (4135, 4149), True, 'import tensorflow as tf\n'), ((4169, 4212), 'tensorflow.tile', 'tf.tile', (['prob_bin_idx', '[labels.shape[0], 1]'], {}), '(prob_bin_idx, [labels.shape[0], 1])\n', (4176, 4212), True, 'import tensorflow as tf\n'), ((4277, 4314), 'tensorflow.cast', 'tf.cast', (['difference'], {'dtype': 'tf.float32'}), '(difference, dtype=tf.float32)\n', (4284, 4314), True, 'import tensorflow as tf\n'), ((4331, 4358), 'tensorflow.exp', 'tf.exp', (['(-alpha * difference)'], {}), '(-alpha * difference)\n', (4337, 4358), True, 'import tensorflow as tf\n'), ((4593, 4606), 'numpy.log', 'np.log', (['probs'], {}), '(probs)\n', (4599, 4606), True, 'import numpy as np\n'), ((4807, 4924), 'numpy.array', 'np.array', (['[[0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1,\n 0, 0, 0], [0, 1, 0, 0, 0]]'], {}), '([[0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0\n ], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0]])\n', (4815, 4924), True, 'import numpy as np\n'), ((5123, 5241), 'numpy.array', 'np.array', (['[[0, 20, 0, 0, 0], [0, 10, 0, 0, 0], [0, 2, 0, 0, 0], [1, 1, 1, 0, 0], [0, \n 1, 0, 0, 1], [0, 1, 0, 0, 0]]'], {}), '([[0, 20, 0, 0, 0], [0, 10, 0, 0, 0], [0, 2, 0, 0, 0], [1, 1, 1, 0,\n 0], [0, 1, 0, 0, 1], [0, 1, 0, 0, 0]])\n', (5131, 5241), True, 'import numpy as np\n'), ((6936, 6968), 'matplotlib.pyplot.plot', 'plt.plot', (['probs[0, :]'], {'color': '"""r"""'}), "(probs[0, :], color='r')\n", (6944, 6968), True, 'import matplotlib.pyplot as plt\n'), ((6973, 7005), 'matplotlib.pyplot.plot', 'plt.plot', (['probs[1, :]'], {'color': '"""g"""'}), "(probs[1, :], color='g')\n", (6981, 7005), True, 'import matplotlib.pyplot as plt\n'), ((7010, 7042), 'matplotlib.pyplot.plot', 'plt.plot', (['probs[2, :]'], {'color': '"""b"""'}), "(probs[2, :], color='b')\n", (7018, 7042), True, 'import matplotlib.pyplot as plt\n'), ((7047, 7079), 'matplotlib.pyplot.plot', 'plt.plot', (['probs[3, :]'], {'color': '"""y"""'}), "(probs[3, :], color='y')\n", (7055, 7079), True, 'import matplotlib.pyplot as plt\n'), ((7085, 7095), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7093, 7095), True, 'import 
matplotlib.pyplot as plt\n'), ((7174, 7216), 'tensorflow.logging.warning', 'tf.logging.warning', (["('filename', filename)"], {}), "(('filename', filename))\n", (7192, 7216), True, 'import tensorflow as tf\n'), ((7422, 7472), 'tensorflow.logging.warning', 'tf.logging.warning', (["('rgb_filename', rgb_filename)"], {}), "(('rgb_filename', rgb_filename))\n", (7440, 7472), True, 'import tensorflow as tf\n'), ((7489, 7542), 'dataset.DataSet.filename_to_input_image', 'dataset.DataSet.filename_to_input_image', (['rgb_filename'], {}), '(rgb_filename)\n', (7528, 7542), False, 'import dataset\n'), ((7558, 7648), 'tensorflow.py_func', 'tf.py_func', (['dataset.DataSet.filename_to_target_voxelmap', '[voxelmap_filename]', 'tf.int32'], {}), '(dataset.DataSet.filename_to_target_voxelmap, [voxelmap_filename],\n tf.int32)\n', (7568, 7648), True, 'import tensorflow as tf\n'), ((7840, 7886), 'dataset.DataSet.tf_voxelmap_to_depth', 'dataset.DataSet.tf_voxelmap_to_depth', (['voxelmap'], {}), '(voxelmap)\n', (7876, 7886), False, 'import dataset\n'), ((10385, 10398), 'numpy.load', 'np.load', (['name'], {}), '(name)\n', (10392, 10398), True, 'import numpy as np\n'), ((10515, 10546), 'numpy.flip', 'np.flip', (['numpy_voxelmap'], {'axis': '(2)'}), '(numpy_voxelmap, axis=2)\n', (10522, 10546), True, 'import numpy as np\n'), ((10725, 10758), 'numpy.argmax', 'np.argmax', (['numpy_voxelmap'], {'axis': '(2)'}), '(numpy_voxelmap, axis=2)\n', (10734, 10758), True, 'import numpy as np\n'), ((10831, 10858), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (10841, 10858), True, 'import matplotlib.pyplot as plt\n'), ((10863, 10878), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10871, 10878), True, 'import matplotlib.pyplot as plt\n'), ((10883, 10917), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_depth'], {'cmap': '"""gray"""'}), "(new_depth, cmap='gray')\n", (10893, 10917), True, 'import matplotlib.pyplot as plt\n'), ((10922, 10999), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""inspections/2018-03-07--17-57-32--527.png"""'], {'bbox_inches': '"""tight"""'}), "('inspections/2018-03-07--17-57-32--527.png', bbox_inches='tight')\n", (10933, 10999), True, 'import matplotlib.pyplot as plt\n'), ((379, 406), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['output_dir'], {}), '(output_dir)\n', (394, 406), True, 'import tensorflow as tf\n'), ((416, 445), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['output_dir'], {}), '(output_dir)\n', (433, 445), True, 'import tensorflow as tf\n'), ((3814, 3847), 'tensorflow.argmax', 'tf.argmax', (['labels'], {'axis': 'last_axis'}), '(labels, axis=last_axis)\n', (3823, 3847), True, 'import tensorflow as tf\n'), ((4042, 4091), 'tensorflow.range', 'tf.range', (['logits.shape[last_axis]'], {'dtype': 'tf.int32'}), '(logits.shape[last_axis], dtype=tf.int32)\n', (4050, 4091), True, 'import tensorflow as tf\n'), ((7137, 7161), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (7159, 7161), True, 'import tensorflow as tf\n'), ((8049, 8090), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (8073, 8090), True, 'import tensorflow as tf\n'), ((8203, 8239), 'tensorflow.constant', 'tf.constant', (["['train-voxel-gta.csv']"], {}), "(['train-voxel-gta.csv'])\n", (8214, 8239), True, 'import tensorflow as tf\n'), ((8264, 8310), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['train_imgs'], {}), 
'(train_imgs)\n', (8298, 8310), True, 'import tensorflow as tf\n'), ((11027, 11066), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0}"}), "(device_count={'GPU': 0})\n", (11041, 11066), True, 'import tensorflow as tf\n'), ((712, 727), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (720, 727), True, 'import numpy as np\n'), ((870, 883), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (876, 883), True, 'import numpy as np\n'), ((1032, 1053), 'numpy.uint8', 'np.uint8', (['ra_depth[0]'], {}), '(ra_depth[0])\n', (1040, 1053), True, 'import numpy as np\n'), ((2110, 2123), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (2116, 2123), True, 'import numpy as np\n'), ((2272, 2290), 'numpy.uint8', 'np.uint8', (['ra_depth'], {}), '(ra_depth)\n', (2280, 2290), True, 'import numpy as np\n'), ((3329, 3362), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': 'last_axis'}), '(labels, axis=last_axis)\n', (3338, 3362), True, 'import numpy as np\n'), ((5727, 5739), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5737, 5739), True, 'import tensorflow as tf\n'), ((5773, 5810), 'tensorflow.constant', 'tf.constant', (['logits'], {'dtype': 'tf.float32'}), '(logits, dtype=tf.float32)\n', (5784, 5810), True, 'import tensorflow as tf\n'), ((5835, 5872), 'tensorflow.constant', 'tf.constant', (['labels'], {'dtype': 'tf.float32'}), '(labels, dtype=tf.float32)\n', (5846, 5872), True, 'import tensorflow as tf\n'), ((7236, 7269), 'tensorflow.data.TextLineDataset', 'tf.data.TextLineDataset', (['filename'], {}), '(filename)\n', (7259, 7269), True, 'import tensorflow as tf\n'), ((7287, 7334), 'tensorflow.decode_csv', 'tf.decode_csv', (['line', "[['path'], ['annotation']]"], {}), "(line, [['path'], ['annotation']])\n", (7300, 7334), True, 'import tensorflow as tf\n'), ((1301, 1319), 'numpy.uint8', 'np.uint8', (['ra_depth'], {}), '(ra_depth)\n', (1309, 1319), True, 'import numpy as np\n'), ((5689, 5699), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5697, 5699), True, 'import tensorflow as tf\n'), ((5905, 5929), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits_tf'], {}), '(logits_tf)\n', (5918, 5929), True, 'import tensorflow as tf\n'), ((5962, 6037), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'labels_tf', 'logits': 'logits_tf'}), '(labels=labels_tf, logits=logits_tf)\n', (6001, 6037), True, 'import tensorflow as tf\n'), ((9083, 9110), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (9093, 9110), True, 'import matplotlib.pyplot as plt\n'), ((9127, 9142), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9135, 9142), True, 'import matplotlib.pyplot as plt\n'), ((9326, 9353), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (9336, 9353), True, 'import matplotlib.pyplot as plt\n'), ((9370, 9385), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9378, 9385), True, 'import matplotlib.pyplot as plt\n'), ((9402, 9451), 'matplotlib.pyplot.imshow', 'plt.imshow', (['depths_values[j, :, :].T'], {'cmap': '"""gray"""'}), "(depths_values[j, :, :].T, cmap='gray')\n", (9412, 9451), True, 'import matplotlib.pyplot as plt\n'), ((9712, 9746), 'numpy.flip', 'np.flip', (['occupied_ndc_grid'], {'axis': '(2)'}), '(occupied_ndc_grid, axis=2)\n', (9719, 9746), True, 'import numpy as np\n'), ((9831, 9867), 'numpy.argmax', 'np.argmax', (['occupied_ndc_grid'], {'axis': '(2)'}), 
'(occupied_ndc_grid, axis=2)\n', (9840, 9867), True, 'import numpy as np\n'), ((9973, 10000), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (9983, 10000), True, 'import matplotlib.pyplot as plt\n'), ((10017, 10032), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10025, 10032), True, 'import matplotlib.pyplot as plt\n'), ((10049, 10083), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_depth'], {'cmap': '"""gray"""'}), "(new_depth, cmap='gray')\n", (10059, 10083), True, 'import matplotlib.pyplot as plt\n'), ((922, 935), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (928, 935), True, 'import numpy as np\n'), ((2162, 2175), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (2168, 2175), True, 'import numpy as np\n')]
|